diff --git a/.gitignore b/.gitignore index 9fd9637d7ef47..469aa0d894b10 100644 --- a/.gitignore +++ b/.gitignore @@ -97,6 +97,9 @@ pcre/pcre_chartables.c pcre/pcregrep pcre/pcretest pcre/test*grep +plugin/aws_key_management/aws-sdk-cpp +plugin/aws_key_management/aws_sdk_cpp +plugin/aws_key_management/aws_sdk_cpp-prefix scripts/comp_sql scripts/make_binary_distribution scripts/msql2mysql @@ -179,6 +182,9 @@ storage/myisam/myisamlog storage/myisam/myisampack storage/myisam/rt_test storage/myisam/sp_test +storage/rocksdb/ldb +storage/rocksdb/mysql_ldb +storage/rocksdb/sst_dump storage/tokudb/PerconaFT/buildheader/db.h storage/tokudb/PerconaFT/buildheader/make_tdb storage/tokudb/PerconaFT/buildheader/runcat.sh diff --git a/.gitmodules b/.gitmodules index f08786dd6417b..6419657e501f4 100644 --- a/.gitmodules +++ b/.gitmodules @@ -1,3 +1,6 @@ [submodule "libmariadb"] path = libmariadb url = https://github.com/MariaDB/mariadb-connector-c +[submodule "storage/rocksdb/rocksdb"] + path = storage/rocksdb/rocksdb + url = https://github.com/facebook/rocksdb.git diff --git a/CMakeLists.txt b/CMakeLists.txt index b7c696bc086ec..5c4e003758897 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -364,6 +364,10 @@ IF(WITH_UNIT_TESTS) ENDIF() SET (MYSQLD_STATIC_PLUGIN_LIBS "" CACHE INTERNAL "") + +# mariadb_connector_c fetches submodules which is useful for plugins +INCLUDE(mariadb_connector_c) # this does ADD_SUBDIRECTORY(libmariadb) + # Add storage engines and plugins. 
CONFIGURE_PLUGINS() @@ -373,7 +377,6 @@ ADD_SUBDIRECTORY(strings) ADD_SUBDIRECTORY(vio) ADD_SUBDIRECTORY(mysys) ADD_SUBDIRECTORY(mysys_ssl) -INCLUDE(mariadb_connector_c) # this does ADD_SUBDIRECTORY(libmariadb) ADD_SUBDIRECTORY(client) ADD_SUBDIRECTORY(extra) ADD_SUBDIRECTORY(libservices) diff --git a/cmake/FindLZ4.cmake b/cmake/FindLZ4.cmake new file mode 100644 index 0000000000000..e97dd63e2b08c --- /dev/null +++ b/cmake/FindLZ4.cmake @@ -0,0 +1,9 @@ +find_path(LZ4_INCLUDE_DIR NAMES lz4.h) +find_library(LZ4_LIBRARY NAMES lz4) + +include(FindPackageHandleStandardArgs) +FIND_PACKAGE_HANDLE_STANDARD_ARGS( + LZ4 DEFAULT_MSG + LZ4_LIBRARY LZ4_INCLUDE_DIR) + +mark_as_advanced(LZ4_INCLUDE_DIR LZ4_LIBRARY) diff --git a/cmake/FindZSTD.cmake b/cmake/FindZSTD.cmake new file mode 100644 index 0000000000000..0fd7350132791 --- /dev/null +++ b/cmake/FindZSTD.cmake @@ -0,0 +1,18 @@ +find_path( + ZSTD_INCLUDE_DIR + NAMES "zstd.h" +) + +find_library( + ZSTD_LIBRARY + NAMES zstd +) + +set(ZSTD_LIBRARIES ${ZSTD_LIBRARY}) + +include(FindPackageHandleStandardArgs) +find_package_handle_standard_args( + ZSTD DEFAULT_MSG ZSTD_INCLUDE_DIR ZSTD_LIBRARIES) + +mark_as_advanced(ZSTD_INCLUDE_DIR ZSTD_LIBRARIES ZSTD_FOUND) + diff --git a/cmake/os/Windows.cmake b/cmake/os/Windows.cmake index 67108132d8a12..22f1ff7f30df7 100644 --- a/cmake/os/Windows.cmake +++ b/cmake/os/Windows.cmake @@ -63,6 +63,26 @@ IF(MINGW AND CMAKE_SIZEOF_VOID_P EQUAL 4) ENDIF() IF(MSVC) + SET(MSVC_CRT_TYPE /MT CACHE STRING + "Runtime library - specify runtime library for linking (/MT,/MTd,/MD,/MDd)" + ) + SET(VALID_CRT_TYPES /MTd /MDd /MD /MT) + IF (NOT ";${VALID_CRT_TYPES};" MATCHES ";${MSVC_CRT_TYPE};") + MESSAGE(FATAL_ERROR "Invalid value ${MSVC_CRT_TYPE} for MSVC_CRT_TYPE, choose one of /MT,/MTd,/MD,/MDd ") + ENDIF() + + IF(MSVC_CRT_TYPE MATCHES "/MD") + # Dynamic runtime (DLLs), need to install CRT libraries. 
+ SET(CMAKE_INSTALL_MFC_LIBRARIES TRUE)# upgrade wizard + SET(CMAKE_INSTALL_SYSTEM_RUNTIME_COMPONENT VCCRT) + SET(CMAKE_INSTALL_SYSTEM_RUNTIME_LIBS_NO_WARNINGS TRUE) + SET(CMAKE_INSTALL_UCRT_LIBRARIES TRUE) + IF(MSVC_CRT_TYPE STREQUAL "/MDd") + SET (CMAKE_INSTALL_DEBUG_LIBRARIES_ONLY TRUE) + ENDIF() + INCLUDE(InstallRequiredSystemLibraries) + ENDIF() + # Enable debug info also in Release build, # and create PDB to be able to analyze crashes. FOREACH(type EXE SHARED MODULE) @@ -77,6 +97,10 @@ IF(MSVC) # information for use with the debugger. The symbolic debugging # information includes the names and types of variables, as well as # functions and line numbers. No .pdb file is produced by the compiler. + # + # - Remove preprocessor flag _DEBUG that older cmakes use with Config=Debug, + # it is as defined by Debug runtimes itself (/MTd /MDd) + FOREACH(lang C CXX) SET(CMAKE_${lang}_FLAGS_RELEASE "${CMAKE_${lang}_FLAGS_RELEASE} /Z7") ENDFOREACH() @@ -85,7 +109,8 @@ IF(MSVC) CMAKE_C_FLAGS_DEBUG CMAKE_C_FLAGS_DEBUG_INIT CMAKE_CXX_FLAGS_RELEASE CMAKE_CXX_FLAGS_RELWITHDEBINFO CMAKE_CXX_FLAGS_DEBUG CMAKE_CXX_FLAGS_DEBUG_INIT) - STRING(REPLACE "/MD" "/MT" "${flag}" "${${flag}}") + STRING(REGEX REPLACE "/M[TD][d]?" "${MSVC_CRT_TYPE}" "${flag}" "${${flag}}" ) + STRING(REGEX REPLACE "/D[ ]?_DEBUG" "" "${flag}" "${${flag}}") STRING(REPLACE "/Zi" "/Z7" "${flag}" "${${flag}}") ENDFOREACH() @@ -117,13 +142,6 @@ IF(MSVC) SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} /wd4800 /wd4805 /wd4996") SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /wd4800 /wd4805 /wd4996 /wd4291 /wd4577 /we4099") - IF(CMAKE_SIZEOF_VOID_P MATCHES 8) - # _WIN64 is defined by the compiler itself. - # Yet, we define it here again to work around a bug with Intellisense - # described here: http://tinyurl.com/2cb428. - # Syntax highlighting is important for proper debugger functionality. 
- ADD_DEFINITIONS("-D_WIN64") - ENDIF() ENDIF() # Always link with socket library diff --git a/debian/autobake-deb.sh b/debian/autobake-deb.sh index 198cba8a46771..b6d1711e6ea49 100755 --- a/debian/autobake-deb.sh +++ b/debian/autobake-deb.sh @@ -72,6 +72,16 @@ then sed '/mariadb-service-convert/d' -i debian/mariadb-server-10.2.install fi +# Convert gcc version to numberical value. Format is Mmmpp where M is Major +# version, mm is minor version and p is patch. +GCCVERSION=$(gcc -dumpversion | sed -e 's/\.\([0-9][0-9]\)/\1/g' -e 's/\.\([0-9]\)/0\1/g' -e 's/^[0-9]\{3,4\}$/&00/') +# Don't build rocksdb package if gcc version is less than 4.8 or we are running on +# x86 32 bit. +if [[ $GCCVERSION -lt 40800 ]] || [[ $(arch) =~ i[346]86 ]] +then + sed '/Package: mariadb-plugin-rocksdb/,+7d' -i debian/control +fi + # Adjust changelog, add new version echo "Incrementing changelog and starting build scripts" diff --git a/debian/control b/debian/control index d0ecf4bddc33e..c501078f678d0 100644 --- a/debian/control +++ b/debian/control @@ -22,6 +22,7 @@ Build-Depends: bison, libpcre3-dev (>= 2:8.35-3.2~), libreadline-gplv2-dev, libssl-dev, + libsnappy-dev, libsystemd-dev, libxml2-dev, lsb-release, @@ -452,6 +453,14 @@ Description: Connect storage engine for MariaDB other interesting features. This package contains the Connect plugin for MariaDB. +Package: mariadb-plugin-rocksdb +Architecture: any +Depends: mariadb-server-10.2, ${misc:Depends}, ${shlibs:Depends} +Description: RocksDB storage engine for MariaDB + The RocksDB storage engine is a high performance storage engine, aimed + at maximising storage efficiency while maintaining InnoDB-like performance. + This package contains the RocksDB plugin for MariaDB. 
+ Package: mariadb-plugin-oqgraph Architecture: any Depends: libjudydebian1, mariadb-server-10.2, ${misc:Depends}, ${shlibs:Depends} diff --git a/debian/mariadb-plugin-rocksdb.install b/debian/mariadb-plugin-rocksdb.install new file mode 100644 index 0000000000000..ee45a822e0c42 --- /dev/null +++ b/debian/mariadb-plugin-rocksdb.install @@ -0,0 +1,4 @@ +etc/mysql/conf.d/rocksdb.cnf etc/mysql/mariadb.conf.d +usr/lib/mysql/plugin/ha_rocksdb.so +usr/bin/mysql_ldb +usr/bin/sst_dump diff --git a/include/my_bit.h b/include/my_bit.h index e7fd90f7b3055..01cad95792c83 100644 --- a/include/my_bit.h +++ b/include/my_bit.h @@ -25,7 +25,6 @@ C_MODE_START -extern const char _my_bits_nbits[256]; extern const uchar _my_bits_reverse_table[256]; /* @@ -40,37 +39,32 @@ static inline uint my_bit_log2(ulong value) return bit; } -static inline uint my_count_bits(ulonglong v) + +/* +Count bits in 32bit integer + + Algorithm by Sean Anderson, according to: + http://graphics.stanford.edu/~seander/bithacks.html + under "Counting bits set, in parallel" + + (Orignal code public domain). 
+*/ +static inline uint my_count_bits_uint32(uint32 v) { -#if SIZEOF_LONG_LONG > 4 - /* The following code is a bit faster on 16 bit machines than if we would - only shift v */ - ulong v2=(ulong) (v >> 32); - return (uint) (uchar) (_my_bits_nbits[(uchar) v] + - _my_bits_nbits[(uchar) (v >> 8)] + - _my_bits_nbits[(uchar) (v >> 16)] + - _my_bits_nbits[(uchar) (v >> 24)] + - _my_bits_nbits[(uchar) (v2)] + - _my_bits_nbits[(uchar) (v2 >> 8)] + - _my_bits_nbits[(uchar) (v2 >> 16)] + - _my_bits_nbits[(uchar) (v2 >> 24)]); -#else - return (uint) (uchar) (_my_bits_nbits[(uchar) v] + - _my_bits_nbits[(uchar) (v >> 8)] + - _my_bits_nbits[(uchar) (v >> 16)] + - _my_bits_nbits[(uchar) (v >> 24)]); -#endif + v = v - ((v >> 1) & 0x55555555); + v = (v & 0x33333333) + ((v >> 2) & 0x33333333); + return (((v + (v >> 4)) & 0xF0F0F0F) * 0x1010101) >> 24; } -static inline uint my_count_bits_uint32(uint32 v) + +static inline uint my_count_bits(ulonglong x) { - return (uint) (uchar) (_my_bits_nbits[(uchar) v] + - _my_bits_nbits[(uchar) (v >> 8)] + - _my_bits_nbits[(uchar) (v >> 16)] + - _my_bits_nbits[(uchar) (v >> 24)]); + return my_count_bits_uint32((uint32)x) + my_count_bits_uint32((uint32)(x >> 32)); } + + /* Next highest power of two diff --git a/include/mysql/plugin.h b/include/mysql/plugin.h index a5bfa1bbc9e88..2f077d8440e97 100644 --- a/include/mysql/plugin.h +++ b/include/mysql/plugin.h @@ -393,6 +393,23 @@ DECLARE_MYSQL_SYSVAR_SIMPLE(name, unsigned long long) = { \ PLUGIN_VAR_LONGLONG | PLUGIN_VAR_UNSIGNED | ((opt) & PLUGIN_VAR_MASK), \ #name, comment, check, update, &varname, def, min, max, blk } +#define MYSQL_SYSVAR_UINT64_T(name, varname, opt, comment, check, update, def, min, max, blk) \ +DECLARE_MYSQL_SYSVAR_SIMPLE(name, uint64_t) = { \ + PLUGIN_VAR_LONGLONG | PLUGIN_VAR_UNSIGNED | ((opt) & PLUGIN_VAR_MASK), \ + #name, comment, check, update, &varname, def, min, max, blk } + +#ifdef _WIN64 +#define MYSQL_SYSVAR_SIZE_T(name, varname, opt, comment, check, update, def, min, 
max, blk) \ +DECLARE_MYSQL_SYSVAR_SIMPLE(name, size_t) = { \ + PLUGIN_VAR_LONGLONG | PLUGIN_VAR_UNSIGNED | ((opt) & PLUGIN_VAR_MASK), \ + #name, comment, check, update, &varname, def, min, max, blk } +#else +#define MYSQL_SYSVAR_SIZE_T(name, varname, opt, comment, check, update, def, min, max, blk) \ +DECLARE_MYSQL_SYSVAR_SIMPLE(name, size_t) = { \ + PLUGIN_VAR_LONG | PLUGIN_VAR_UNSIGNED | ((opt) & PLUGIN_VAR_MASK), \ + #name, comment, check, update, &varname, def, min, max, blk } +#endif + #define MYSQL_SYSVAR_ENUM(name, varname, opt, comment, check, update, def, typelib) \ DECLARE_MYSQL_SYSVAR_TYPELIB(name, unsigned long) = { \ PLUGIN_VAR_ENUM | ((opt) & PLUGIN_VAR_MASK), \ diff --git a/mysql-test/collections/buildbot_suites.bat b/mysql-test/collections/buildbot_suites.bat new file mode 100644 index 0000000000000..f91692d291808 --- /dev/null +++ b/mysql-test/collections/buildbot_suites.bat @@ -0,0 +1,5 @@ +perl mysql-test-run.pl --verbose-restart --force --testcase-timeout=45 --suite-timeout=600 --max-test-fail=500 --retry=3 --parallel=4 --suite=^ +main,^ +innodb,^ +plugins,^ +rocksdb diff --git a/mysql-test/extra/rpl_tests/rpl_checksum.inc b/mysql-test/extra/rpl_tests/rpl_checksum.inc index 8423d2fc1cb6e..28d16658a7c78 100644 --- a/mysql-test/extra/rpl_tests/rpl_checksum.inc +++ b/mysql-test/extra/rpl_tests/rpl_checksum.inc @@ -305,7 +305,6 @@ if(!$log_error_) let $log_error_ = $MYSQLTEST_VARDIR/log/mysqld.2.err; } --let SEARCH_FILE= $log_error_ ---let SEARCH_RANGE=-50000 --let SEARCH_PATTERN= Slave SQL: The incident LOST_EVENTS occurred on the master\. 
Message: error writing to the binary log, Internal MariaDB error code: 1590 --source include/search_pattern_in_file.inc diff --git a/mysql-test/include/kill_and_restart_mysqld.inc b/mysql-test/include/kill_and_restart_mysqld.inc deleted file mode 100644 index f2ac9b504d226..0000000000000 --- a/mysql-test/include/kill_and_restart_mysqld.inc +++ /dev/null @@ -1,19 +0,0 @@ ---let $_server_id= `SELECT @@server_id` ---let $_expect_file_name= $MYSQLTEST_VARDIR/tmp/mysqld.$_server_id.expect - -if ($restart_parameters) -{ - --echo # Kill and restart: $restart_parameters - --exec echo "restart: $restart_parameters" > $_expect_file_name -} -if (!$restart_parameters) -{ - --echo # Kill and restart - --exec echo "restart" > $_expect_file_name -} - ---shutdown_server 0 ---source include/wait_until_disconnected.inc ---enable_reconnect ---source include/wait_until_connected_again.inc ---disable_reconnect diff --git a/mysql-test/include/search_pattern_in_file.inc b/mysql-test/include/search_pattern_in_file.inc index f77a7c6091607..3c5529989bbc9 100644 --- a/mysql-test/include/search_pattern_in_file.inc +++ b/mysql-test/include/search_pattern_in_file.inc @@ -12,37 +12,22 @@ # # Optionally, SEARCH_RANGE can be set to the max number of bytes of the file # to search. If negative, it will search that many bytes at the end of the -# file. The default is to search only the first 50000 bytes of the file. +# file. By default the search happens from the last CURRENT_TEST: +# marker till the end of file (appropriate for searching error logs). +# +# Optionally, SEARCH_ABORT can be set to "FOUND" or "NOT FOUND" and this +# will abort if the search result doesn't match the requested one. # # In case of # - SEARCH_FILE and/or SEARCH_PATTERN is not set # - SEARCH_FILE cannot be opened -# - SEARCH_FILE does not contain SEARCH_PATTERN # the test will abort immediate. -# MTR will report something like -# .... 
-# worker[1] Using MTR_BUILD_THREAD 300, with reserved ports 13000..13009 -# main.1st [ pass ] 3 -# innodb.innodb_page_size [ fail ] -# Test ended at 2011-11-11 18:15:58 -# -# CURRENT_TEST: innodb.innodb_page_size -# # ERROR: The file '' does not contain the expected pattern -# mysqltest: In included file "./include/search_pattern_in_file.inc": -# included from ./include/search_pattern_in_file.inc at line 36: -# At line 25: command "perl" failed with error 255. my_errno=175 -# -# The result from queries just before the failure was: -# ... -# - saving '' to '' -# main.1st [ pass ] 2 # # Typical use case (check invalid server startup options): # let $error_log= $MYSQLTEST_VARDIR/log/my_restart.err; # --error 0,1 # --remove_file $error_log # let SEARCH_FILE= $error_log; -# let SEARCH_RANGE= -50000; # # Stop the server # let $restart_file= $MYSQLTEST_VARDIR/tmp/mysqld.1.expect; # --exec echo "wait" > $restart_file @@ -60,36 +45,36 @@ perl; use strict; - die "SEARCH_FILE not set" unless $ENV{'SEARCH_FILE'}; - my @search_files= glob($ENV{'SEARCH_FILE'}); - my $search_pattern= $ENV{'SEARCH_PATTERN'} or die "SEARCH_PATTERN not set"; - my $search_range= $ENV{'SEARCH_RANGE'}; + die "SEARCH_FILE not set" unless $ENV{SEARCH_FILE}; + my @search_files= glob($ENV{SEARCH_FILE}); + my $search_pattern= $ENV{SEARCH_PATTERN} or die "SEARCH_PATTERN not set"; + my $search_range= $ENV{SEARCH_RANGE}; my $content; - $search_range= 50000 unless $search_range =~ /-?[0-9]+/; foreach my $search_file (@search_files) { - open(FILE, '<', $search_file) or die("Unable to open '$search_file': $!\n"); + open(FILE, '<', $search_file) || die("Can't open file $search_file: $!"); my $file_content; - if ($search_range >= 0) { + if ($search_range > 0) { read(FILE, $file_content, $search_range, 0); - } else { + } elsif ($search_range < 0) { my $size= -s $search_file; $search_range = -$size if $size > -$search_range; seek(FILE, $search_range, 2); read(FILE, $file_content, -$search_range, 0); + } else { + 
while() { # error log + if (/^CURRENT_TEST:/) { + $content=''; + } else { + $content.=$_; + } + } } close(FILE); $content.= $file_content; } - $ENV{'SEARCH_FILE'} =~ s{^.*?([^/\\]+)$}{$1}; - if ($content =~ m{$search_pattern}) { - die "FOUND /$search_pattern/ in $ENV{'SEARCH_FILE'}\n" - if $ENV{SEARCH_ABORT} eq 'FOUND'; - print "FOUND /$search_pattern/ in $ENV{'SEARCH_FILE'}\n" - unless defined $ENV{SEARCH_ABORT}; - } else { - die "NOT FOUND /$search_pattern/ in $ENV{'SEARCH_FILE'}\n" - if $ENV{SEARCH_ABORT} eq 'NOT FOUND'; - print "NOT FOUND /$search_pattern/ in $ENV{'SEARCH_FILE'}\n" - unless defined $ENV{SEARCH_ABORT}; - } + my @matches=($content =~ m/$search_pattern/gs); + my $res=@matches ? "FOUND " . scalar(@matches) : "NOT FOUND"; + $ENV{SEARCH_FILE} =~ s{^.*?([^/\\]+)$}{$1}; + print "$res /$search_pattern/ in $ENV{SEARCH_FILE}\n"; + exit $ENV{SEARCH_ABORT} && $res =~ /^$ENV{SEARCH_ABORT}/; EOF diff --git a/mysql-test/r/ctype_collate.result b/mysql-test/r/ctype_collate.result index a91e583f21f2c..5e8c5adac8fa6 100644 --- a/mysql-test/r/ctype_collate.result +++ b/mysql-test/r/ctype_collate.result @@ -719,3 +719,38 @@ DROP FUNCTION getText; DROP DATABASE test1; USE test; SET NAMES latin1; +# +# MDEV-11320, MySQL BUG#81810: Inconsistent sort order for blob/text between InnoDB and filesort +# +CREATE TABLE t1 ( +b LONGTEXT CHARACTER SET "latin1" COLLATE "latin1_bin", +KEY b (b(32)) +); +INSERT INTO t1 (b) VALUES ('a'), (_binary 0x1), (_binary 0x0), (''); +drop table t1; +CREATE TABLE t1 ( +b LONGTEXT CHARACTER SET "latin1" COLLATE "latin1_bin", +PRIMARY KEY b (b(32)) +); +INSERT INTO t1 (b) VALUES ('a'), (_binary 0x1), (_binary 0x0), (''); +explain +select hex(b) from t1 force index (PRIMARY) where b<'zzz'; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 range PRIMARY PRIMARY 34 NULL 4 Using where +select hex(b) from t1 force index (PRIMARY) where b<'zzz'; +hex(b) +00 +01 + +61 +explain +select hex(b) from t1 where b<'zzz' order 
by b; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 ALL PRIMARY NULL NULL NULL 4 Using where; Using filesort +select hex(b) from t1 where b<'zzz' order by b; +hex(b) +00 +01 + +61 +drop table t1; diff --git a/mysql-test/r/events_slowlog.result b/mysql-test/r/events_slowlog.result index 7de5925bc0fea..be0a1e78d2af6 100644 --- a/mysql-test/r/events_slowlog.result +++ b/mysql-test/r/events_slowlog.result @@ -6,7 +6,7 @@ set global long_query_time=0.2; create table t1 (i int); insert into t1 values (0); create event ev on schedule at CURRENT_TIMESTAMP + INTERVAL 1 second do update t1 set i=1+sleep(0.5); -FOUND /update t1 set i=1/ in mysqld-slow.log +FOUND 1 /update t1 set i=1/ in mysqld-slow.log drop table t1; set global event_scheduler= @event_scheduler_save; set global slow_query_log= @slow_query_log_save; diff --git a/mysql-test/r/lowercase_fs_on.result b/mysql-test/r/lowercase_fs_on.result index b844b3f77dde4..ddf3fd5f1fbeb 100644 --- a/mysql-test/r/lowercase_fs_on.result +++ b/mysql-test/r/lowercase_fs_on.result @@ -1,4 +1,4 @@ # # Bug#20198490 : LOWER_CASE_TABLE_NAMES=0 ON WINDOWS LEADS TO PROBLEMS # -FOUND /\[ERROR\] The server option \'lower_case_table_names\' is configured to use case sensitive table names/ in my_restart.err +FOUND 1 /\[ERROR\] The server option \'lower_case_table_names\' is configured to use case sensitive table names/ in my_restart.err diff --git a/mysql-test/r/myisam.result b/mysql-test/r/myisam.result index d2132c5e9542d..f7eb5db646816 100644 --- a/mysql-test/r/myisam.result +++ b/mysql-test/r/myisam.result @@ -1111,8 +1111,8 @@ length(c1) c1 0 SELECT DISTINCT length(c1), c1 FROM t1 ORDER BY c1; length(c1) c1 -0 2 A +0 2 B DROP TABLE t1; End of 4.1 tests diff --git a/mysql-test/r/mysqldump.result b/mysql-test/r/mysqldump.result index b46115c26f978..1243c455e6cef 100644 --- a/mysql-test/r/mysqldump.result +++ b/mysql-test/r/mysqldump.result @@ -5533,4 +5533,4 @@ USE `db1`; DROP DATABASE db1; DROP DATABASE 
db2; -FOUND /Database: mysql/ in bug11505.sql +FOUND 1 /Database: mysql/ in bug11505.sql diff --git a/mysql-test/r/named_pipe.result b/mysql-test/r/named_pipe.result index 89b3881eb5d21..66da9a874b4a8 100644 --- a/mysql-test/r/named_pipe.result +++ b/mysql-test/r/named_pipe.result @@ -2157,4 +2157,4 @@ Warning 1052 Column 'kundentyp' in group statement is ambiguous drop table t1; connection default; disconnect pipe_con; -FOUND /\[ERROR\] Create named pipe failed/ in second-mysqld.err +FOUND 1 /\[ERROR\] Create named pipe failed/ in second-mysqld.err diff --git a/mysql-test/r/range.result b/mysql-test/r/range.result index 5027fffe04729..28f5cf635d094 100644 --- a/mysql-test/r/range.result +++ b/mysql-test/r/range.result @@ -2332,3 +2332,677 @@ DROP TABLE t1; # # End of 10.1 tests # +# +# MDEV-10454: range access keys extracted +# from IN () +# +create table t1(a int, b int, c varchar(16), key idx(a,b)) engine=myisam; +insert into t1 values +(1,1,'xx'), (2,2,'yyy'), (3,3,'zzzz'), (1,2,'zz'), (1,3,'x'), +(2,3,'yy'), (4,5,'ww'), (7,8,'xxxxx'), (4,3,'zyx'), (1,2,'uuu'), +(2,1,'w'), (5,5,'wx'), (2,3,'ww'), (7,7,'xxxyy'), (3,3,'zyxw'), +(3,2,'uuuw'), (2,2,'wxz'), (5,5,'xw'), (12,12,'xx'), (12,12,'y'), +(13,13,'z'), (11,12,'zz'), (11,13,'x'), (12,13,'y'), (14,15,'w'), +(17,18,'xx'), (14,13,'zx'), (11,12,'u'), (12,11,'w'), (5,5,'wx'), +(12,13,'ww'), (17,17,'xxxyy'), (13,13,'zyxw'), (13,12,'uuuw'), (12,12,'wxz'), +(15,15,'xw'), (1,1,'xa'), (2,2,'yya'), (3,3,'zzza'), (1,2,'za'), +(1,3,'xb'), (2,3,'ya'), (4,5,'wa'), (7,8,'xxxxa'), (4,3,'zya'), +(1,2,'uua'), (2,1,'wb'), (5,5,'wc'), (2,3,'wa'), (7,7,'xxxya'), +(3,3,'zyxa'), (3,2,'uuua'), (2,2,'wxa'), (5,5,'xa'), (12,12,'xa'), +(22,12,'yb'), (23,13,'zb'), (21,12,'za'), (24,13,'c'), (32,13,'d'), +(34,15,'wd'), (47,18,'xa'), (54,13,'za'), (51,12,'ub'), (52,11,'wc'), +(5,5,'wd'), (62,13,'wa'), (67,17,'xxxya'), (63,13,'zyxa'), (73,12,'uuua'), +(82,12,'wxa'), (85,15,'xd'); +# range access to t1 by 2-component keys for index idx 
+explain select * from t1 where (a,b) IN ((2, 3),(3,3),(8,8),(7,7)); +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 range idx idx 10 NULL 7 Using where +explain format=json select * from t1 where (a,b) IN ((2, 3),(3,3),(8,8),(7,7)); +EXPLAIN +{ + "query_block": { + "select_id": 1, + "table": { + "table_name": "t1", + "access_type": "range", + "possible_keys": ["idx"], + "key": "idx", + "key_length": "10", + "used_key_parts": ["a", "b"], + "rows": 7, + "filtered": 100, + "attached_condition": "(t1.a,t1.b) in (((2,3)),((3,3)),((8,8)),((7,7)))" + } + } +} +select * from t1 where (a,b) IN ((2, 3),(3,3),(8,8),(7,7)); +a b c +2 3 yy +2 3 ww +2 3 ya +2 3 wa +3 3 zzzz +3 3 zyxw +3 3 zzza +3 3 zyxa +7 7 xxxyy +7 7 xxxya +prepare stmt from "select * from t1 where (a,b) IN ((2, 3),(3,3),(8,8),(7,7))"; +execute stmt; +a b c +2 3 yy +2 3 ww +2 3 ya +2 3 wa +3 3 zzzz +3 3 zyxw +3 3 zzza +3 3 zyxa +7 7 xxxyy +7 7 xxxya +execute stmt; +a b c +2 3 yy +2 3 ww +2 3 ya +2 3 wa +3 3 zzzz +3 3 zyxw +3 3 zzza +3 3 zyxa +7 7 xxxyy +7 7 xxxya +deallocate prepare stmt; +# range access to t1 by 1-component keys for index idx +explain select * from t1 where (a,b+a) IN ((4,9),(8,8),(7,7)); +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 range idx idx 5 NULL 5 Using where +explain format=json select * from t1 where (a,b+a) IN ((4,9),(8,8),(7,7)); +EXPLAIN +{ + "query_block": { + "select_id": 1, + "table": { + "table_name": "t1", + "access_type": "range", + "possible_keys": ["idx"], + "key": "idx", + "key_length": "5", + "used_key_parts": ["a"], + "rows": 5, + "filtered": 100, + "attached_condition": "(t1.a,t1.b + t1.a) in (((4,9)),((8,8)),((7,7)))" + } + } +} +select * from t1 where (a,b+a) IN ((4,9),(8,8),(7,7)); +a b c +4 5 ww +4 5 wa +# range access to t1 by 1-component keys for index idx +explain select * from t1 where (a,b) IN ((4,a-1),(8,a+8),(7,a+7)); +id select_type table type possible_keys key key_len ref rows Extra +1 
SIMPLE t1 range idx idx 5 NULL 5 Using where +explain format=json select * from t1 where (a,b) IN ((4,a-1),(8,a+8),(7,a+7)); +EXPLAIN +{ + "query_block": { + "select_id": 1, + "table": { + "table_name": "t1", + "access_type": "range", + "possible_keys": ["idx"], + "key": "idx", + "key_length": "5", + "used_key_parts": ["a"], + "rows": 5, + "filtered": 100, + "attached_condition": "(t1.a,t1.b) in ((4,t1.a - 1),(8,t1.a + 8),(7,t1.a + 7))" + } + } +} +select * from t1 where (a,b) IN ((4,a-1),(8,a+8),(7,a+7)); +a b c +4 3 zyx +4 3 zya +set @save_optimizer_switch=@@optimizer_switch; +set optimizer_switch='index_merge=off'; +create table t2( +d int, e int, key idx1(d), key idx2(e), f varchar(32) +) engine=myisam; +insert into t2 values +(9,5,'a'), (9,8,'b'), (9,3,'c'), (9,2,'d'), (9,1,'e'), +(6,5,'f'), (6,3,'g'), (6,7,'h'), (3,3,'i'), (6,2,'j'), +(9,5,'aa'), (9,8,'ba'), (9,3,'ca'), (2,2,'da'), (9,1,'ea'), +(6,5,'fa'), (6,3,'ga'), (6,7,'ha'), (9,3,'ia'), (6,2,'ja'); +# join order: (t2,t1) with ref access of t1 +# range access to t1 by keys for index idx1 +explain select * from t1,t2 +where a = d and (a,e) in ((3,3),(7,7),(2,2)); +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t2 range idx1,idx2 idx1 5 NULL 3 Using index condition; Using where +1 SIMPLE t1 ref idx idx 5 test.t2.d 8 +explain format=json select * from t1,t2 +where a = d and (a,e) in ((3,3),(7,7),(2,2)); +EXPLAIN +{ + "query_block": { + "select_id": 1, + "table": { + "table_name": "t2", + "access_type": "range", + "possible_keys": ["idx1", "idx2"], + "key": "idx1", + "key_length": "5", + "used_key_parts": ["d"], + "rows": 3, + "filtered": 100, + "index_condition": "t2.d is not null", + "attached_condition": "(t2.d,t2.e) in (((3,3)),((7,7)),((2,2)))" + }, + "table": { + "table_name": "t1", + "access_type": "ref", + "possible_keys": ["idx"], + "key": "idx", + "key_length": "5", + "used_key_parts": ["a"], + "ref": ["test.t2.d"], + "rows": 8, + "filtered": 100 + } + } +} +select * 
from t1,t2 +where a = d and (a,e) in ((3,3),(7,7),(2,2)); +a b c d e f +2 1 w 2 2 da +2 1 wb 2 2 da +2 2 yyy 2 2 da +2 2 wxz 2 2 da +2 2 yya 2 2 da +2 2 wxa 2 2 da +2 3 yy 2 2 da +2 3 ww 2 2 da +2 3 ya 2 2 da +2 3 wa 2 2 da +3 2 uuuw 3 3 i +3 2 uuua 3 3 i +3 3 zzzz 3 3 i +3 3 zyxw 3 3 i +3 3 zzza 3 3 i +3 3 zyxa 3 3 i +insert into t2 values +(4,5,'a'), (7,8,'b'), (4,3,'c'), (1,2,'d'), (2,1,'e'), (5,5,'f'), +(2,3,'g'), (7,7,'h'), (3,3,'i'), (3,2,'j'), (2,2,'k'), (5,5,'l'), +(4,5,'aa'), (7,8,'bb'), (4,3,'cc'), (1,2,'dd'), (2,1,'ee'), (9,5,'ff'), +(2,3,'gg'), (7,7,'hh'), (3,3,'ii'), (3,2,'jj'), (2,2,'kk'), (9,5,'ll'), +(4,5,'aaa'), (7,8,'bbb'), (4,3,'ccc'), (1,2,'ddd'), (2,1,'eee'), (5,5,'fff'), +(2,3,'ggg'), (7,7,'hhh'), (3,3,'iii'), (3,2,'jjj'), (2,2,'kkk'), (5,5,'lll'), +(14,15,'a'), (17,18,'b'), (14,13,'c'), (11,12,'d'), (12,11,'e'), (15,15,'f'), +(12,13,'g'), (17,17,'h'), (13,13,'i'), (13,12,'j'), (12,12,'k'), (15,15,'l'), +(24,25,'a'), (27,28,'b'), (24,23,'c'), (21,22,'d'), (22,21,'e'), (25,25,'f'), +(22,23,'g'), (27,27,'h'), (23,23,'i'), (23,22,'j'), (22,22,'k'), (25,25,'l'), +(34,35,'a'), (37,38,'b'), (34,33,'c'), (31,32,'d'), (32,31,'e'), (35,35,'f'), +(32,33,'g'), (37,37,'h'), (33,33,'i'), (33,32,'j'), (32,32,'k'), (35,35,'l'), +(44,45,'a'), (47,48,'b'), (44,43,'c'), (41,42,'d'), (42,41,'e'), (45,45,'f'), +(42,43,'g'), (47,47,'h'), (43,43,'i'), (43,42,'j'), (42,42,'k'), (45,45,'l'); +# join order: (t1,t2) with ref access of t2 +# range access to t1 by 1-component keys for index idx +explain select * from t1,t2 +where a = d and (a,e) in ((3,3),(7,7),(8,8)) and length(f) = 1; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 range idx idx 5 NULL 6 Using index condition +1 SIMPLE t2 ref idx1,idx2 idx1 5 test.t1.a 12 Using where +explain format=json select * from t1,t2 +where a = d and (a,e) in ((3,3),(7,7),(8,8)) and length(f) = 1; +EXPLAIN +{ + "query_block": { + "select_id": 1, + "table": { + "table_name": "t1", + "access_type": 
"range", + "possible_keys": ["idx"], + "key": "idx", + "key_length": "5", + "used_key_parts": ["a"], + "rows": 6, + "filtered": 100, + "index_condition": "t1.a is not null" + }, + "table": { + "table_name": "t2", + "access_type": "ref", + "possible_keys": ["idx1", "idx2"], + "key": "idx1", + "key_length": "5", + "used_key_parts": ["d"], + "ref": ["test.t1.a"], + "rows": 12, + "filtered": 100, + "attached_condition": "(t1.a,t2.e) in (((3,3)),((7,7)),((8,8))) and length(t2.f) = 1" + } + } +} +select * from t1,t2 +where a = d and (a,e) in ((3,3),(7,7),(8,8)) and length(f) = 1; +a b c d e f +3 2 uuuw 3 3 i +3 2 uuuw 3 3 i +3 2 uuua 3 3 i +3 2 uuua 3 3 i +3 3 zzzz 3 3 i +3 3 zzzz 3 3 i +3 3 zyxw 3 3 i +3 3 zyxw 3 3 i +3 3 zzza 3 3 i +3 3 zzza 3 3 i +3 3 zyxa 3 3 i +3 3 zyxa 3 3 i +7 7 xxxyy 7 7 h +7 7 xxxya 7 7 h +7 8 xxxxx 7 7 h +7 8 xxxxa 7 7 h +prepare stmt from "select * from t1,t2 +where a = d and (a,e) in ((3,3),(7,7),(8,8)) and length(f) = 1"; +execute stmt; +a b c d e f +3 2 uuuw 3 3 i +3 2 uuuw 3 3 i +3 2 uuua 3 3 i +3 2 uuua 3 3 i +3 3 zzzz 3 3 i +3 3 zzzz 3 3 i +3 3 zyxw 3 3 i +3 3 zyxw 3 3 i +3 3 zzza 3 3 i +3 3 zzza 3 3 i +3 3 zyxa 3 3 i +3 3 zyxa 3 3 i +7 7 xxxyy 7 7 h +7 7 xxxya 7 7 h +7 8 xxxxx 7 7 h +7 8 xxxxa 7 7 h +execute stmt; +a b c d e f +3 2 uuuw 3 3 i +3 2 uuuw 3 3 i +3 2 uuua 3 3 i +3 2 uuua 3 3 i +3 3 zzzz 3 3 i +3 3 zzzz 3 3 i +3 3 zyxw 3 3 i +3 3 zyxw 3 3 i +3 3 zzza 3 3 i +3 3 zzza 3 3 i +3 3 zyxa 3 3 i +3 3 zyxa 3 3 i +7 7 xxxyy 7 7 h +7 7 xxxya 7 7 h +7 8 xxxxx 7 7 h +7 8 xxxxa 7 7 h +deallocate prepare stmt; +insert into t1 select * from t1; +# join order: (t2,t1) with ref access of t1 +# range access to t2 by keys for index idx2 +explain select * from t1,t2 +where a = d and (a,e) in ((4,4),(7,7),(8,8)) and length(f) = 1; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t2 range idx1,idx2 idx2 5 NULL 6 Using where +1 SIMPLE t1 ref idx idx 5 test.t2.d 11 +explain format=json select * from t1,t2 +where a = d 
and (a,e) in ((4,4),(7,7),(8,8)) and length(f) = 1; +EXPLAIN +{ + "query_block": { + "select_id": 1, + "table": { + "table_name": "t2", + "access_type": "range", + "possible_keys": ["idx1", "idx2"], + "key": "idx2", + "key_length": "5", + "used_key_parts": ["e"], + "rows": 6, + "filtered": 100, + "attached_condition": "(t2.d,t2.e) in (((4,4)),((7,7)),((8,8))) and length(t2.f) = 1 and t2.d is not null" + }, + "table": { + "table_name": "t1", + "access_type": "ref", + "possible_keys": ["idx"], + "key": "idx", + "key_length": "5", + "used_key_parts": ["a"], + "ref": ["test.t2.d"], + "rows": 11, + "filtered": 100 + } + } +} +select * from t1,t2 +where a = d and (a,e) in ((4,4),(7,7),(8,8)) and length(f) = 1; +a b c d e f +7 7 xxxyy 7 7 h +7 7 xxxya 7 7 h +7 7 xxxyy 7 7 h +7 7 xxxya 7 7 h +7 8 xxxxx 7 7 h +7 8 xxxxa 7 7 h +7 8 xxxxx 7 7 h +7 8 xxxxa 7 7 h +alter table t2 drop index idx1, drop index idx2, add index idx3(d,e); +# join order: (t2,t1) with ref access of t1 +# range access to t2 by 2-component keys for index idx3 +explain select * from t1,t2 +where a = d and (a,e) in ((4,4),(7,7),(8,8)) and length(f) = 1; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t2 range idx3 idx3 10 NULL 5 Using index condition; Using where +1 SIMPLE t1 ref idx idx 5 test.t2.d 11 +explain format=json select * from t1,t2 +where a = d and (a,e) in ((4,4),(7,7),(8,8)) and length(f) = 1; +EXPLAIN +{ + "query_block": { + "select_id": 1, + "table": { + "table_name": "t2", + "access_type": "range", + "possible_keys": ["idx3"], + "key": "idx3", + "key_length": "10", + "used_key_parts": ["d", "e"], + "rows": 5, + "filtered": 100, + "index_condition": "t2.d is not null", + "attached_condition": "(t2.d,t2.e) in (((4,4)),((7,7)),((8,8))) and length(t2.f) = 1" + }, + "table": { + "table_name": "t1", + "access_type": "ref", + "possible_keys": ["idx"], + "key": "idx", + "key_length": "5", + "used_key_parts": ["a"], + "ref": ["test.t2.d"], + "rows": 11, + "filtered": 
100 + } + } +} +select * from t1,t2 +where a = d and (a,e) in ((4,4),(7,7),(8,8)) and length(f) = 1; +a b c d e f +7 7 xxxyy 7 7 h +7 7 xxxya 7 7 h +7 7 xxxyy 7 7 h +7 7 xxxya 7 7 h +7 8 xxxxx 7 7 h +7 8 xxxxa 7 7 h +7 8 xxxxx 7 7 h +7 8 xxxxa 7 7 h +# join order: (t1,t2) with ref access of t2 +# range access to t1 by 1-component keys for index idx +explain select * from t1,t2 +where a = d and (a,e) in ((4,d+1),(7,d+1),(8,d+1)) and length(f) = 1; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 range idx idx 5 NULL 15 Using index condition +1 SIMPLE t2 ref idx3 idx3 5 test.t1.a 3 Using where +explain format=json select * from t1,t2 +where a = d and (a,e) in ((4,d+1),(7,d+1),(8,d+1)) and length(f) = 1; +EXPLAIN +{ + "query_block": { + "select_id": 1, + "table": { + "table_name": "t1", + "access_type": "range", + "possible_keys": ["idx"], + "key": "idx", + "key_length": "5", + "used_key_parts": ["a"], + "rows": 15, + "filtered": 100, + "index_condition": "t1.a is not null" + }, + "table": { + "table_name": "t2", + "access_type": "ref", + "possible_keys": ["idx3"], + "key": "idx3", + "key_length": "5", + "used_key_parts": ["d"], + "ref": ["test.t1.a"], + "rows": 3, + "filtered": 100, + "attached_condition": "(t1.a,t2.e) in ((4,t1.a + 1),(7,t1.a + 1),(8,t1.a + 1)) and length(t2.f) = 1" + } + } +} +select * from t1,t2 +where a = d and (a,e) in ((4,d+1),(7,d+1),(8,d+1)) and length(f) = 1; +a b c d e f +4 3 zyx 4 5 a +4 3 zya 4 5 a +4 3 zyx 4 5 a +4 3 zya 4 5 a +4 5 ww 4 5 a +4 5 wa 4 5 a +4 5 ww 4 5 a +4 5 wa 4 5 a +7 7 xxxyy 7 8 b +7 7 xxxya 7 8 b +7 7 xxxyy 7 8 b +7 7 xxxya 7 8 b +7 8 xxxxx 7 8 b +7 8 xxxxa 7 8 b +7 8 xxxxx 7 8 b +7 8 xxxxa 7 8 b +# join order: (t1,t2) with ref access of t2 +# no range access +explain select * from t1,t2 +where a = d and (a,e) in ((e,d+1),(7,7),(8,8)) and length(f) = 1; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 ALL idx NULL NULL NULL 144 Using where +1 SIMPLE t2 ref 
idx3 idx3 5 test.t1.a 3 Using where +explain format=json select * from t1,t2 +where a = d and (a,e) in ((e,d+1),(7,7),(8,8)) and length(f) = 1; +EXPLAIN +{ + "query_block": { + "select_id": 1, + "table": { + "table_name": "t1", + "access_type": "ALL", + "possible_keys": ["idx"], + "rows": 144, + "filtered": 100, + "attached_condition": "t1.a is not null" + }, + "table": { + "table_name": "t2", + "access_type": "ref", + "possible_keys": ["idx3"], + "key": "idx3", + "key_length": "5", + "used_key_parts": ["d"], + "ref": ["test.t1.a"], + "rows": 3, + "filtered": 100, + "attached_condition": "(t1.a,t2.e) in ((t2.e,t1.a + 1),((7,7)),((8,8))) and length(t2.f) = 1" + } + } +} +select * from t1,t2 +where a = d and (a,e) in ((e,d+1),(7,7),(8,8)) and length(f) = 1; +a b c d e f +7 8 xxxxx 7 7 h +7 7 xxxyy 7 7 h +7 8 xxxxa 7 7 h +7 7 xxxya 7 7 h +7 8 xxxxx 7 7 h +7 7 xxxyy 7 7 h +7 8 xxxxa 7 7 h +7 7 xxxya 7 7 h +# join order: (t1,t2) with ref access of t2 +# range access to t1 by 1-component keys for index idx +explain select * from t1,t2 +where a = d and (a,2) in ((2,2),(7,7),(8,8)) and +length(c) = 1 and length(f) = 1; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 range idx idx 5 NULL 13 Using index condition; Using where +1 SIMPLE t2 ref idx3 idx3 5 test.t1.a 3 Using where +explain format=json select * from t1,t2 +where a = d and (a,2) in ((2,2),(7,7),(8,8)) and +length(c) = 1 and length(f) = 1; +EXPLAIN +{ + "query_block": { + "select_id": 1, + "table": { + "table_name": "t1", + "access_type": "range", + "possible_keys": ["idx"], + "key": "idx", + "key_length": "5", + "used_key_parts": ["a"], + "rows": 13, + "filtered": 100, + "index_condition": "t1.a is not null", + "attached_condition": "(t1.a,2) in (((2,2)),((7,7)),((8,8))) and length(t1.c) = 1" + }, + "table": { + "table_name": "t2", + "access_type": "ref", + "possible_keys": ["idx3"], + "key": "idx3", + "key_length": "5", + "used_key_parts": ["d"], + "ref": ["test.t1.a"], + "rows": 
3, + "filtered": 100, + "attached_condition": "length(t2.f) = 1" + } + } +} +select * from t1,t2 +where a = d and (a,2) in ((2,2),(7,7),(8,8)) and +length(c) = 1 and length(f) = 1; +a b c d e f +2 1 w 2 1 e +2 1 w 2 2 k +2 1 w 2 3 g +2 1 w 2 1 e +2 1 w 2 2 k +2 1 w 2 3 g +prepare stmt from "select * from t1,t2 +where a = d and (a,2) in ((2,2),(7,7),(8,8)) and +length(c) = 1 and length(f) = 1"; +execute stmt; +a b c d e f +2 1 w 2 1 e +2 1 w 2 2 k +2 1 w 2 3 g +2 1 w 2 1 e +2 1 w 2 2 k +2 1 w 2 3 g +execute stmt; +a b c d e f +2 1 w 2 1 e +2 1 w 2 2 k +2 1 w 2 3 g +2 1 w 2 1 e +2 1 w 2 2 k +2 1 w 2 3 g +deallocate prepare stmt; +create table t3 (id int primary key, v int) engine=myisam; +insert into t3 values +(3,2), (1,1), (4,12), (2,15); +# join order: (t3,t1,t2) with const t3 and ref access of t2 +# range access to t1 by 1-component keys for index idx +explain select * from t1,t2,t3 +where id = 1 and a = d and +(a,v+1) in ((2,2),(7,7),(8,8)) and +length(c) = 1 and length(f) = 1; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t3 const PRIMARY PRIMARY 4 const 1 +1 SIMPLE t1 range idx idx 5 NULL 13 Using index condition; Using where +1 SIMPLE t2 ref idx3 idx3 5 test.t1.a 3 Using where +explain format=json select * from t1,t2,t3 +where id = 1 and a = d and +(a,v+1) in ((2,2),(7,7),(8,8)) and +length(c) = 1 and length(f) = 1; +EXPLAIN +{ + "query_block": { + "select_id": 1, + "table": { + "table_name": "t3", + "access_type": "const", + "possible_keys": ["PRIMARY"], + "key": "PRIMARY", + "key_length": "4", + "used_key_parts": ["id"], + "ref": ["const"], + "rows": 1, + "filtered": 100 + }, + "table": { + "table_name": "t1", + "access_type": "range", + "possible_keys": ["idx"], + "key": "idx", + "key_length": "5", + "used_key_parts": ["a"], + "rows": 13, + "filtered": 100, + "index_condition": "t1.a is not null", + "attached_condition": "(t1.a,1 + 1) in (((2,2)),((7,7)),((8,8))) and length(t1.c) = 1" + }, + "table": { + "table_name": "t2", 
+ "access_type": "ref", + "possible_keys": ["idx3"], + "key": "idx3", + "key_length": "5", + "used_key_parts": ["d"], + "ref": ["test.t1.a"], + "rows": 3, + "filtered": 100, + "attached_condition": "length(t2.f) = 1" + } + } +} +select * from t1,t2,t3 +where id = 1 and a = d and +(a,v+1) in ((2,2),(7,7),(8,8)) and +length(c) = 1 and length(f) = 1; +a b c d e f id v +2 1 w 2 1 e 1 1 +2 1 w 2 2 k 1 1 +2 1 w 2 3 g 1 1 +2 1 w 2 1 e 1 1 +2 1 w 2 2 k 1 1 +2 1 w 2 3 g 1 1 +# IN predicate is always FALSE +explain select * from t1,t2,t3 +where id = 1 and a = d and +(a,v+1) in ((9,9),(7,7),(8,8)) and +length(c) = 1 and length(f) = 1; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE NULL NULL NULL NULL NULL NULL NULL Impossible WHERE noticed after reading const tables +prepare stmt from "select * from t1,t2,t3 +where id = 1 and a = d and +(a,v+1) in ((9,9),(7,7),(8,8)) and +length(c) = 1 and length(f) = 1"; +execute stmt; +a b c d e f id v +execute stmt; +a b c d e f id v +deallocate prepare stmt; +set optimizer_switch=@save_optimizer_switch; +drop table t1,t2,t3; +# +# End of 10.2 tests +# diff --git a/mysql-test/r/range_mrr_icp.result b/mysql-test/r/range_mrr_icp.result index 7d009070150fe..f2860aaab7678 100644 --- a/mysql-test/r/range_mrr_icp.result +++ b/mysql-test/r/range_mrr_icp.result @@ -2334,4 +2334,688 @@ DROP TABLE t1; # # End of 10.1 tests # +# +# MDEV-10454: range access keys extracted +# from IN () +# +create table t1(a int, b int, c varchar(16), key idx(a,b)) engine=myisam; +insert into t1 values +(1,1,'xx'), (2,2,'yyy'), (3,3,'zzzz'), (1,2,'zz'), (1,3,'x'), +(2,3,'yy'), (4,5,'ww'), (7,8,'xxxxx'), (4,3,'zyx'), (1,2,'uuu'), +(2,1,'w'), (5,5,'wx'), (2,3,'ww'), (7,7,'xxxyy'), (3,3,'zyxw'), +(3,2,'uuuw'), (2,2,'wxz'), (5,5,'xw'), (12,12,'xx'), (12,12,'y'), +(13,13,'z'), (11,12,'zz'), (11,13,'x'), (12,13,'y'), (14,15,'w'), +(17,18,'xx'), (14,13,'zx'), (11,12,'u'), (12,11,'w'), (5,5,'wx'), +(12,13,'ww'), (17,17,'xxxyy'), (13,13,'zyxw'), 
(13,12,'uuuw'), (12,12,'wxz'), +(15,15,'xw'), (1,1,'xa'), (2,2,'yya'), (3,3,'zzza'), (1,2,'za'), +(1,3,'xb'), (2,3,'ya'), (4,5,'wa'), (7,8,'xxxxa'), (4,3,'zya'), +(1,2,'uua'), (2,1,'wb'), (5,5,'wc'), (2,3,'wa'), (7,7,'xxxya'), +(3,3,'zyxa'), (3,2,'uuua'), (2,2,'wxa'), (5,5,'xa'), (12,12,'xa'), +(22,12,'yb'), (23,13,'zb'), (21,12,'za'), (24,13,'c'), (32,13,'d'), +(34,15,'wd'), (47,18,'xa'), (54,13,'za'), (51,12,'ub'), (52,11,'wc'), +(5,5,'wd'), (62,13,'wa'), (67,17,'xxxya'), (63,13,'zyxa'), (73,12,'uuua'), +(82,12,'wxa'), (85,15,'xd'); +# range access to t1 by 2-component keys for index idx +explain select * from t1 where (a,b) IN ((2, 3),(3,3),(8,8),(7,7)); +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 range idx idx 10 NULL 7 Using where; Rowid-ordered scan +explain format=json select * from t1 where (a,b) IN ((2, 3),(3,3),(8,8),(7,7)); +EXPLAIN +{ + "query_block": { + "select_id": 1, + "table": { + "table_name": "t1", + "access_type": "range", + "possible_keys": ["idx"], + "key": "idx", + "key_length": "10", + "used_key_parts": ["a", "b"], + "rows": 7, + "filtered": 100, + "attached_condition": "(t1.a,t1.b) in (((2,3)),((3,3)),((8,8)),((7,7)))", + "mrr_type": "Rowid-ordered scan" + } + } +} +select * from t1 where (a,b) IN ((2, 3),(3,3),(8,8),(7,7)); +a b c +3 3 zzzz +2 3 yy +2 3 ww +7 7 xxxyy +3 3 zyxw +3 3 zzza +2 3 ya +2 3 wa +7 7 xxxya +3 3 zyxa +prepare stmt from "select * from t1 where (a,b) IN ((2, 3),(3,3),(8,8),(7,7))"; +execute stmt; +a b c +3 3 zzzz +2 3 yy +2 3 ww +7 7 xxxyy +3 3 zyxw +3 3 zzza +2 3 ya +2 3 wa +7 7 xxxya +3 3 zyxa +execute stmt; +a b c +3 3 zzzz +2 3 yy +2 3 ww +7 7 xxxyy +3 3 zyxw +3 3 zzza +2 3 ya +2 3 wa +7 7 xxxya +3 3 zyxa +deallocate prepare stmt; +# range access to t1 by 1-component keys for index idx +explain select * from t1 where (a,b+a) IN ((4,9),(8,8),(7,7)); +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 range idx idx 5 NULL 5 Using where; Rowid-ordered 
scan +explain format=json select * from t1 where (a,b+a) IN ((4,9),(8,8),(7,7)); +EXPLAIN +{ + "query_block": { + "select_id": 1, + "table": { + "table_name": "t1", + "access_type": "range", + "possible_keys": ["idx"], + "key": "idx", + "key_length": "5", + "used_key_parts": ["a"], + "rows": 5, + "filtered": 100, + "attached_condition": "(t1.a,t1.b + t1.a) in (((4,9)),((8,8)),((7,7)))", + "mrr_type": "Rowid-ordered scan" + } + } +} +select * from t1 where (a,b+a) IN ((4,9),(8,8),(7,7)); +a b c +4 5 ww +4 5 wa +# range access to t1 by 1-component keys for index idx +explain select * from t1 where (a,b) IN ((4,a-1),(8,a+8),(7,a+7)); +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 range idx idx 5 NULL 5 Using where; Rowid-ordered scan +explain format=json select * from t1 where (a,b) IN ((4,a-1),(8,a+8),(7,a+7)); +EXPLAIN +{ + "query_block": { + "select_id": 1, + "table": { + "table_name": "t1", + "access_type": "range", + "possible_keys": ["idx"], + "key": "idx", + "key_length": "5", + "used_key_parts": ["a"], + "rows": 5, + "filtered": 100, + "attached_condition": "(t1.a,t1.b) in ((4,t1.a - 1),(8,t1.a + 8),(7,t1.a + 7))", + "mrr_type": "Rowid-ordered scan" + } + } +} +select * from t1 where (a,b) IN ((4,a-1),(8,a+8),(7,a+7)); +a b c +4 3 zyx +4 3 zya +set @save_optimizer_switch=@@optimizer_switch; +set optimizer_switch='index_merge=off'; +create table t2( +d int, e int, key idx1(d), key idx2(e), f varchar(32) +) engine=myisam; +insert into t2 values +(9,5,'a'), (9,8,'b'), (9,3,'c'), (9,2,'d'), (9,1,'e'), +(6,5,'f'), (6,3,'g'), (6,7,'h'), (3,3,'i'), (6,2,'j'), +(9,5,'aa'), (9,8,'ba'), (9,3,'ca'), (2,2,'da'), (9,1,'ea'), +(6,5,'fa'), (6,3,'ga'), (6,7,'ha'), (9,3,'ia'), (6,2,'ja'); +# join order: (t2,t1) with ref access of t1 +# range access to t1 by keys for index idx1 +explain select * from t1,t2 +where a = d and (a,e) in ((3,3),(7,7),(2,2)); +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t2 range 
idx1,idx2 idx1 5 NULL 3 Using index condition; Using where; Rowid-ordered scan +1 SIMPLE t1 ref idx idx 5 test.t2.d 8 +explain format=json select * from t1,t2 +where a = d and (a,e) in ((3,3),(7,7),(2,2)); +EXPLAIN +{ + "query_block": { + "select_id": 1, + "table": { + "table_name": "t2", + "access_type": "range", + "possible_keys": ["idx1", "idx2"], + "key": "idx1", + "key_length": "5", + "used_key_parts": ["d"], + "rows": 3, + "filtered": 100, + "index_condition": "t2.d is not null", + "attached_condition": "(t2.d,t2.e) in (((3,3)),((7,7)),((2,2)))", + "mrr_type": "Rowid-ordered scan" + }, + "table": { + "table_name": "t1", + "access_type": "ref", + "possible_keys": ["idx"], + "key": "idx", + "key_length": "5", + "used_key_parts": ["a"], + "ref": ["test.t2.d"], + "rows": 8, + "filtered": 100 + } + } +} +select * from t1,t2 +where a = d and (a,e) in ((3,3),(7,7),(2,2)); +a b c d e f +3 2 uuuw 3 3 i +3 2 uuua 3 3 i +3 3 zzzz 3 3 i +3 3 zyxw 3 3 i +3 3 zzza 3 3 i +3 3 zyxa 3 3 i +2 1 w 2 2 da +2 1 wb 2 2 da +2 2 yyy 2 2 da +2 2 wxz 2 2 da +2 2 yya 2 2 da +2 2 wxa 2 2 da +2 3 yy 2 2 da +2 3 ww 2 2 da +2 3 ya 2 2 da +2 3 wa 2 2 da +insert into t2 values +(4,5,'a'), (7,8,'b'), (4,3,'c'), (1,2,'d'), (2,1,'e'), (5,5,'f'), +(2,3,'g'), (7,7,'h'), (3,3,'i'), (3,2,'j'), (2,2,'k'), (5,5,'l'), +(4,5,'aa'), (7,8,'bb'), (4,3,'cc'), (1,2,'dd'), (2,1,'ee'), (9,5,'ff'), +(2,3,'gg'), (7,7,'hh'), (3,3,'ii'), (3,2,'jj'), (2,2,'kk'), (9,5,'ll'), +(4,5,'aaa'), (7,8,'bbb'), (4,3,'ccc'), (1,2,'ddd'), (2,1,'eee'), (5,5,'fff'), +(2,3,'ggg'), (7,7,'hhh'), (3,3,'iii'), (3,2,'jjj'), (2,2,'kkk'), (5,5,'lll'), +(14,15,'a'), (17,18,'b'), (14,13,'c'), (11,12,'d'), (12,11,'e'), (15,15,'f'), +(12,13,'g'), (17,17,'h'), (13,13,'i'), (13,12,'j'), (12,12,'k'), (15,15,'l'), +(24,25,'a'), (27,28,'b'), (24,23,'c'), (21,22,'d'), (22,21,'e'), (25,25,'f'), +(22,23,'g'), (27,27,'h'), (23,23,'i'), (23,22,'j'), (22,22,'k'), (25,25,'l'), +(34,35,'a'), (37,38,'b'), (34,33,'c'), (31,32,'d'), (32,31,'e'), 
(35,35,'f'), +(32,33,'g'), (37,37,'h'), (33,33,'i'), (33,32,'j'), (32,32,'k'), (35,35,'l'), +(44,45,'a'), (47,48,'b'), (44,43,'c'), (41,42,'d'), (42,41,'e'), (45,45,'f'), +(42,43,'g'), (47,47,'h'), (43,43,'i'), (43,42,'j'), (42,42,'k'), (45,45,'l'); +# join order: (t1,t2) with ref access of t2 +# range access to t1 by 1-component keys for index idx +explain select * from t1,t2 +where a = d and (a,e) in ((3,3),(7,7),(8,8)) and length(f) = 1; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 range idx idx 5 NULL 6 Using index condition; Rowid-ordered scan +1 SIMPLE t2 ref idx1,idx2 idx1 5 test.t1.a 12 Using where +explain format=json select * from t1,t2 +where a = d and (a,e) in ((3,3),(7,7),(8,8)) and length(f) = 1; +EXPLAIN +{ + "query_block": { + "select_id": 1, + "table": { + "table_name": "t1", + "access_type": "range", + "possible_keys": ["idx"], + "key": "idx", + "key_length": "5", + "used_key_parts": ["a"], + "rows": 6, + "filtered": 100, + "index_condition": "t1.a is not null", + "mrr_type": "Rowid-ordered scan" + }, + "table": { + "table_name": "t2", + "access_type": "ref", + "possible_keys": ["idx1", "idx2"], + "key": "idx1", + "key_length": "5", + "used_key_parts": ["d"], + "ref": ["test.t1.a"], + "rows": 12, + "filtered": 100, + "attached_condition": "(t1.a,t2.e) in (((3,3)),((7,7)),((8,8))) and length(t2.f) = 1" + } + } +} +select * from t1,t2 +where a = d and (a,e) in ((3,3),(7,7),(8,8)) and length(f) = 1; +a b c d e f +3 3 zzzz 3 3 i +3 3 zzzz 3 3 i +7 8 xxxxx 7 7 h +7 7 xxxyy 7 7 h +3 3 zyxw 3 3 i +3 3 zyxw 3 3 i +3 2 uuuw 3 3 i +3 2 uuuw 3 3 i +3 3 zzza 3 3 i +3 3 zzza 3 3 i +7 8 xxxxa 7 7 h +7 7 xxxya 7 7 h +3 3 zyxa 3 3 i +3 3 zyxa 3 3 i +3 2 uuua 3 3 i +3 2 uuua 3 3 i +prepare stmt from "select * from t1,t2 +where a = d and (a,e) in ((3,3),(7,7),(8,8)) and length(f) = 1"; +execute stmt; +a b c d e f +3 3 zzzz 3 3 i +3 3 zzzz 3 3 i +7 8 xxxxx 7 7 h +7 7 xxxyy 7 7 h +3 3 zyxw 3 3 i +3 3 zyxw 3 3 i +3 2 uuuw 3 3 i +3 2 
uuuw 3 3 i +3 3 zzza 3 3 i +3 3 zzza 3 3 i +7 8 xxxxa 7 7 h +7 7 xxxya 7 7 h +3 3 zyxa 3 3 i +3 3 zyxa 3 3 i +3 2 uuua 3 3 i +3 2 uuua 3 3 i +execute stmt; +a b c d e f +3 3 zzzz 3 3 i +3 3 zzzz 3 3 i +7 8 xxxxx 7 7 h +7 7 xxxyy 7 7 h +3 3 zyxw 3 3 i +3 3 zyxw 3 3 i +3 2 uuuw 3 3 i +3 2 uuuw 3 3 i +3 3 zzza 3 3 i +3 3 zzza 3 3 i +7 8 xxxxa 7 7 h +7 7 xxxya 7 7 h +3 3 zyxa 3 3 i +3 3 zyxa 3 3 i +3 2 uuua 3 3 i +3 2 uuua 3 3 i +deallocate prepare stmt; +insert into t1 select * from t1; +# join order: (t2,t1) with ref access of t1 +# range access to t2 by keys for index idx2 +explain select * from t1,t2 +where a = d and (a,e) in ((4,4),(7,7),(8,8)) and length(f) = 1; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t2 range idx1,idx2 idx2 5 NULL 6 Using where; Rowid-ordered scan +1 SIMPLE t1 ref idx idx 5 test.t2.d 11 +explain format=json select * from t1,t2 +where a = d and (a,e) in ((4,4),(7,7),(8,8)) and length(f) = 1; +EXPLAIN +{ + "query_block": { + "select_id": 1, + "table": { + "table_name": "t2", + "access_type": "range", + "possible_keys": ["idx1", "idx2"], + "key": "idx2", + "key_length": "5", + "used_key_parts": ["e"], + "rows": 6, + "filtered": 100, + "attached_condition": "(t2.d,t2.e) in (((4,4)),((7,7)),((8,8))) and length(t2.f) = 1 and t2.d is not null", + "mrr_type": "Rowid-ordered scan" + }, + "table": { + "table_name": "t1", + "access_type": "ref", + "possible_keys": ["idx"], + "key": "idx", + "key_length": "5", + "used_key_parts": ["a"], + "ref": ["test.t2.d"], + "rows": 11, + "filtered": 100 + } + } +} +select * from t1,t2 +where a = d and (a,e) in ((4,4),(7,7),(8,8)) and length(f) = 1; +a b c d e f +7 7 xxxyy 7 7 h +7 7 xxxya 7 7 h +7 7 xxxyy 7 7 h +7 7 xxxya 7 7 h +7 8 xxxxx 7 7 h +7 8 xxxxa 7 7 h +7 8 xxxxx 7 7 h +7 8 xxxxa 7 7 h +alter table t2 drop index idx1, drop index idx2, add index idx3(d,e); +# join order: (t2,t1) with ref access of t1 +# range access to t2 by 2-component keys for index idx3 +explain select * 
from t1,t2 +where a = d and (a,e) in ((4,4),(7,7),(8,8)) and length(f) = 1; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t2 range idx3 idx3 10 NULL 5 Using index condition; Using where; Rowid-ordered scan +1 SIMPLE t1 ref idx idx 5 test.t2.d 11 +explain format=json select * from t1,t2 +where a = d and (a,e) in ((4,4),(7,7),(8,8)) and length(f) = 1; +EXPLAIN +{ + "query_block": { + "select_id": 1, + "table": { + "table_name": "t2", + "access_type": "range", + "possible_keys": ["idx3"], + "key": "idx3", + "key_length": "10", + "used_key_parts": ["d", "e"], + "rows": 5, + "filtered": 100, + "index_condition": "t2.d is not null", + "attached_condition": "(t2.d,t2.e) in (((4,4)),((7,7)),((8,8))) and length(t2.f) = 1", + "mrr_type": "Rowid-ordered scan" + }, + "table": { + "table_name": "t1", + "access_type": "ref", + "possible_keys": ["idx"], + "key": "idx", + "key_length": "5", + "used_key_parts": ["a"], + "ref": ["test.t2.d"], + "rows": 11, + "filtered": 100 + } + } +} +select * from t1,t2 +where a = d and (a,e) in ((4,4),(7,7),(8,8)) and length(f) = 1; +a b c d e f +7 7 xxxyy 7 7 h +7 7 xxxya 7 7 h +7 7 xxxyy 7 7 h +7 7 xxxya 7 7 h +7 8 xxxxx 7 7 h +7 8 xxxxa 7 7 h +7 8 xxxxx 7 7 h +7 8 xxxxa 7 7 h +# join order: (t1,t2) with ref access of t2 +# range access to t1 by 1-component keys for index idx +explain select * from t1,t2 +where a = d and (a,e) in ((4,d+1),(7,d+1),(8,d+1)) and length(f) = 1; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 range idx idx 5 NULL 15 Using index condition; Rowid-ordered scan +1 SIMPLE t2 ref idx3 idx3 5 test.t1.a 3 Using where +explain format=json select * from t1,t2 +where a = d and (a,e) in ((4,d+1),(7,d+1),(8,d+1)) and length(f) = 1; +EXPLAIN +{ + "query_block": { + "select_id": 1, + "table": { + "table_name": "t1", + "access_type": "range", + "possible_keys": ["idx"], + "key": "idx", + "key_length": "5", + "used_key_parts": ["a"], + "rows": 15, + "filtered": 100, + 
"index_condition": "t1.a is not null", + "mrr_type": "Rowid-ordered scan" + }, + "table": { + "table_name": "t2", + "access_type": "ref", + "possible_keys": ["idx3"], + "key": "idx3", + "key_length": "5", + "used_key_parts": ["d"], + "ref": ["test.t1.a"], + "rows": 3, + "filtered": 100, + "attached_condition": "(t1.a,t2.e) in ((4,t1.a + 1),(7,t1.a + 1),(8,t1.a + 1)) and length(t2.f) = 1" + } + } +} +select * from t1,t2 +where a = d and (a,e) in ((4,d+1),(7,d+1),(8,d+1)) and length(f) = 1; +a b c d e f +4 5 ww 4 5 a +7 8 xxxxx 7 8 b +4 3 zyx 4 5 a +7 7 xxxyy 7 8 b +4 5 wa 4 5 a +7 8 xxxxa 7 8 b +4 3 zya 4 5 a +7 7 xxxya 7 8 b +4 5 ww 4 5 a +7 8 xxxxx 7 8 b +4 3 zyx 4 5 a +7 7 xxxyy 7 8 b +4 5 wa 4 5 a +7 8 xxxxa 7 8 b +4 3 zya 4 5 a +7 7 xxxya 7 8 b +# join order: (t1,t2) with ref access of t2 +# no range access +explain select * from t1,t2 +where a = d and (a,e) in ((e,d+1),(7,7),(8,8)) and length(f) = 1; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 ALL idx NULL NULL NULL 144 Using where +1 SIMPLE t2 ref idx3 idx3 5 test.t1.a 3 Using where +explain format=json select * from t1,t2 +where a = d and (a,e) in ((e,d+1),(7,7),(8,8)) and length(f) = 1; +EXPLAIN +{ + "query_block": { + "select_id": 1, + "table": { + "table_name": "t1", + "access_type": "ALL", + "possible_keys": ["idx"], + "rows": 144, + "filtered": 100, + "attached_condition": "t1.a is not null" + }, + "table": { + "table_name": "t2", + "access_type": "ref", + "possible_keys": ["idx3"], + "key": "idx3", + "key_length": "5", + "used_key_parts": ["d"], + "ref": ["test.t1.a"], + "rows": 3, + "filtered": 100, + "attached_condition": "(t1.a,t2.e) in ((t2.e,t1.a + 1),((7,7)),((8,8))) and length(t2.f) = 1" + } + } +} +select * from t1,t2 +where a = d and (a,e) in ((e,d+1),(7,7),(8,8)) and length(f) = 1; +a b c d e f +7 8 xxxxx 7 7 h +7 7 xxxyy 7 7 h +7 8 xxxxa 7 7 h +7 7 xxxya 7 7 h +7 8 xxxxx 7 7 h +7 7 xxxyy 7 7 h +7 8 xxxxa 7 7 h +7 7 xxxya 7 7 h +# join order: (t1,t2) with 
ref access of t2 +# range access to t1 by 1-component keys for index idx +explain select * from t1,t2 +where a = d and (a,2) in ((2,2),(7,7),(8,8)) and +length(c) = 1 and length(f) = 1; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 range idx idx 5 NULL 13 Using index condition; Using where; Rowid-ordered scan +1 SIMPLE t2 ref idx3 idx3 5 test.t1.a 3 Using where +explain format=json select * from t1,t2 +where a = d and (a,2) in ((2,2),(7,7),(8,8)) and +length(c) = 1 and length(f) = 1; +EXPLAIN +{ + "query_block": { + "select_id": 1, + "table": { + "table_name": "t1", + "access_type": "range", + "possible_keys": ["idx"], + "key": "idx", + "key_length": "5", + "used_key_parts": ["a"], + "rows": 13, + "filtered": 100, + "index_condition": "t1.a is not null", + "attached_condition": "(t1.a,2) in (((2,2)),((7,7)),((8,8))) and length(t1.c) = 1", + "mrr_type": "Rowid-ordered scan" + }, + "table": { + "table_name": "t2", + "access_type": "ref", + "possible_keys": ["idx3"], + "key": "idx3", + "key_length": "5", + "used_key_parts": ["d"], + "ref": ["test.t1.a"], + "rows": 3, + "filtered": 100, + "attached_condition": "length(t2.f) = 1" + } + } +} +select * from t1,t2 +where a = d and (a,2) in ((2,2),(7,7),(8,8)) and +length(c) = 1 and length(f) = 1; +a b c d e f +2 1 w 2 1 e +2 1 w 2 2 k +2 1 w 2 3 g +2 1 w 2 1 e +2 1 w 2 2 k +2 1 w 2 3 g +prepare stmt from "select * from t1,t2 +where a = d and (a,2) in ((2,2),(7,7),(8,8)) and +length(c) = 1 and length(f) = 1"; +execute stmt; +a b c d e f +2 1 w 2 1 e +2 1 w 2 2 k +2 1 w 2 3 g +2 1 w 2 1 e +2 1 w 2 2 k +2 1 w 2 3 g +execute stmt; +a b c d e f +2 1 w 2 1 e +2 1 w 2 2 k +2 1 w 2 3 g +2 1 w 2 1 e +2 1 w 2 2 k +2 1 w 2 3 g +deallocate prepare stmt; +create table t3 (id int primary key, v int) engine=myisam; +insert into t3 values +(3,2), (1,1), (4,12), (2,15); +# join order: (t3,t1,t2) with const t3 and ref access of t2 +# range access to t1 by 1-component keys for index idx +explain select * 
from t1,t2,t3 +where id = 1 and a = d and +(a,v+1) in ((2,2),(7,7),(8,8)) and +length(c) = 1 and length(f) = 1; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t3 const PRIMARY PRIMARY 4 const 1 +1 SIMPLE t1 range idx idx 5 NULL 13 Using index condition; Using where; Rowid-ordered scan +1 SIMPLE t2 ref idx3 idx3 5 test.t1.a 3 Using where +explain format=json select * from t1,t2,t3 +where id = 1 and a = d and +(a,v+1) in ((2,2),(7,7),(8,8)) and +length(c) = 1 and length(f) = 1; +EXPLAIN +{ + "query_block": { + "select_id": 1, + "table": { + "table_name": "t3", + "access_type": "const", + "possible_keys": ["PRIMARY"], + "key": "PRIMARY", + "key_length": "4", + "used_key_parts": ["id"], + "ref": ["const"], + "rows": 1, + "filtered": 100 + }, + "table": { + "table_name": "t1", + "access_type": "range", + "possible_keys": ["idx"], + "key": "idx", + "key_length": "5", + "used_key_parts": ["a"], + "rows": 13, + "filtered": 100, + "index_condition": "t1.a is not null", + "attached_condition": "(t1.a,1 + 1) in (((2,2)),((7,7)),((8,8))) and length(t1.c) = 1", + "mrr_type": "Rowid-ordered scan" + }, + "table": { + "table_name": "t2", + "access_type": "ref", + "possible_keys": ["idx3"], + "key": "idx3", + "key_length": "5", + "used_key_parts": ["d"], + "ref": ["test.t1.a"], + "rows": 3, + "filtered": 100, + "attached_condition": "length(t2.f) = 1" + } + } +} +select * from t1,t2,t3 +where id = 1 and a = d and +(a,v+1) in ((2,2),(7,7),(8,8)) and +length(c) = 1 and length(f) = 1; +a b c d e f id v +2 1 w 2 1 e 1 1 +2 1 w 2 2 k 1 1 +2 1 w 2 3 g 1 1 +2 1 w 2 1 e 1 1 +2 1 w 2 2 k 1 1 +2 1 w 2 3 g 1 1 +# IN predicate is always FALSE +explain select * from t1,t2,t3 +where id = 1 and a = d and +(a,v+1) in ((9,9),(7,7),(8,8)) and +length(c) = 1 and length(f) = 1; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE NULL NULL NULL NULL NULL NULL NULL Impossible WHERE noticed after reading const tables +prepare stmt from "select * 
from t1,t2,t3 +where id = 1 and a = d and +(a,v+1) in ((9,9),(7,7),(8,8)) and +length(c) = 1 and length(f) = 1"; +execute stmt; +a b c d e f id v +execute stmt; +a b c d e f id v +deallocate prepare stmt; +set optimizer_switch=@save_optimizer_switch; +drop table t1,t2,t3; +# +# End of 10.2 tests +# set optimizer_switch=@mrr_icp_extra_tmp; diff --git a/mysql-test/r/shutdown.result b/mysql-test/r/shutdown.result index ff2e450c3f0ed..be2eb16470caf 100644 --- a/mysql-test/r/shutdown.result +++ b/mysql-test/r/shutdown.result @@ -13,4 +13,4 @@ drop user user1@localhost; # # MDEV-8491 - On shutdown, report the user and the host executed that. # -FOUND /mysqld(\.exe)? \(root\[root\] @ localhost \[(::1)?\]\): Normal shutdown/ in mysqld.1.err +FOUND 2 /mysqld(\.exe)? \(root\[root\] @ localhost \[(::1)?\]\): Normal shutdown/ in mysqld.1.err diff --git a/mysql-test/r/view.result b/mysql-test/r/view.result index ca763a752149e..20c9a1d2e80bf 100644 --- a/mysql-test/r/view.result +++ b/mysql-test/r/view.result @@ -5479,7 +5479,7 @@ DROP FUNCTION f1; DROP VIEW v1; DROP TABLE t1, t2; create view v1 as select 1; -FOUND /mariadb-version/ in v1.frm +FOUND 1 /mariadb-version/ in v1.frm drop view v1; # # MDEV-7260: Crash in get_best_combination when executing multi-table diff --git a/mysql-test/r/wait_timeout_not_windows.result b/mysql-test/r/wait_timeout_not_windows.result index 7b129ce5f123c..f31dec1b4baf5 100644 --- a/mysql-test/r/wait_timeout_not_windows.result +++ b/mysql-test/r/wait_timeout_not_windows.result @@ -2,5 +2,5 @@ set global log_warnings=2; connect foo,localhost,root; set @@wait_timeout=1; connection default; -FOUND /Aborted.*Got timeout reading communication packets/ in mysqld.1.err +FOUND 1 /Aborted.*Got timeout reading communication packets/ in mysqld.1.err set global log_warnings=@@log_warnings; diff --git a/mysql-test/suite/binlog_encryption/encrypted_master.result b/mysql-test/suite/binlog_encryption/encrypted_master.result index 65dd12ccba328..8a3798731f2be 
100644 --- a/mysql-test/suite/binlog_encryption/encrypted_master.result +++ b/mysql-test/suite/binlog_encryption/encrypted_master.result @@ -598,23 +598,23 @@ DROP SERVER server_name_to_encrypt; ############################# # Final checks for the master ############################# -NOT FOUND /_to_encrypt/ in master-bin.0* -NOT FOUND /COMMIT/ in master-bin.0* -NOT FOUND /TIMESTAMP/ in master-bin.0* +NOT FOUND /_to_encrypt.*/ in master-bin.0* +NOT FOUND /COMMIT.*/ in master-bin.0* +NOT FOUND /TIMESTAMP.*/ in master-bin.0* include/save_master_pos.inc ############################# # Final checks for the slave ############################# connection server_2; include/sync_io_with_master.inc -FOUND /_to_encrypt/ in slave-relay-bin.0* -FOUND /COMMIT/ in slave-relay-bin.0* -FOUND /TIMESTAMP/ in slave-relay-bin.0* +FOUND 1 /_to_encrypt.*/ in slave-relay-bin.0* +FOUND 1 /COMMIT.*/ in slave-relay-bin.0* +FOUND 1 /TIMESTAMP.*/ in slave-relay-bin.0* include/start_slave.inc include/sync_slave_sql_with_io.inc -FOUND /_to_encrypt/ in slave-bin.0* -FOUND /COMMIT/ in slave-bin.0* -FOUND /TIMESTAMP/ in slave-bin.0* +FOUND 1 /_to_encrypt.*/ in slave-bin.0* +FOUND 1 /COMMIT.*/ in slave-bin.0* +FOUND 1 /TIMESTAMP.*/ in slave-bin.0* ########## # Cleanup ########## diff --git a/mysql-test/suite/binlog_encryption/encrypted_master.test b/mysql-test/suite/binlog_encryption/encrypted_master.test index 5eb0345342d5d..503a40443d239 100644 --- a/mysql-test/suite/binlog_encryption/encrypted_master.test +++ b/mysql-test/suite/binlog_encryption/encrypted_master.test @@ -106,16 +106,17 @@ SET binlog_row_image= MINIMAL; --let $master_datadir= `SELECT @@datadir` +--let SEARCH_RANGE = 500000 --let SEARCH_FILE= $master_datadir/master-bin.0* ---let SEARCH_PATTERN= _to_encrypt +--let SEARCH_PATTERN= _to_encrypt.* --source include/search_pattern_in_file.inc --let SEARCH_FILE= $master_datadir/master-bin.0* ---let SEARCH_PATTERN= COMMIT +--let SEARCH_PATTERN= COMMIT.* --source 
include/search_pattern_in_file.inc --let SEARCH_FILE= $master_datadir/master-bin.0* ---let SEARCH_PATTERN= TIMESTAMP +--let SEARCH_PATTERN= TIMESTAMP.* --source include/search_pattern_in_file.inc --disable_connect_log @@ -138,15 +139,15 @@ SET binlog_row_image= MINIMAL; # Check that relay logs are unencrypted --let SEARCH_FILE= $slave_datadir/slave-relay-bin.0* ---let SEARCH_PATTERN= _to_encrypt +--let SEARCH_PATTERN= _to_encrypt.* --source include/search_pattern_in_file.inc --let SEARCH_FILE= $slave_datadir/slave-relay-bin.0* ---let SEARCH_PATTERN= COMMIT +--let SEARCH_PATTERN= COMMIT.* --source include/search_pattern_in_file.inc --let SEARCH_FILE= $slave_datadir/slave-relay-bin.0* ---let SEARCH_PATTERN= TIMESTAMP +--let SEARCH_PATTERN= TIMESTAMP.* --source include/search_pattern_in_file.inc @@ -158,15 +159,15 @@ SET binlog_row_image= MINIMAL; --enable_connect_log --let SEARCH_FILE= $slave_datadir/slave-bin.0* ---let SEARCH_PATTERN= _to_encrypt +--let SEARCH_PATTERN= _to_encrypt.* --source include/search_pattern_in_file.inc --let SEARCH_FILE= $slave_datadir/slave-bin.0* ---let SEARCH_PATTERN= COMMIT +--let SEARCH_PATTERN= COMMIT.* --source include/search_pattern_in_file.inc --let SEARCH_FILE= $slave_datadir/slave-bin.0* ---let SEARCH_PATTERN= TIMESTAMP +--let SEARCH_PATTERN= TIMESTAMP.* --source include/search_pattern_in_file.inc --echo ########## diff --git a/mysql-test/suite/binlog_encryption/encrypted_master_lost_key.test b/mysql-test/suite/binlog_encryption/encrypted_master_lost_key.test index 7e5fd7859f062..c4cf337f94e36 100644 --- a/mysql-test/suite/binlog_encryption/encrypted_master_lost_key.test +++ b/mysql-test/suite/binlog_encryption/encrypted_master_lost_key.test @@ -58,6 +58,7 @@ INSERT INTO table1_to_encrypt SELECT NULL,NOW(),b FROM table1_to_encrypt; # Make sure that binary logs are encrypted +--let SEARCH_RANGE = 500000 --let SEARCH_FILE= master-bin.0* --let SEARCH_PATTERN= table1_to_encrypt --source include/search_pattern_in_file.inc diff --git 
a/mysql-test/suite/binlog_encryption/encrypted_master_switch_to_unencrypted.test b/mysql-test/suite/binlog_encryption/encrypted_master_switch_to_unencrypted.test index 91231f89307ef..eec72d64066fc 100644 --- a/mysql-test/suite/binlog_encryption/encrypted_master_switch_to_unencrypted.test +++ b/mysql-test/suite/binlog_encryption/encrypted_master_switch_to_unencrypted.test @@ -52,6 +52,7 @@ INSERT INTO table1_no_encryption SELECT NULL,NOW(),b FROM table1_no_encryption; # Make sure that binary logs are not encrypted +--let SEARCH_RANGE = 500000 --let SEARCH_FILE= master-bin.0* --let SEARCH_PATTERN= table1_no_encryption --source include/search_pattern_in_file.inc diff --git a/mysql-test/suite/binlog_encryption/encrypted_slave.result b/mysql-test/suite/binlog_encryption/encrypted_slave.result index 00096a61a5ba8..ff8ae37401497 100644 --- a/mysql-test/suite/binlog_encryption/encrypted_slave.result +++ b/mysql-test/suite/binlog_encryption/encrypted_slave.result @@ -149,9 +149,9 @@ DROP SERVER server_name_to_encrypt; ################# # Master binlog checks ################# -FOUND /_to_encrypt/ in master-bin.0* -FOUND /COMMIT/ in master-bin.0* -FOUND /TIMESTAMP/ in master-bin.0* +FOUND 1 /_to_encrypt.*/ in master-bin.0* +FOUND 1 /COMMIT.*/ in master-bin.0* +FOUND 1 /TIMESTAMP.*/ in master-bin.0* include/save_master_pos.inc ################# # Relay log checks diff --git a/mysql-test/suite/binlog_encryption/encrypted_slave.test b/mysql-test/suite/binlog_encryption/encrypted_slave.test index a69e78cd94074..f5697d9177997 100644 --- a/mysql-test/suite/binlog_encryption/encrypted_slave.test +++ b/mysql-test/suite/binlog_encryption/encrypted_slave.test @@ -42,16 +42,17 @@ --let $master_datadir= `SELECT @@datadir` +--let SEARCH_RANGE = 500000 --let SEARCH_FILE= $master_datadir/master-bin.0* ---let SEARCH_PATTERN= _to_encrypt +--let SEARCH_PATTERN= _to_encrypt.* --source include/search_pattern_in_file.inc --let SEARCH_FILE= $master_datadir/master-bin.0* ---let SEARCH_PATTERN= 
COMMIT +--let SEARCH_PATTERN= COMMIT.* --source include/search_pattern_in_file.inc --let SEARCH_FILE= $master_datadir/master-bin.0* ---let SEARCH_PATTERN= TIMESTAMP +--let SEARCH_PATTERN= TIMESTAMP.* --source include/search_pattern_in_file.inc --disable_connect_log diff --git a/mysql-test/suite/binlog_encryption/encryption_combo.result b/mysql-test/suite/binlog_encryption/encryption_combo.result index d921c73440dc9..de5d91dfab720 100644 --- a/mysql-test/suite/binlog_encryption/encryption_combo.result +++ b/mysql-test/suite/binlog_encryption/encryption_combo.result @@ -19,7 +19,7 @@ FLUSH BINARY LOGS; SET binlog_format=ROW; INSERT INTO table1_no_encryption SELECT NULL,NOW(),b FROM table1_no_encryption; INSERT INTO table1_no_encryption SELECT NULL,NOW(),b FROM table1_no_encryption; -FOUND /table1_no_encryption/ in master-bin.0* +FOUND 11 /table1_no_encryption/ in master-bin.0* ##################################################### # Part 2: restart master, now with binlog encryption ##################################################### diff --git a/mysql-test/suite/binlog_encryption/encryption_combo.test b/mysql-test/suite/binlog_encryption/encryption_combo.test index a5cf117d4a8a4..c24e77f421546 100644 --- a/mysql-test/suite/binlog_encryption/encryption_combo.test +++ b/mysql-test/suite/binlog_encryption/encryption_combo.test @@ -52,6 +52,7 @@ INSERT INTO table1_no_encryption SELECT NULL,NOW(),b FROM table1_no_encryption; --let $master_datadir= `SELECT @@datadir` +--let SEARCH_RANGE = 500000 --let SEARCH_FILE= $master_datadir/master-bin.0* --let SEARCH_PATTERN= table1_no_encryption --source include/search_pattern_in_file.inc diff --git a/mysql-test/suite/binlog_encryption/rpl_checksum.result b/mysql-test/suite/binlog_encryption/rpl_checksum.result index 418536c35583d..41c4cd94aff15 100644 --- a/mysql-test/suite/binlog_encryption/rpl_checksum.result +++ b/mysql-test/suite/binlog_encryption/rpl_checksum.result @@ -174,7 +174,7 @@ INSERT INTO t4 VALUES (2); connection 
slave; include/wait_for_slave_sql_error.inc [errno=1590] Last_SQL_Error = 'The incident LOST_EVENTS occurred on the master. Message: error writing to the binary log' -FOUND /Slave SQL: The incident LOST_EVENTS occurred on the master\. Message: error writing to the binary log, Internal MariaDB error code: 1590/ in mysqld.2.err +FOUND 1 /Slave SQL: The incident LOST_EVENTS occurred on the master\. Message: error writing to the binary log, Internal MariaDB error code: 1590/ in mysqld.2.err SELECT * FROM t4 ORDER BY a; a 1 diff --git a/mysql-test/suite/binlog_encryption/rpl_loadfile.test b/mysql-test/suite/binlog_encryption/rpl_loadfile.test index 97886ca0f4838..40379a5c3d064 100644 --- a/mysql-test/suite/binlog_encryption/rpl_loadfile.test +++ b/mysql-test/suite/binlog_encryption/rpl_loadfile.test @@ -7,5 +7,6 @@ --echo # --let SEARCH_FILE=$datadir/master-bin.0* +--let SEARCH_RANGE = 500000 --let SEARCH_PATTERN= xxxxxxxxxxx --source include/search_pattern_in_file.inc diff --git a/mysql-test/suite/encryption/r/encrypt_and_grep.result b/mysql-test/suite/encryption/r/encrypt_and_grep.result index bd20b79aafe96..b1ffbdb81347b 100644 --- a/mysql-test/suite/encryption/r/encrypt_and_grep.result +++ b/mysql-test/suite/encryption/r/encrypt_and_grep.result @@ -21,7 +21,7 @@ NOT FOUND /foobar/ in t1.ibd # t2 ... on expecting NOT FOUND NOT FOUND /temp/ in t2.ibd # t3 no on expecting FOUND -FOUND /dummy/ in t3.ibd +FOUND 42 /dummy/ in t3.ibd # ibdata1 expecting NOT FOUND NOT FOUND /foobar/ in ibdata1 # Now turn off encryption and wait for threads to decrypt everything @@ -43,7 +43,7 @@ NOT FOUND /foobar/ in t1.ibd # t2 ... on expecting FOUND NOT FOUND /temp/ in t2.ibd # t3 no on expecting FOUND -FOUND /dummy/ in t3.ibd +FOUND 42 /dummy/ in t3.ibd # ibdata1 expecting NOT FOUND NOT FOUND /foobar/ in ibdata1 # Now turn on encryption and wait for threads to encrypt all spaces @@ -65,7 +65,7 @@ NOT FOUND /foobar/ in t1.ibd # t2 ... 
on expecting NOT FOUND NOT FOUND /temp/ in t2.ibd # t3 no on expecting FOUND -FOUND /dummy/ in t3.ibd +FOUND 42 /dummy/ in t3.ibd # ibdata1 expecting NOT FOUND NOT FOUND /foobar/ in ibdata1 drop table t1, t2, t3; diff --git a/mysql-test/suite/encryption/r/filekeys_emptyfile.result b/mysql-test/suite/encryption/r/filekeys_emptyfile.result index f94f11d9f089a..19bca3c36c725 100644 --- a/mysql-test/suite/encryption/r/filekeys_emptyfile.result +++ b/mysql-test/suite/encryption/r/filekeys_emptyfile.result @@ -1,7 +1,7 @@ call mtr.add_suppression("System key id 1 is missing at"); call mtr.add_suppression("Plugin 'file_key_management' init function returned error"); call mtr.add_suppression("Plugin 'file_key_management' registration.*failed"); -FOUND /System key id 1 is missing at/ in mysqld.1.err +FOUND 1 /System key id 1 is missing at/ in mysqld.1.err create table t1(c1 bigint not null, b char(200)) engine=innodb encrypted=yes encryption_key_id=1; ERROR HY000: Can't create table `test`.`t1` (errno: 140 "Wrong create options") select plugin_status from information_schema.plugins diff --git a/mysql-test/suite/encryption/r/filekeys_encfile_bad.result b/mysql-test/suite/encryption/r/filekeys_encfile_bad.result index 6261bd459b8cf..59124f2babd15 100644 --- a/mysql-test/suite/encryption/r/filekeys_encfile_bad.result +++ b/mysql-test/suite/encryption/r/filekeys_encfile_bad.result @@ -1,7 +1,7 @@ call mtr.add_suppression("Cannot decrypt .*filekeys-data.enc. Wrong key"); call mtr.add_suppression("Plugin 'file_key_management' init function returned error"); call mtr.add_suppression("Plugin 'file_key_management' registration.*failed"); -FOUND /Cannot decrypt .*filekeys-data.enc. Wrong key/ in mysqld.1.err +FOUND 1 /Cannot decrypt .*filekeys-data.enc. 
Wrong key/ in mysqld.1.err create table t1(c1 bigint not null, b char(200)) engine=innodb encrypted=yes encryption_key_id=1; ERROR HY000: Can't create table `test`.`t1` (errno: 140 "Wrong create options") select plugin_status from information_schema.plugins diff --git a/mysql-test/suite/encryption/r/filekeys_encfile_badfile.result b/mysql-test/suite/encryption/r/filekeys_encfile_badfile.result index 98e2266f3f253..7e244c2c38121 100644 --- a/mysql-test/suite/encryption/r/filekeys_encfile_badfile.result +++ b/mysql-test/suite/encryption/r/filekeys_encfile_badfile.result @@ -1,7 +1,7 @@ call mtr.add_suppression("File 'bad' not found"); call mtr.add_suppression("Plugin 'file_key_management' init function returned error"); call mtr.add_suppression("Plugin 'file_key_management' registration.*failed"); -FOUND /File 'bad' not found/ in mysqld.1.err +FOUND 1 /File 'bad' not found/ in mysqld.1.err create table t1(c1 bigint not null, b char(200)) engine=innodb encrypted=yes encryption_key_id=1; ERROR HY000: Can't create table `test`.`t1` (errno: 140 "Wrong create options") select plugin_status from information_schema.plugins diff --git a/mysql-test/suite/encryption/r/filekeys_encfile_no.result b/mysql-test/suite/encryption/r/filekeys_encfile_no.result index 6261bd459b8cf..59124f2babd15 100644 --- a/mysql-test/suite/encryption/r/filekeys_encfile_no.result +++ b/mysql-test/suite/encryption/r/filekeys_encfile_no.result @@ -1,7 +1,7 @@ call mtr.add_suppression("Cannot decrypt .*filekeys-data.enc. Wrong key"); call mtr.add_suppression("Plugin 'file_key_management' init function returned error"); call mtr.add_suppression("Plugin 'file_key_management' registration.*failed"); -FOUND /Cannot decrypt .*filekeys-data.enc. Wrong key/ in mysqld.1.err +FOUND 1 /Cannot decrypt .*filekeys-data.enc. 
Wrong key/ in mysqld.1.err create table t1(c1 bigint not null, b char(200)) engine=innodb encrypted=yes encryption_key_id=1; ERROR HY000: Can't create table `test`.`t1` (errno: 140 "Wrong create options") select plugin_status from information_schema.plugins diff --git a/mysql-test/suite/encryption/r/filekeys_nofile.result b/mysql-test/suite/encryption/r/filekeys_nofile.result index 690f2e61df0d2..2caf258fef778 100644 --- a/mysql-test/suite/encryption/r/filekeys_nofile.result +++ b/mysql-test/suite/encryption/r/filekeys_nofile.result @@ -1,7 +1,7 @@ call mtr.add_suppression("file-key-management-filename is not set"); call mtr.add_suppression("Plugin 'file_key_management' init function returned error"); call mtr.add_suppression("Plugin 'file_key_management' registration.*failed"); -FOUND /file-key-management-filename is not set/ in mysqld.1.err +FOUND 1 /file-key-management-filename is not set/ in mysqld.1.err create table t1(c1 bigint not null, b char(200)) engine=innodb encrypted=yes encryption_key_id=1; ERROR HY000: Can't create table `test`.`t1` (errno: 140 "Wrong create options") select plugin_status from information_schema.plugins diff --git a/mysql-test/suite/encryption/r/filekeys_syntax.result b/mysql-test/suite/encryption/r/filekeys_syntax.result index eb8119bc4f555..019446096b94e 100644 --- a/mysql-test/suite/encryption/r/filekeys_syntax.result +++ b/mysql-test/suite/encryption/r/filekeys_syntax.result @@ -1,7 +1,7 @@ call mtr.add_suppression("File '.*keys.txt' not found"); call mtr.add_suppression("Plugin 'file_key_management' init function returned error"); call mtr.add_suppression("Plugin 'file_key_management' registration.*failed"); -FOUND /File '.*keys.txt' not found/ in mysqld.1.err +FOUND 1 /File '.*keys.txt' not found/ in mysqld.1.err create table t1(c1 bigint not null, b char(200)) engine=innodb encrypted=yes encryption_key_id=1; ERROR HY000: Can't create table `test`.`t1` (errno: 140 "Wrong create options") select plugin_status from 
information_schema.plugins @@ -12,7 +12,7 @@ ERROR HY000: Invalid key id at MYSQL_TMP_DIR/keys.txt line 2, column 2 call mtr.add_suppression("File '.*keys.txt' not found"); call mtr.add_suppression("Plugin 'file_key_management' init function returned error"); call mtr.add_suppression("Plugin 'file_key_management' registration.*failed"); -FOUND /File '.*keys.txt' not found/ in mysqld.1.err +FOUND 1 /File '.*keys.txt' not found/ in mysqld.1.err create table t1(c1 bigint not null, b char(200)) engine=innodb encrypted=yes encryption_key_id=1; ERROR HY000: Can't create table `test`.`t1` (errno: 140 "Wrong create options") select plugin_status from information_schema.plugins @@ -21,7 +21,7 @@ plugin_status call mtr.add_suppression("Invalid key id"); call mtr.add_suppression("Plugin 'file_key_management' init function returned error"); call mtr.add_suppression("Plugin 'file_key_management' registration.*failed"); -FOUND /Invalid key id/ in mysqld.1.err +FOUND 1 /Invalid key id/ in mysqld.1.err create table t1(c1 bigint not null, b char(200)) engine=innodb encrypted=yes encryption_key_id=1; ERROR HY000: Can't create table `test`.`t1` (errno: 140 "Wrong create options") select plugin_status from information_schema.plugins @@ -32,7 +32,7 @@ ERROR HY000: Invalid key id at MYSQL_TMP_DIR/keys.txt line 2, column 11 call mtr.add_suppression("Invalid key id"); call mtr.add_suppression("Plugin 'file_key_management' init function returned error"); call mtr.add_suppression("Plugin 'file_key_management' registration.*failed"); -FOUND /Invalid key id/ in mysqld.1.err +FOUND 2 /Invalid key id/ in mysqld.1.err create table t1(c1 bigint not null, b char(200)) engine=innodb encrypted=yes encryption_key_id=1; ERROR HY000: Can't create table `test`.`t1` (errno: 140 "Wrong create options") select plugin_status from information_schema.plugins @@ -41,7 +41,7 @@ plugin_status call mtr.add_suppression("Invalid key id"); call mtr.add_suppression("Plugin 'file_key_management' init function returned 
error"); call mtr.add_suppression("Plugin 'file_key_management' registration.*failed"); -FOUND /Invalid key id/ in mysqld.1.err +FOUND 2 /Invalid key id/ in mysqld.1.err create table t1(c1 bigint not null, b char(200)) engine=innodb encrypted=yes encryption_key_id=1; ERROR HY000: Can't create table `test`.`t1` (errno: 140 "Wrong create options") select plugin_status from information_schema.plugins @@ -52,7 +52,7 @@ ERROR HY000: Invalid key at MYSQL_TMP_DIR/keys.txt line 2, column 47 call mtr.add_suppression("Invalid key id"); call mtr.add_suppression("Plugin 'file_key_management' init function returned error"); call mtr.add_suppression("Plugin 'file_key_management' registration.*failed"); -FOUND /Invalid key id/ in mysqld.1.err +FOUND 2 /Invalid key id/ in mysqld.1.err create table t1(c1 bigint not null, b char(200)) engine=innodb encrypted=yes encryption_key_id=1; ERROR HY000: Can't create table `test`.`t1` (errno: 140 "Wrong create options") select plugin_status from information_schema.plugins @@ -61,7 +61,7 @@ plugin_status call mtr.add_suppression("Invalid key"); call mtr.add_suppression("Plugin 'file_key_management' init function returned error"); call mtr.add_suppression("Plugin 'file_key_management' registration.*failed"); -FOUND /Invalid key/ in mysqld.1.err +FOUND 3 /Invalid key/ in mysqld.1.err create table t1(c1 bigint not null, b char(200)) engine=innodb encrypted=yes encryption_key_id=1; ERROR HY000: Can't create table `test`.`t1` (errno: 140 "Wrong create options") select plugin_status from information_schema.plugins @@ -72,7 +72,7 @@ ERROR HY000: Invalid key at MYSQL_TMP_DIR/keys.txt line 2, column 33 call mtr.add_suppression("Invalid key"); call mtr.add_suppression("Plugin 'file_key_management' init function returned error"); call mtr.add_suppression("Plugin 'file_key_management' registration.*failed"); -FOUND /Invalid key/ in mysqld.1.err +FOUND 4 /Invalid key/ in mysqld.1.err create table t1(c1 bigint not null, b char(200)) engine=innodb 
encrypted=yes encryption_key_id=1; ERROR HY000: Can't create table `test`.`t1` (errno: 140 "Wrong create options") select plugin_status from information_schema.plugins @@ -81,7 +81,7 @@ plugin_status call mtr.add_suppression("Invalid key"); call mtr.add_suppression("Plugin 'file_key_management' init function returned error"); call mtr.add_suppression("Plugin 'file_key_management' registration.*failed"); -FOUND /Invalid key/ in mysqld.1.err +FOUND 4 /Invalid key/ in mysqld.1.err create table t1(c1 bigint not null, b char(200)) engine=innodb encrypted=yes encryption_key_id=1; ERROR HY000: Can't create table `test`.`t1` (errno: 140 "Wrong create options") select plugin_status from information_schema.plugins @@ -92,7 +92,7 @@ ERROR HY000: Syntax error at MYSQL_TMP_DIR/keys.txt line 2, column 2 call mtr.add_suppression("Invalid key"); call mtr.add_suppression("Plugin 'file_key_management' init function returned error"); call mtr.add_suppression("Plugin 'file_key_management' registration.*failed"); -FOUND /Invalid key/ in mysqld.1.err +FOUND 4 /Invalid key/ in mysqld.1.err create table t1(c1 bigint not null, b char(200)) engine=innodb encrypted=yes encryption_key_id=1; ERROR HY000: Can't create table `test`.`t1` (errno: 140 "Wrong create options") select plugin_status from information_schema.plugins @@ -101,7 +101,7 @@ plugin_status call mtr.add_suppression("Syntax error"); call mtr.add_suppression("Plugin 'file_key_management' init function returned error"); call mtr.add_suppression("Plugin 'file_key_management' registration.*failed"); -FOUND /Syntax error/ in mysqld.1.err +FOUND 1 /Syntax error/ in mysqld.1.err create table t1(c1 bigint not null, b char(200)) engine=innodb encrypted=yes encryption_key_id=1; ERROR HY000: Can't create table `test`.`t1` (errno: 140 "Wrong create options") select plugin_status from information_schema.plugins @@ -112,7 +112,7 @@ ERROR HY000: Syntax error at MYSQL_TMP_DIR/keys.txt line 2, column 1 call mtr.add_suppression("Syntax error"); 
call mtr.add_suppression("Plugin 'file_key_management' init function returned error"); call mtr.add_suppression("Plugin 'file_key_management' registration.*failed"); -FOUND /Syntax error/ in mysqld.1.err +FOUND 2 /Syntax error/ in mysqld.1.err create table t1(c1 bigint not null, b char(200)) engine=innodb encrypted=yes encryption_key_id=1; ERROR HY000: Can't create table `test`.`t1` (errno: 140 "Wrong create options") select plugin_status from information_schema.plugins @@ -121,7 +121,7 @@ plugin_status call mtr.add_suppression("Syntax error"); call mtr.add_suppression("Plugin 'file_key_management' init function returned error"); call mtr.add_suppression("Plugin 'file_key_management' registration.*failed"); -FOUND /Syntax error/ in mysqld.1.err +FOUND 2 /Syntax error/ in mysqld.1.err create table t1(c1 bigint not null, b char(200)) engine=innodb encrypted=yes encryption_key_id=1; ERROR HY000: Can't create table `test`.`t1` (errno: 140 "Wrong create options") select plugin_status from information_schema.plugins @@ -132,7 +132,7 @@ ERROR HY000: System key id 1 is missing at MYSQL_TMP_DIR/keys.txt line 1, column call mtr.add_suppression("Syntax error"); call mtr.add_suppression("Plugin 'file_key_management' init function returned error"); call mtr.add_suppression("Plugin 'file_key_management' registration.*failed"); -FOUND /Syntax error/ in mysqld.1.err +FOUND 2 /Syntax error/ in mysqld.1.err create table t1(c1 bigint not null, b char(200)) engine=innodb encrypted=yes encryption_key_id=1; ERROR HY000: Can't create table `test`.`t1` (errno: 140 "Wrong create options") select plugin_status from information_schema.plugins @@ -141,7 +141,7 @@ plugin_status call mtr.add_suppression("System key id 1"); call mtr.add_suppression("Plugin 'file_key_management' init function returned error"); call mtr.add_suppression("Plugin 'file_key_management' registration.*failed"); -FOUND /System key id 1/ in mysqld.1.err +FOUND 1 /System key id 1/ in mysqld.1.err create table t1(c1 bigint 
not null, b char(200)) engine=innodb encrypted=yes encryption_key_id=1; ERROR HY000: Can't create table `test`.`t1` (errno: 140 "Wrong create options") select plugin_status from information_schema.plugins diff --git a/mysql-test/suite/encryption/r/filekeys_tooshort.result b/mysql-test/suite/encryption/r/filekeys_tooshort.result index efa6609756371..781bde6fd49d7 100644 --- a/mysql-test/suite/encryption/r/filekeys_tooshort.result +++ b/mysql-test/suite/encryption/r/filekeys_tooshort.result @@ -1,7 +1,7 @@ call mtr.add_suppression("Cannot decrypt .*tooshort.enc. Not encrypted"); call mtr.add_suppression("Plugin 'file_key_management' init function returned error"); call mtr.add_suppression("Plugin 'file_key_management' registration.*failed"); -FOUND /Cannot decrypt .*tooshort.enc. Not encrypted/ in mysqld.1.err +FOUND 1 /Cannot decrypt .*tooshort.enc. Not encrypted/ in mysqld.1.err create table t1(c1 bigint not null, b char(200)) engine=innodb encrypted=yes encryption_key_id=1; ERROR HY000: Can't create table `test`.`t1` (errno: 140 "Wrong create options") select plugin_status from information_schema.plugins diff --git a/mysql-test/suite/encryption/r/filekeys_unencfile.result b/mysql-test/suite/encryption/r/filekeys_unencfile.result index 1b9c092a713fc..316683486073c 100644 --- a/mysql-test/suite/encryption/r/filekeys_unencfile.result +++ b/mysql-test/suite/encryption/r/filekeys_unencfile.result @@ -1,7 +1,7 @@ call mtr.add_suppression("Cannot decrypt .*keys.txt. Not encrypted"); call mtr.add_suppression("Plugin 'file_key_management' init function returned error"); call mtr.add_suppression("Plugin 'file_key_management' registration.*failed"); -FOUND /Cannot decrypt .*keys.txt. Not encrypted/ in mysqld.1.err +FOUND 1 /Cannot decrypt .*keys.txt. 
Not encrypted/ in mysqld.1.err create table t1(c1 bigint not null, b char(200)) engine=innodb encrypted=yes encryption_key_id=1; ERROR HY000: Can't create table `test`.`t1` (errno: 140 "Wrong create options") select plugin_status from information_schema.plugins diff --git a/mysql-test/suite/encryption/r/innodb-discard-import-change.result b/mysql-test/suite/encryption/r/innodb-discard-import-change.result index b0b01b7cf7b01..51670d89e52fa 100644 --- a/mysql-test/suite/encryption/r/innodb-discard-import-change.result +++ b/mysql-test/suite/encryption/r/innodb-discard-import-change.result @@ -99,5 +99,5 @@ NOT FOUND /verysecretmessage/ in t3.ibd # t4 page compressed and encrypted expecting NOT FOUND NOT FOUND /verysecretmessage/ in t4.ibd # t5 normal expecting FOUND -FOUND /verysecretmessage/ in t5.ibd +FOUND 289 /verysecretmessage/ in t5.ibd DROP TABLE t1,t2,t3,t4,t5,t6; diff --git a/mysql-test/suite/encryption/r/innodb-key-rotation-disable.result b/mysql-test/suite/encryption/r/innodb-key-rotation-disable.result index 07f6f98b88a8a..feaede20f2a88 100644 --- a/mysql-test/suite/encryption/r/innodb-key-rotation-disable.result +++ b/mysql-test/suite/encryption/r/innodb-key-rotation-disable.result @@ -57,9 +57,9 @@ NOT FOUND /secred/ in t5.ibd # t6 on expecting NOT FOUND NOT FOUND /secred/ in t6.ibd # t7 off expecting FOUND -FOUND /public/ in t7.ibd +FOUND 1 /public/ in t7.ibd # t8 row compressed expecting NOT FOUND -FOUND /public/ in t8.ibd +FOUND 1 /public/ in t8.ibd # t9 page compressed expecting NOT FOUND NOT FOUND /public/ in t9.ibd use test; diff --git a/mysql-test/suite/encryption/r/innodb_encrypt_log.result b/mysql-test/suite/encryption/r/innodb_encrypt_log.result index c660ebe336bb5..f8f933be8315e 100644 --- a/mysql-test/suite/encryption/r/innodb_encrypt_log.result +++ b/mysql-test/suite/encryption/r/innodb_encrypt_log.result @@ -51,7 +51,7 @@ INSERT INTO t0 VALUES(NULL, 5, 5, 'public', 'gossip'); # ib_logfile0 expecting NOT FOUND NOT FOUND 
/private|secret|sacr(ed|ament)|success|story|secur(e|ity)/ in ib_logfile0 # ib_logfile0 expecting FOUND -FOUND /public|gossip/ in ib_logfile0 +FOUND 3 /public|gossip/ in ib_logfile0 # ibdata1 expecting NOT FOUND NOT FOUND /private|secret|sacr(ed|ament)|success|story|secur(e|ity)|public|gossip/ in ibdata1 # t0.ibd expecting NOT FOUND diff --git a/mysql-test/suite/encryption/r/innodb_encrypt_log_corruption.result b/mysql-test/suite/encryption/r/innodb_encrypt_log_corruption.result index 957a8c1eec901..4a31f1ba4546c 100644 --- a/mysql-test/suite/encryption/r/innodb_encrypt_log_corruption.result +++ b/mysql-test/suite/encryption/r/innodb_encrypt_log_corruption.result @@ -3,52 +3,52 @@ SELECT * FROM INFORMATION_SCHEMA.ENGINES WHERE engine = 'innodb' AND support IN ('YES', 'DEFAULT', 'ENABLED'); ENGINE SUPPORT COMMENT TRANSACTIONS XA SAVEPOINTS -FOUND /InnoDB: Upgrade after a crash is not supported. This redo log was created before MariaDB 10\.2\.2\./ in mysqld.1.err +FOUND 1 /InnoDB: Upgrade after a crash is not supported. This redo log was created before MariaDB 10\.2\.2\./ in mysqld.1.err # redo log from before MariaDB 10.2.2, with corrupted log checkpoint SELECT * FROM INFORMATION_SCHEMA.ENGINES WHERE engine = 'innodb' AND support IN ('YES', 'DEFAULT', 'ENABLED'); ENGINE SUPPORT COMMENT TRANSACTIONS XA SAVEPOINTS -FOUND /InnoDB: Upgrade after a crash is not supported. This redo log was created before MariaDB 10\.2\.2, and we did not find a valid checkpoint/ in mysqld.1.err -FOUND /Plugin 'InnoDB' registration as a STORAGE ENGINE failed/ in mysqld.1.err +FOUND 1 /InnoDB: Upgrade after a crash is not supported. 
This redo log was created before MariaDB 10\.2\.2, and we did not find a valid checkpoint/ in mysqld.1.err +FOUND 2 /Plugin 'InnoDB' registration as a STORAGE ENGINE failed/ in mysqld.1.err # redo log from before MariaDB 10.2.2, with corrupted log block SELECT * FROM INFORMATION_SCHEMA.ENGINES WHERE engine = 'innodb' AND support IN ('YES', 'DEFAULT', 'ENABLED'); ENGINE SUPPORT COMMENT TRANSACTIONS XA SAVEPOINTS -FOUND /InnoDB: Upgrade after a crash is not supported. This redo log was created before MariaDB 10\.2\.2, and it appears corrupted/ in mysqld.1.err +FOUND 1 /InnoDB: Upgrade after a crash is not supported. This redo log was created before MariaDB 10\.2\.2, and it appears corrupted/ in mysqld.1.err # redo log from "after" MariaDB 10.2.2, but with invalid header checksum SELECT * FROM INFORMATION_SCHEMA.ENGINES WHERE engine = 'innodb' AND support IN ('YES', 'DEFAULT', 'ENABLED'); ENGINE SUPPORT COMMENT TRANSACTIONS XA SAVEPOINTS -FOUND /InnoDB: Invalid redo log header checksum/ in mysqld.1.err +FOUND 1 /InnoDB: Invalid redo log header checksum/ in mysqld.1.err # distant future redo log format, with valid header checksum SELECT * FROM INFORMATION_SCHEMA.ENGINES WHERE engine = 'innodb' AND support IN ('YES', 'DEFAULT', 'ENABLED'); ENGINE SUPPORT COMMENT TRANSACTIONS XA SAVEPOINTS -FOUND /InnoDB: Unsupported redo log format. The redo log was created with malicious intentions, or perhaps\. Please follow the instructions at http://dev.mysql.com/doc/refman/5.7/en/upgrading-downgrading.html/ in mysqld.1.err +FOUND 1 /InnoDB: Unsupported redo log format. The redo log was created with malicious intentions, or perhaps\. 
Please follow the instructions at http://dev.mysql.com/doc/refman/5.7/en/upgrading-downgrading.html/ in mysqld.1.err # valid header, but old-format checkpoint blocks SELECT * FROM INFORMATION_SCHEMA.ENGINES WHERE engine = 'innodb' AND support IN ('YES', 'DEFAULT', 'ENABLED'); ENGINE SUPPORT COMMENT TRANSACTIONS XA SAVEPOINTS -FOUND /InnoDB: No valid checkpoint found .corrupted redo log/ in mysqld.1.err +FOUND 1 /InnoDB: No valid checkpoint found .corrupted redo log/ in mysqld.1.err # valid header, valid checkpoint 1, all-zero (invalid) checkpoint 2, invalid block checksum SELECT * FROM INFORMATION_SCHEMA.ENGINES WHERE engine = 'innodb' AND support IN ('YES', 'DEFAULT', 'ENABLED'); ENGINE SUPPORT COMMENT TRANSACTIONS XA SAVEPOINTS -FOUND /InnoDB: Invalid log block checksum. block: 2372 checkpoint no: 1 expected: 3362026715 found: 144444122/ in mysqld.1.err -FOUND /InnoDB: Missing MLOG_CHECKPOINT between the checkpoint 1213964 and the end 1213952\./ in mysqld.1.err +FOUND 1 /InnoDB: Invalid log block checksum. 
block: 2372 checkpoint no: 1 expected: 3362026715 found: 144444122/ in mysqld.1.err +FOUND 1 /InnoDB: Missing MLOG_CHECKPOINT between the checkpoint 1213964 and the end 1213952\./ in mysqld.1.err # --innodb-force-recovery=6 (skip the entire redo log) SELECT * FROM INFORMATION_SCHEMA.ENGINES WHERE engine = 'innodb' AND support IN ('YES', 'DEFAULT', 'ENABLED'); ENGINE SUPPORT COMMENT TRANSACTIONS XA SAVEPOINTS InnoDB YES Supports transactions, row-level locking, foreign keys and encryption for tables YES YES YES -FOUND /\[Note\] InnoDB: .* started; log sequence number 0/ in mysqld.1.err +FOUND 1 /\[Note\] InnoDB: .* started; log sequence number 0/ in mysqld.1.err # valid header, valid checkpoint 1, all-zero (invalid) checkpoint 2, invalid block number SELECT * FROM INFORMATION_SCHEMA.ENGINES WHERE engine = 'innodb' @@ -66,26 +66,26 @@ SELECT * FROM INFORMATION_SCHEMA.ENGINES WHERE engine = 'innodb' AND support IN ('YES', 'DEFAULT', 'ENABLED'); ENGINE SUPPORT COMMENT TRANSACTIONS XA SAVEPOINTS -FOUND /InnoDB: Starting crash recovery from checkpoint LSN=1213964/ in mysqld.1.err -FOUND /InnoDB: MLOG_FILE_NAME incorrect:bogus/ in mysqld.1.err -FOUND /InnoDB: ############### CORRUPT LOG RECORD FOUND ##################/ in mysqld.1.err -FOUND /InnoDB: Log record type 55, page 151:488\. Log parsing proceeded successfully up to 1213973\. Previous log record type 56, is multi 0 Recv offset 9, prev 0/ in mysqld.1.err -FOUND /len 22. hex 38000000000012860cb7809781e80006626f67757300. asc 8 bogus / in mysqld.1.err -FOUND /InnoDB: Set innodb_force_recovery to ignore this error/ in mysqld.1.err +FOUND 1 /InnoDB: Starting crash recovery from checkpoint LSN=1213964/ in mysqld.1.err +FOUND 1 /InnoDB: MLOG_FILE_NAME incorrect:bogus/ in mysqld.1.err +FOUND 1 /InnoDB: ############### CORRUPT LOG RECORD FOUND ##################/ in mysqld.1.err +FOUND 1 /InnoDB: Log record type 55, page 151:488\. Log parsing proceeded successfully up to 1213973\. 
Previous log record type 56, is multi 0 Recv offset 9, prev 0/ in mysqld.1.err +FOUND 1 /len 22. hex 38000000000012860cb7809781e80006626f67757300. asc 8 bogus / in mysqld.1.err +FOUND 1 /InnoDB: Set innodb_force_recovery to ignore this error/ in mysqld.1.err # Test a corrupted MLOG_FILE_NAME record. # valid header, invalid checkpoint 1, valid checkpoint 2, invalid block SELECT * FROM INFORMATION_SCHEMA.ENGINES WHERE engine = 'innodb' AND support IN ('YES', 'DEFAULT', 'ENABLED'); ENGINE SUPPORT COMMENT TRANSACTIONS XA SAVEPOINTS -FOUND /InnoDB: Invalid log block checksum. block: 2372 checkpoint no: 1 expected: 2454333373 found: 150151/ in mysqld.1.err +FOUND 1 /InnoDB: Invalid log block checksum. block: 2372 checkpoint no: 1 expected: 2454333373 found: 150151/ in mysqld.1.err # valid header, invalid checkpoint 1, valid checkpoint 2, invalid log record SELECT * FROM INFORMATION_SCHEMA.ENGINES WHERE engine = 'innodb' AND support IN ('YES', 'DEFAULT', 'ENABLED'); ENGINE SUPPORT COMMENT TRANSACTIONS XA SAVEPOINTS -FOUND /InnoDB: MLOG_FILE_NAME incorrect:bigot/ in mysqld.1.err -FOUND /len 22; hex 38000000000012860cb7809781e800066269676f7400; asc 8 bigot ;/ in mysqld.1.err +FOUND 1 /InnoDB: MLOG_FILE_NAME incorrect:bigot/ in mysqld.1.err +FOUND 1 /len 22; hex 38000000000012860cb7809781e800066269676f7400; asc 8 bigot ;/ in mysqld.1.err # missing MLOG_FILE_NAME or MLOG_FILE_DELETE before MLOG_CHECKPOINT SELECT * FROM INFORMATION_SCHEMA.ENGINES WHERE engine = 'innodb' @@ -97,7 +97,7 @@ SELECT COUNT(*) `1` FROM INFORMATION_SCHEMA.ENGINES WHERE engine='innodb' AND support IN ('YES', 'DEFAULT', 'ENABLED'); 1 1 -FOUND /InnoDB: Encrypting redo log/ in mysqld.1.err +FOUND 1 /InnoDB: Encrypting redo log/ in mysqld.1.err ib_buffer_pool ib_logfile0 ib_logfile1 diff --git a/mysql-test/suite/encryption/t/encrypt_and_grep.test b/mysql-test/suite/encryption/t/encrypt_and_grep.test index fd54fc74f0aed..2ef69db237d51 100644 --- a/mysql-test/suite/encryption/t/encrypt_and_grep.test +++ 
b/mysql-test/suite/encryption/t/encrypt_and_grep.test @@ -13,7 +13,6 @@ --let t2_IBD = $MYSQLD_DATADIR/test/t2.ibd --let t3_IBD = $MYSQLD_DATADIR/test/t3.ibd --let SEARCH_RANGE = 10000000 ---let SEARCH_PATTERN=foobar SET GLOBAL innodb_file_per_table = ON; diff --git a/mysql-test/suite/encryption/t/filekeys_badtest.inc b/mysql-test/suite/encryption/t/filekeys_badtest.inc index 1cdea0e1a5376..60ac9f0e7985d 100644 --- a/mysql-test/suite/encryption/t/filekeys_badtest.inc +++ b/mysql-test/suite/encryption/t/filekeys_badtest.inc @@ -7,7 +7,6 @@ call mtr.add_suppression("Plugin 'file_key_management' init function returned er call mtr.add_suppression("Plugin 'file_key_management' registration.*failed"); --let SEARCH_FILE= $MYSQLTEST_VARDIR/log/mysqld.1.err ---let SEARCH_RANGE= -10000 --source include/search_pattern_in_file.inc --error ER_CANT_CREATE_TABLE diff --git a/mysql-test/suite/innodb/r/autoinc_persist.result b/mysql-test/suite/innodb/r/autoinc_persist.result index 814f3d32e60cd..e61262076ed28 100644 --- a/mysql-test/suite/innodb/r/autoinc_persist.result +++ b/mysql-test/suite/innodb/r/autoinc_persist.result @@ -432,7 +432,6 @@ DELETE FROM t7 WHERE a = 100000200; set global innodb_flush_log_at_trx_commit=1; INSERT INTO t9 VALUES(100000000200); DELETE FROM t9 WHERE a = 100000000200; -# Kill and restart INSERT INTO t1 VALUES(0); SELECT a AS `Expect 126` FROM t1 ORDER BY a DESC LIMIT 1; Expect 126 @@ -498,7 +497,6 @@ SELECT * FROM t19; a 1 2 -# Kill and restart INSERT INTO t1 VALUES(0), (0); SELECT * FROM t1; a @@ -639,7 +637,6 @@ BEGIN; # Without the fix in page_create_empty() the counter value would be lost # when ROLLBACK deletes the last row. 
ROLLBACK; -# Kill and restart INSERT INTO t3 VALUES(0); SELECT MAX(a) AS `Expect 120` FROM t3; Expect 120 @@ -913,7 +910,6 @@ UPDATE t33 SET a = 10 WHERE a = 1; INSERT INTO t33 VALUES(2, NULL); ERROR 23000: Duplicate entry '2' for key 'PRIMARY' COMMIT; -# Kill and restart # This will not insert 0 INSERT INTO t31(a) VALUES(6), (0); SELECT * FROM t31; diff --git a/mysql-test/suite/innodb/r/innodb-blob.result b/mysql-test/suite/innodb/r/innodb-blob.result index afdaca9acd2d2..ec37492c279f6 100644 --- a/mysql-test/suite/innodb/r/innodb-blob.result +++ b/mysql-test/suite/innodb/r/innodb-blob.result @@ -43,7 +43,6 @@ a 3 BEGIN; INSERT INTO t2 VALUES (42); -# Kill and restart disconnect con1; disconnect con2; connection default; @@ -98,7 +97,6 @@ SELECT info FROM information_schema.processlist WHERE state = 'debug sync point: before_row_upd_extern'; info UPDATE t3 SET c=REPEAT('i',3000) WHERE a=2 -# Kill and restart disconnect con2; connection default; ERROR HY000: Lost connection to MySQL server during query @@ -130,7 +128,6 @@ SELECT info FROM information_schema.processlist WHERE state = 'debug sync point: after_row_upd_extern'; info UPDATE t3 SET c=REPEAT('j',3000) WHERE a=2 -# Kill and restart disconnect con2; connection default; ERROR HY000: Lost connection to MySQL server during query diff --git a/mysql-test/suite/innodb/r/innodb-change-buffer-recovery.result b/mysql-test/suite/innodb/r/innodb-change-buffer-recovery.result index ca58b77a21e66..f03072053c3a3 100644 --- a/mysql-test/suite/innodb/r/innodb-change-buffer-recovery.result +++ b/mysql-test/suite/innodb/r/innodb-change-buffer-recovery.result @@ -38,7 +38,7 @@ SELECT b FROM t1 LIMIT 3; ERROR HY000: Lost connection to MySQL server during query disconnect con1; connection default; -FOUND /Wrote log record for ibuf update in place operation/ in my_restart.err +FOUND 1 /Wrote log record for ibuf update in place operation/ in my_restart.err CHECK TABLE t1; Table Op Msg_type Msg_text test.t1 check status OK diff 
--git a/mysql-test/suite/innodb/r/innodb_bug53756.result b/mysql-test/suite/innodb/r/innodb_bug53756.result index 9809682a4b22f..06fa96c2f81f4 100644 --- a/mysql-test/suite/innodb/r/innodb_bug53756.result +++ b/mysql-test/suite/innodb/r/innodb_bug53756.result @@ -77,7 +77,6 @@ pk c1 4 44 START TRANSACTION; INSERT INTO bug_53756 VALUES (666,666); -# Kill and restart disconnect con1; disconnect con2; disconnect con3; diff --git a/mysql-test/suite/innodb/r/innodb_bug59641.result b/mysql-test/suite/innodb/r/innodb_bug59641.result index 2c042585745ce..8bf574e2bec5c 100644 --- a/mysql-test/suite/innodb/r/innodb_bug59641.result +++ b/mysql-test/suite/innodb/r/innodb_bug59641.result @@ -17,7 +17,6 @@ UPDATE t SET b=4*a WHERE a=32; XA END '789'; XA PREPARE '789'; CONNECT con3,localhost,root,,; -# Kill and restart SET TRANSACTION ISOLATION LEVEL READ UNCOMMITTED; SELECT * FROM t; a b diff --git a/mysql-test/suite/innodb/r/log_alter_table.result b/mysql-test/suite/innodb/r/log_alter_table.result index f1ee61e757242..a6f35543c04f0 100644 --- a/mysql-test/suite/innodb/r/log_alter_table.result +++ b/mysql-test/suite/innodb/r/log_alter_table.result @@ -10,6 +10,8 @@ INSERT INTO t1 VALUES (1,2); ALTER TABLE t1 ADD PRIMARY KEY(a), ALGORITHM=INPLACE; ALTER TABLE t1 DROP INDEX b, ADD INDEX (b); # Kill the server +FOUND 1 /scan .*: multi-log rec MLOG_FILE_CREATE2.*page .*:0/ in mysqld.1.err +FOUND 1 /scan .*: log rec MLOG_INDEX_LOAD/ in mysqld.1.err CHECK TABLE t1; Table Op Msg_type Msg_text test.t1 check status OK diff --git a/mysql-test/suite/innodb/r/log_corruption.result b/mysql-test/suite/innodb/r/log_corruption.result index a932efeca6212..3a20a11cd8f1c 100644 --- a/mysql-test/suite/innodb/r/log_corruption.result +++ b/mysql-test/suite/innodb/r/log_corruption.result @@ -3,52 +3,52 @@ SELECT * FROM INFORMATION_SCHEMA.ENGINES WHERE engine = 'innodb' AND support IN ('YES', 'DEFAULT', 'ENABLED'); ENGINE SUPPORT COMMENT TRANSACTIONS XA SAVEPOINTS -FOUND /InnoDB: Upgrade after a crash 
is not supported. This redo log was created before MariaDB 10\.2\.2\./ in mysqld.1.err +FOUND 1 /InnoDB: Upgrade after a crash is not supported. This redo log was created before MariaDB 10\.2\.2\./ in mysqld.1.err # redo log from before MariaDB 10.2.2, with corrupted log checkpoint SELECT * FROM INFORMATION_SCHEMA.ENGINES WHERE engine = 'innodb' AND support IN ('YES', 'DEFAULT', 'ENABLED'); ENGINE SUPPORT COMMENT TRANSACTIONS XA SAVEPOINTS -FOUND /InnoDB: Upgrade after a crash is not supported. This redo log was created before MariaDB 10\.2\.2, and we did not find a valid checkpoint/ in mysqld.1.err -FOUND /Plugin 'InnoDB' registration as a STORAGE ENGINE failed/ in mysqld.1.err +FOUND 1 /InnoDB: Upgrade after a crash is not supported. This redo log was created before MariaDB 10\.2\.2, and we did not find a valid checkpoint/ in mysqld.1.err +FOUND 2 /Plugin 'InnoDB' registration as a STORAGE ENGINE failed/ in mysqld.1.err # redo log from before MariaDB 10.2.2, with corrupted log block SELECT * FROM INFORMATION_SCHEMA.ENGINES WHERE engine = 'innodb' AND support IN ('YES', 'DEFAULT', 'ENABLED'); ENGINE SUPPORT COMMENT TRANSACTIONS XA SAVEPOINTS -FOUND /InnoDB: Upgrade after a crash is not supported. This redo log was created before MariaDB 10\.2\.2, and it appears corrupted/ in mysqld.1.err +FOUND 1 /InnoDB: Upgrade after a crash is not supported. 
This redo log was created before MariaDB 10\.2\.2, and it appears corrupted/ in mysqld.1.err # redo log from "after" MariaDB 10.2.2, but with invalid header checksum SELECT * FROM INFORMATION_SCHEMA.ENGINES WHERE engine = 'innodb' AND support IN ('YES', 'DEFAULT', 'ENABLED'); ENGINE SUPPORT COMMENT TRANSACTIONS XA SAVEPOINTS -FOUND /InnoDB: Invalid redo log header checksum/ in mysqld.1.err +FOUND 1 /InnoDB: Invalid redo log header checksum/ in mysqld.1.err # distant future redo log format, with valid header checksum SELECT * FROM INFORMATION_SCHEMA.ENGINES WHERE engine = 'innodb' AND support IN ('YES', 'DEFAULT', 'ENABLED'); ENGINE SUPPORT COMMENT TRANSACTIONS XA SAVEPOINTS -FOUND /InnoDB: Unsupported redo log format. The redo log was created with malicious intentions, or perhaps\. Please follow the instructions at http://dev.mysql.com/doc/refman/5.7/en/upgrading-downgrading.html/ in mysqld.1.err +FOUND 1 /InnoDB: Unsupported redo log format. The redo log was created with malicious intentions, or perhaps\. Please follow the instructions at http://dev.mysql.com/doc/refman/5.7/en/upgrading-downgrading.html/ in mysqld.1.err # valid header, but old-format checkpoint blocks SELECT * FROM INFORMATION_SCHEMA.ENGINES WHERE engine = 'innodb' AND support IN ('YES', 'DEFAULT', 'ENABLED'); ENGINE SUPPORT COMMENT TRANSACTIONS XA SAVEPOINTS -FOUND /InnoDB: No valid checkpoint found .corrupted redo log/ in mysqld.1.err +FOUND 1 /InnoDB: No valid checkpoint found .corrupted redo log/ in mysqld.1.err # valid header, valid checkpoint 1, all-zero (invalid) checkpoint 2, invalid block checksum SELECT * FROM INFORMATION_SCHEMA.ENGINES WHERE engine = 'innodb' AND support IN ('YES', 'DEFAULT', 'ENABLED'); ENGINE SUPPORT COMMENT TRANSACTIONS XA SAVEPOINTS -FOUND /InnoDB: Invalid log block checksum. 
block: 2372 checkpoint no: 1 expected: 3362026715 found: 144444122/ in mysqld.1.err -FOUND /InnoDB: Missing MLOG_CHECKPOINT between the checkpoint 1213964 and the end 1213952\./ in mysqld.1.err +FOUND 1 /InnoDB: Invalid log block checksum. block: 2372 checkpoint no: 1 expected: 3362026715 found: 144444122/ in mysqld.1.err +FOUND 1 /InnoDB: Missing MLOG_CHECKPOINT between the checkpoint 1213964 and the end 1213952\./ in mysqld.1.err # --innodb-force-recovery=6 (skip the entire redo log) SELECT * FROM INFORMATION_SCHEMA.ENGINES WHERE engine = 'innodb' AND support IN ('YES', 'DEFAULT', 'ENABLED'); ENGINE SUPPORT COMMENT TRANSACTIONS XA SAVEPOINTS InnoDB YES Supports transactions, row-level locking, foreign keys and encryption for tables YES YES YES -FOUND /\[Note\] InnoDB: .* started; log sequence number 0/ in mysqld.1.err +FOUND 1 /\[Note\] InnoDB: .* started; log sequence number 0/ in mysqld.1.err # valid header, valid checkpoint 1, all-zero (invalid) checkpoint 2, invalid block number SELECT * FROM INFORMATION_SCHEMA.ENGINES WHERE engine = 'innodb' @@ -66,26 +66,26 @@ SELECT * FROM INFORMATION_SCHEMA.ENGINES WHERE engine = 'innodb' AND support IN ('YES', 'DEFAULT', 'ENABLED'); ENGINE SUPPORT COMMENT TRANSACTIONS XA SAVEPOINTS -FOUND /InnoDB: Starting crash recovery from checkpoint LSN=1213964/ in mysqld.1.err -FOUND /InnoDB: MLOG_FILE_NAME incorrect:bogus/ in mysqld.1.err -FOUND /InnoDB: ############### CORRUPT LOG RECORD FOUND ##################/ in mysqld.1.err -FOUND /InnoDB: Log record type 55, page 151:488\. Log parsing proceeded successfully up to 1213973\. Previous log record type 56, is multi 0 Recv offset 9, prev 0/ in mysqld.1.err -FOUND /len 22. hex 38000000000012860cb7809781e80006626f67757300. 
asc 8 bogus / in mysqld.1.err -FOUND /InnoDB: Set innodb_force_recovery to ignore this error/ in mysqld.1.err +FOUND 1 /InnoDB: Starting crash recovery from checkpoint LSN=1213964/ in mysqld.1.err +FOUND 1 /InnoDB: MLOG_FILE_NAME incorrect:bogus/ in mysqld.1.err +FOUND 1 /InnoDB: ############### CORRUPT LOG RECORD FOUND ##################/ in mysqld.1.err +FOUND 1 /InnoDB: Log record type 55, page 151:488\. Log parsing proceeded successfully up to 1213973\. Previous log record type 56, is multi 0 Recv offset 9, prev 0/ in mysqld.1.err +FOUND 1 /len 22. hex 38000000000012860cb7809781e80006626f67757300. asc 8 bogus / in mysqld.1.err +FOUND 1 /InnoDB: Set innodb_force_recovery to ignore this error/ in mysqld.1.err # Test a corrupted MLOG_FILE_NAME record. # valid header, invalid checkpoint 1, valid checkpoint 2, invalid block SELECT * FROM INFORMATION_SCHEMA.ENGINES WHERE engine = 'innodb' AND support IN ('YES', 'DEFAULT', 'ENABLED'); ENGINE SUPPORT COMMENT TRANSACTIONS XA SAVEPOINTS -FOUND /InnoDB: Invalid log block checksum. block: 2372 checkpoint no: 1 expected: 2454333373 found: 150151/ in mysqld.1.err +FOUND 1 /InnoDB: Invalid log block checksum. 
block: 2372 checkpoint no: 1 expected: 2454333373 found: 150151/ in mysqld.1.err # valid header, invalid checkpoint 1, valid checkpoint 2, invalid log record SELECT * FROM INFORMATION_SCHEMA.ENGINES WHERE engine = 'innodb' AND support IN ('YES', 'DEFAULT', 'ENABLED'); ENGINE SUPPORT COMMENT TRANSACTIONS XA SAVEPOINTS -FOUND /InnoDB: MLOG_FILE_NAME incorrect:bigot/ in mysqld.1.err -FOUND /len 22; hex 38000000000012860cb7809781e800066269676f7400; asc 8 bigot ;/ in mysqld.1.err +FOUND 1 /InnoDB: MLOG_FILE_NAME incorrect:bigot/ in mysqld.1.err +FOUND 1 /len 22; hex 38000000000012860cb7809781e800066269676f7400; asc 8 bigot ;/ in mysqld.1.err # missing MLOG_FILE_NAME or MLOG_FILE_DELETE before MLOG_CHECKPOINT SELECT * FROM INFORMATION_SCHEMA.ENGINES WHERE engine = 'innodb' @@ -97,8 +97,8 @@ SELECT * FROM INFORMATION_SCHEMA.ENGINES WHERE engine = 'innodb' AND support IN ('YES', 'DEFAULT', 'ENABLED'); ENGINE SUPPORT COMMENT TRANSACTIONS XA SAVEPOINTS -FOUND /InnoDB: Obtaining redo log encryption key version 1 failed/ in mysqld.1.err -FOUND /InnoDB: Decrypting checkpoint failed/ in mysqld.1.err +FOUND 1 /InnoDB: Obtaining redo log encryption key version 1 failed/ in mysqld.1.err +FOUND 1 /InnoDB: Decrypting checkpoint failed/ in mysqld.1.err ib_buffer_pool ib_logfile0 ib_logfile1 diff --git a/mysql-test/suite/innodb/r/log_file.result b/mysql-test/suite/innodb/r/log_file.result index 918faec8adae5..f0f8007cb0946 100644 --- a/mysql-test/suite/innodb/r/log_file.result +++ b/mysql-test/suite/innodb/r/log_file.result @@ -6,14 +6,14 @@ SELECT * FROM INFORMATION_SCHEMA.ENGINES WHERE engine = 'innodb' AND support IN ('YES', 'DEFAULT', 'ENABLED'); ENGINE SUPPORT COMMENT TRANSACTIONS XA SAVEPOINTS -FOUND /\[ERROR\] InnoDB: Could not create undo tablespace '.*undo002'/ in mysqld.1.err +FOUND 1 /\[ERROR\] InnoDB: Could not create undo tablespace '.*undo002'/ in mysqld.1.err # Remove undo001,undo002,ibdata1,ibdata2,ib_logfile1,ib_logfile2,ib_logfile101 # Start mysqld with non existent 
innodb_log_group_home_dir SELECT * FROM INFORMATION_SCHEMA.ENGINES WHERE engine = 'innodb' AND support IN ('YES', 'DEFAULT', 'ENABLED'); ENGINE SUPPORT COMMENT TRANSACTIONS XA SAVEPOINTS -FOUND /File .path.to.non-existent.*ib_logfile101: 'create' returned OS error \d+/ in mysqld.1.err +FOUND 1 /File .path.to.non-existent.*ib_logfile101: 'create' returned OS error \d+/ in mysqld.1.err # Remove ibdata1 & ibdata2 # Successfully let InnoDB create tablespaces SELECT COUNT(*) `1` FROM INFORMATION_SCHEMA.ENGINES @@ -27,7 +27,7 @@ SELECT * FROM INFORMATION_SCHEMA.ENGINES WHERE engine = 'innodb' AND support IN ('YES', 'DEFAULT', 'ENABLED'); ENGINE SUPPORT COMMENT TRANSACTIONS XA SAVEPOINTS -FOUND /The innodb_system data file 'ibdata1' was not found but one of the other data files 'ibdata2' exists/ in mysqld.1.err +FOUND 1 /The innodb_system data file 'ibdata1' was not found but one of the other data files 'ibdata2' exists/ in mysqld.1.err bak_ib_logfile0 bak_ib_logfile1 bak_ib_logfile2 @@ -49,8 +49,8 @@ SELECT * FROM INFORMATION_SCHEMA.ENGINES WHERE engine = 'innodb' AND support IN ('YES', 'DEFAULT', 'ENABLED'); ENGINE SUPPORT COMMENT TRANSACTIONS XA SAVEPOINTS -FOUND /InnoDB: Tablespace size stored in header is \d+ pages, but the sum of data file sizes is \d+ pages/ in mysqld.1.err -FOUND /InnoDB: Cannot start InnoDB. The tail of the system tablespace is missing/ in mysqld.1.err +FOUND 1 /InnoDB: Tablespace size stored in header is \d+ pages, but the sum of data file sizes is \d+ pages/ in mysqld.1.err +FOUND 1 /InnoDB: Cannot start InnoDB. The tail of the system tablespace is missing/ in mysqld.1.err bak_ib_logfile0 bak_ib_logfile1 bak_ib_logfile2 @@ -88,7 +88,7 @@ SELECT * FROM INFORMATION_SCHEMA.ENGINES WHERE engine = 'innodb' AND support IN ('YES', 'DEFAULT', 'ENABLED'); ENGINE SUPPORT COMMENT TRANSACTIONS XA SAVEPOINTS -FOUND /InnoDB: undo tablespace .*undo001.* exists\. Creating system tablespace with existing undo tablespaces is not supported\. 
Please delete all undo tablespaces before creating new system tablespace\./ in mysqld.1.err +FOUND 1 /InnoDB: undo tablespace .*undo001.* exists\. Creating system tablespace with existing undo tablespaces is not supported\. Please delete all undo tablespaces before creating new system tablespace\./ in mysqld.1.err bak_ib_logfile0 bak_ib_logfile1 bak_ib_logfile2 @@ -175,7 +175,7 @@ SELECT * FROM INFORMATION_SCHEMA.ENGINES WHERE engine = 'innodb' AND support IN ('YES', 'DEFAULT', 'ENABLED'); ENGINE SUPPORT COMMENT TRANSACTIONS XA SAVEPOINTS -FOUND /undo tablespace .*undo003.* exists\. Creating system tablespace with existing undo tablespaces is not supported\. Please delete all undo tablespaces before creating new system tablespace\./ in mysqld.1.err +FOUND 1 /undo tablespace .*undo003.* exists\. Creating system tablespace with existing undo tablespaces is not supported\. Please delete all undo tablespaces before creating new system tablespace\./ in mysqld.1.err bak_ib_logfile0 bak_ib_logfile1 bak_ib_logfile2 @@ -207,7 +207,7 @@ SELECT * FROM INFORMATION_SCHEMA.ENGINES WHERE engine = 'innodb' AND support IN ('YES', 'DEFAULT', 'ENABLED'); ENGINE SUPPORT COMMENT TRANSACTIONS XA SAVEPOINTS -FOUND /InnoDB: Unable to open undo tablespace.*undo002/ in mysqld.1.err +FOUND 1 /InnoDB: Unable to open undo tablespace.*undo002/ in mysqld.1.err bak_ib_logfile0 bak_ib_logfile1 bak_ib_logfile2 @@ -244,7 +244,7 @@ SELECT * FROM INFORMATION_SCHEMA.ENGINES WHERE engine = 'innodb' AND support IN ('YES', 'DEFAULT', 'ENABLED'); ENGINE SUPPORT COMMENT TRANSACTIONS XA SAVEPOINTS -FOUND /InnoDB: Unable to open undo tablespace.*undo001/ in mysqld.1.err +FOUND 1 /InnoDB: Unable to open undo tablespace.*undo001/ in mysqld.1.err bak_ib_logfile0 bak_ib_logfile1 bak_ib_logfile2 @@ -340,7 +340,7 @@ WHERE engine='innodb' AND support IN ('YES', 'DEFAULT', 'ENABLED'); 1 1 -FOUND /Resizing redo log from 1\*\d+ to 3\*\d+ pages; LSN=\d+/ in mysqld.1.err +FOUND 1 /Resizing redo log from 1\*\d+ to 3\*\d+ 
pages; LSN=\d+/ in mysqld.1.err # Cleanup bak_ib_logfile0 bak_ib_logfile1 diff --git a/mysql-test/suite/innodb/r/log_file_name.result b/mysql-test/suite/innodb/r/log_file_name.result index e59041657812c..df4c9f637be53 100644 --- a/mysql-test/suite/innodb/r/log_file_name.result +++ b/mysql-test/suite/innodb/r/log_file_name.result @@ -14,30 +14,30 @@ SELECT * FROM INFORMATION_SCHEMA.ENGINES WHERE engine = 'innodb' AND support IN ('YES', 'DEFAULT', 'ENABLED'); ENGINE SUPPORT COMMENT TRANSACTIONS XA SAVEPOINTS -FOUND /InnoDB: Ignoring data file '.*t2.ibd' with space ID \d+. Another data file called .*t1.ibd exists with the same space ID/ in mysqld.1.err +FOUND 1 /InnoDB: Ignoring data file '.*t2.ibd' with space ID \d+. Another data file called .*t1.ibd exists with the same space ID.*/ in mysqld.1.err # Fault 2: Wrong space_id in a dirty file, and a missing file. SELECT * FROM INFORMATION_SCHEMA.ENGINES WHERE engine = 'innodb' AND support IN ('YES', 'DEFAULT', 'ENABLED'); ENGINE SUPPORT COMMENT TRANSACTIONS XA SAVEPOINTS -FOUND /InnoDB: Ignoring data file '.*t1.ibd' with space ID/ in mysqld.1.err -FOUND /InnoDB: Tablespace \d+ was not found at.*t3.ibd/ in mysqld.1.err +FOUND 1 /InnoDB: Ignoring data file '.*t1.ibd' with space ID.*/ in mysqld.1.err +FOUND 1 /InnoDB: Tablespace \d+ was not found at.*t3.ibd.*/ in mysqld.1.err # Fault 3: Wrong space_id in a dirty file, and no missing file. 
SELECT * FROM INFORMATION_SCHEMA.ENGINES WHERE engine = 'innodb' AND support IN ('YES', 'DEFAULT', 'ENABLED'); ENGINE SUPPORT COMMENT TRANSACTIONS XA SAVEPOINTS -FOUND /InnoDB: Ignoring data file '.*t[23].ibd' with space ID/ in mysqld.1.err -FOUND /InnoDB: Tablespace \d+ was not found at .*t1.ibd/ in mysqld.1.err -FOUND /InnoDB: Tablespace \d+ was not found at .*t3.ibd/ in mysqld.1.err -FOUND /InnoDB: Set innodb_force_recovery=1 to ignore this and to permanently lose all changes to the tablespace/ in mysqld.1.err +FOUND 1 /InnoDB: Ignoring data file '.*t[23].ibd' with space ID.*/ in mysqld.1.err +FOUND 1 /InnoDB: Tablespace \d+ was not found at .*t1.ibd.*/ in mysqld.1.err +FOUND 1 /InnoDB: Tablespace \d+ was not found at .*t3.ibd.*/ in mysqld.1.err +FOUND 1 /InnoDB: Set innodb_force_recovery=1 to ignore this and to permanently lose all changes to the tablespace.*/ in mysqld.1.err # Fault 4: Missing data file SELECT * FROM INFORMATION_SCHEMA.ENGINES WHERE engine = 'innodb' AND support IN ('YES', 'DEFAULT', 'ENABLED'); ENGINE SUPPORT COMMENT TRANSACTIONS XA SAVEPOINTS -FOUND /InnoDB: Tablespace \d+ was not found at .*t[12].ibd. -.*InnoDB: Set innodb_force_recovery=1 to ignore this and to permanently lose all changes to the tablespace/ in mysqld.1.err +FOUND 1 /InnoDB: Tablespace \d+ was not found at .*t[12].ibd. +.*InnoDB: Set innodb_force_recovery=1 to ignore this and to permanently lose all changes to the tablespace.*/ in mysqld.1.err # Fault 5: Wrong type of data file SELECT * FROM INFORMATION_SCHEMA.ENGINES WHERE engine = 'innodb' @@ -47,8 +47,8 @@ SELECT * FROM INFORMATION_SCHEMA.ENGINES WHERE engine = 'innodb' AND support IN ('YES', 'DEFAULT', 'ENABLED'); ENGINE SUPPORT COMMENT TRANSACTIONS XA SAVEPOINTS -FOUND /\[ERROR\] InnoDB: Cannot read first page of .*t2.ibd/ in mysqld.1.err -FOUND /\[ERROR\] InnoDB: Datafile .*t2.*\. 
Cannot determine the space ID from the first 64 pages/ in mysqld.1.err +FOUND 1 /\[ERROR\] InnoDB: Cannot read first page of .*t2.ibd.*/ in mysqld.1.err +FOUND 1 /\[ERROR\] InnoDB: Datafile .*t2.*\. Cannot determine the space ID from the first 64 pages.*/ in mysqld.1.err SELECT * FROM t2; a 9 @@ -81,20 +81,20 @@ SELECT * FROM INFORMATION_SCHEMA.ENGINES WHERE engine = 'innodb' AND support IN ('YES', 'DEFAULT', 'ENABLED'); ENGINE SUPPORT COMMENT TRANSACTIONS XA SAVEPOINTS -FOUND /\[ERROR\] InnoDB: Header page consists of zero bytes in datafile: .*u1.ibd/ in mysqld.1.err -FOUND /\[ERROR\] InnoDB: Datafile .*u1.*\. Cannot determine the space ID from the first 64 pages/ in mysqld.1.err -FOUND /\[ERROR\] InnoDB: Cannot read first page of .*u2.ibd/ in mysqld.1.err +FOUND 1 /\[ERROR\] InnoDB: Header page consists of zero bytes in datafile: .*u1.ibd.*/ in mysqld.1.err +FOUND 1 /\[ERROR\] InnoDB: Datafile .*u1.*\. Cannot determine the space ID from the first 64 pages.*/ in mysqld.1.err +FOUND 1 /\[ERROR\] InnoDB: Cannot read first page of .*u2.ibd.*/ in mysqld.1.err # Fault 7: Missing or wrong data file and innodb_force_recovery SELECT * FROM INFORMATION_SCHEMA.ENGINES WHERE engine = 'innodb' AND support IN ('YES', 'DEFAULT', 'ENABLED'); ENGINE SUPPORT COMMENT TRANSACTIONS XA SAVEPOINTS -FOUND /\[ERROR\] InnoDB: Header page consists of zero bytes in datafile: .*u1.ibd/ in mysqld.1.err -FOUND /InnoDB: At LSN: \d+: unable to open file .*u[1-5].ibd for tablespace/ in mysqld.1.err -FOUND /\[ERROR\] InnoDB: Cannot rename '.*u5.ibd' to '.*u6.ibd' for space ID \d+ because the target file exists/ in mysqld.1.err -FOUND /\[ERROR\] InnoDB: Header page consists of zero bytes in datafile: .*u1.ibd/ in mysqld.1.err -FOUND /InnoDB: At LSN: \d+: unable to open file .*u[1-5].ibd for tablespace/ in mysqld.1.err -FOUND /\[Warning\] InnoDB: Tablespace \d+ was not found at .*u[1-5].ibd, and innodb_force_recovery was set. 
All redo log for this tablespace will be ignored!/ in mysqld.1.err +FOUND 1 /\[ERROR\] InnoDB: Header page consists of zero bytes in datafile: .*u1.ibd.*/ in mysqld.1.err +FOUND 1 /InnoDB: At LSN: \d+: unable to open file .*u[1-5].ibd for tablespace.*/ in mysqld.1.err +FOUND 1 /\[ERROR\] InnoDB: Cannot rename '.*u5.ibd' to '.*u6.ibd' for space ID \d+ because the target file exists.*/ in mysqld.1.err +FOUND 1 /\[ERROR\] InnoDB: Header page consists of zero bytes in datafile: .*u1.ibd.*/ in mysqld.1.err +FOUND 1 /InnoDB: At LSN: \d+: unable to open file .*u[1-5].ibd for tablespace.*/ in mysqld.1.err +FOUND 1 /\[Warning\] InnoDB: Tablespace \d+ was not found at .*u[1-5].ibd, and innodb_force_recovery was set. All redo log for this tablespace will be ignored!.*/ in mysqld.1.err DROP TABLE u1,u2,u3,u6; # List of files: SHOW TABLES; diff --git a/mysql-test/suite/innodb/r/log_file_name_debug.result b/mysql-test/suite/innodb/r/log_file_name_debug.result index e33ce36d1f2d6..ae7ce48fe5e80 100644 --- a/mysql-test/suite/innodb/r/log_file_name_debug.result +++ b/mysql-test/suite/innodb/r/log_file_name_debug.result @@ -7,8 +7,8 @@ CREATE TABLE t1(a INT PRIMARY KEY) ENGINE=InnoDB; # Kill the server SELECT * FROM t1; ERROR 42000: Unknown storage engine 'InnoDB' -FOUND /InnoDB: Tablespace 4294967280 was not found at .*, but there were no modifications either/ in mysqld.1.err +FOUND 1 /InnoDB: Tablespace 4294967280 was not found at .*, but there were no modifications either/ in mysqld.1.err SELECT * FROM t1; ERROR 42000: Unknown storage engine 'InnoDB' -FOUND /srv_prepare_to_delete_redo_log_files: ib_log: MLOG_CHECKPOINT.* written/ in mysqld.1.err +FOUND 1 /srv_prepare_to_delete_redo_log_files: ib_log: MLOG_CHECKPOINT.* written/ in mysqld.1.err DROP TABLE t1; diff --git a/mysql-test/suite/innodb/r/log_file_size.result b/mysql-test/suite/innodb/r/log_file_size.result index e07dba67a7bf2..9e0cb4dbbf7b7 100644 --- a/mysql-test/suite/innodb/r/log_file_size.result +++ 
b/mysql-test/suite/innodb/r/log_file_size.result @@ -1,13 +1,11 @@ CREATE TABLE t1(a INT PRIMARY KEY) ENGINE=InnoDB; BEGIN; INSERT INTO t1 VALUES (42); -# Kill and restart: --innodb-log-file-size=6M SELECT * FROM t1; a INSERT INTO t1 VALUES (42); BEGIN; DELETE FROM t1; -# Kill and restart: --innodb-log-files-in-group=3 --innodb-log-file-size=5M SELECT * FROM t1; a 42 @@ -24,34 +22,48 @@ connection default; # Kill the server SELECT * FROM t1; ERROR 42000: Unknown storage engine 'InnoDB' +FOUND 1 /syntax error in innodb_log_group_home_dir/ in mysqld.1.err SELECT * FROM t1; ERROR 42000: Unknown storage engine 'InnoDB' +FOUND 1 /InnoDB: Starting crash recovery from checkpoint LSN=.*/ in mysqld.1.err SELECT * FROM t1; ERROR 42000: Unknown storage engine 'InnoDB' SELECT * FROM t1; ERROR 42000: Unknown storage engine 'InnoDB' +FOUND 1 /InnoDB: innodb_read_only prevents crash recovery/ in mysqld.1.err SELECT * FROM t1; ERROR 42000: Unknown storage engine 'InnoDB' +FOUND 2 /redo log from 3\*[0-9]+ to 2\*[0-9]+ pages/ in mysqld.1.err SELECT * FROM t1; ERROR 42000: Unknown storage engine 'InnoDB' +FOUND 3 /redo log from 3\*[0-9]+ to 2\*[0-9]+ pages/ in mysqld.1.err SELECT * FROM t1; ERROR 42000: Unknown storage engine 'InnoDB' +FOUND 2 /InnoDB: innodb_read_only prevents crash recovery/ in mysqld.1.err SELECT * FROM t1; ERROR 42000: Unknown storage engine 'InnoDB' +FOUND 4 /redo log from 3\*[0-9]+ to 2\*[0-9]+ pages/ in mysqld.1.err SELECT * FROM t1; ERROR 42000: Unknown storage engine 'InnoDB' SELECT * FROM t1; ERROR 42000: Unknown storage engine 'InnoDB' +FOUND 1 /InnoDB: Cannot create log files in read-only mode/ in mysqld.1.err SELECT * FROM t1; ERROR 42000: Unknown storage engine 'InnoDB' +FOUND 1 /InnoDB: Setting log file .*ib_logfile[0-9]+ size to/ in mysqld.1.err SELECT * FROM t1; ERROR 42000: Unknown storage engine 'InnoDB' +FOUND 1 /InnoDB: Setting log file .*ib_logfile[0-9]+ size to/ in mysqld.1.err SELECT * FROM t1; ERROR 42000: Unknown storage engine 'InnoDB' 
+FOUND 1 /InnoDB: Log file .*ib_logfile0 size 7 is not a multiple of innodb_page_size/ in mysqld.1.err SELECT * FROM t1; ERROR 42000: Unknown storage engine 'InnoDB' +FOUND 1 /InnoDB: Log file .*ib_logfile1 is of different size 1048576 bytes than other log files/ in mysqld.1.err SELECT * FROM t1; ERROR 42000: Unknown storage engine 'InnoDB' +FOUND 1 /InnoDB: Setting log file .*ib_logfile[0-9]+ size to/ in mysqld.1.err +FOUND 1 /InnoDB: Renaming log file .*ib_logfile101 to .*ib_logfile0/ in mysqld.1.err SELECT * FROM t1; a 42 diff --git a/mysql-test/suite/innodb/r/read_only_recovery.result b/mysql-test/suite/innodb/r/read_only_recovery.result index 7fcbfddf33ea7..532749a7aae32 100644 --- a/mysql-test/suite/innodb/r/read_only_recovery.result +++ b/mysql-test/suite/innodb/r/read_only_recovery.result @@ -13,7 +13,6 @@ SET GLOBAL innodb_flush_log_at_trx_commit=1; BEGIN; INSERT INTO t VALUES(0); ROLLBACK; -# Kill and restart: --innodb-force-recovery=3 disconnect con1; SELECT * FROM t; a diff --git a/mysql-test/suite/innodb/r/temporary_table.result b/mysql-test/suite/innodb/r/temporary_table.result index 1fb73f4e775af..72c5250934b44 100644 --- a/mysql-test/suite/innodb/r/temporary_table.result +++ b/mysql-test/suite/innodb/r/temporary_table.result @@ -138,18 +138,23 @@ Tables_in_test create temporary table t1 (keyc int, c1 char(100), c2 char(100)) engine = innodb; ERROR HY000: Can't create table `test`.`t1` (errno: 165 "Table is read only") # test various bad start-up parameters +FOUND 1 /innodb_temporary and innodb_system file names seem to be the same/ in mysqld.1.err SELECT * FROM INFORMATION_SCHEMA.ENGINES WHERE engine = 'innodb' AND support IN ('YES', 'DEFAULT', 'ENABLED'); ENGINE SUPPORT COMMENT TRANSACTIONS XA SAVEPOINTS +FOUND 1 /support raw device/ in mysqld.1.err SELECT * FROM INFORMATION_SCHEMA.ENGINES WHERE engine = 'innodb' AND support IN ('YES', 'DEFAULT', 'ENABLED'); ENGINE SUPPORT COMMENT TRANSACTIONS XA SAVEPOINTS +FOUND 2 /support raw device/ in 
mysqld.1.err SELECT * FROM INFORMATION_SCHEMA.ENGINES WHERE engine = 'innodb' AND support IN ('YES', 'DEFAULT', 'ENABLED'); ENGINE SUPPORT COMMENT TRANSACTIONS XA SAVEPOINTS +FOUND 1 /The innodb_temporary data file 'ibtmp1' must be at least/ in mysqld.1.err SELECT * FROM INFORMATION_SCHEMA.ENGINES WHERE engine = 'innodb' AND support IN ('YES', 'DEFAULT', 'ENABLED'); ENGINE SUPPORT COMMENT TRANSACTIONS XA SAVEPOINTS +FOUND 1 /InnoDB: syntax error in file path/ in mysqld.1.err SELECT * FROM INFORMATION_SCHEMA.ENGINES WHERE engine = 'innodb' AND support IN ('YES', 'DEFAULT', 'ENABLED'); ENGINE SUPPORT COMMENT TRANSACTIONS XA SAVEPOINTS diff --git a/mysql-test/suite/innodb/r/xa_recovery.result b/mysql-test/suite/innodb/r/xa_recovery.result index 7a9448ad9f04d..a93afcb07f8e3 100644 --- a/mysql-test/suite/innodb/r/xa_recovery.result +++ b/mysql-test/suite/innodb/r/xa_recovery.result @@ -6,7 +6,6 @@ UPDATE t1 set a=2; XA END 'x'; XA PREPARE 'x'; connection default; -# Kill and restart disconnect con1; connect con1,localhost,root; SELECT * FROM t1 LOCK IN SHARE MODE; diff --git a/mysql-test/suite/innodb/t/autoinc_persist.test b/mysql-test/suite/innodb/t/autoinc_persist.test index 45a96f85fe114..904ed51f718e9 100644 --- a/mysql-test/suite/innodb/t/autoinc_persist.test +++ b/mysql-test/suite/innodb/t/autoinc_persist.test @@ -251,7 +251,8 @@ set global innodb_flush_log_at_trx_commit=1; INSERT INTO t9 VALUES(100000000200); DELETE FROM t9 WHERE a = 100000000200; ---source include/kill_and_restart_mysqld.inc +--let $shutdown_timeout=0 +--source include/restart_mysqld.inc INSERT INTO t1 VALUES(0); SELECT a AS `Expect 126` FROM t1 ORDER BY a DESC LIMIT 1; @@ -306,7 +307,7 @@ RENAME TABLE t9 to t19; INSERT INTO t19 VALUES(0), (0); SELECT * FROM t19; ---source include/kill_and_restart_mysqld.inc +--source include/restart_mysqld.inc INSERT INTO t1 VALUES(0), (0); SELECT * FROM t1; @@ -400,7 +401,7 @@ while ($i) { --enable_query_log ROLLBACK; ---source 
include/kill_and_restart_mysqld.inc +--source include/restart_mysqld.inc INSERT INTO t3 VALUES(0); SELECT MAX(a) AS `Expect 120` FROM t3; @@ -494,7 +495,7 @@ UPDATE t33 SET a = 10 WHERE a = 1; INSERT INTO t33 VALUES(2, NULL); COMMIT; ---source include/kill_and_restart_mysqld.inc +--source include/restart_mysqld.inc --echo # This will not insert 0 INSERT INTO t31(a) VALUES(6), (0); diff --git a/mysql-test/suite/innodb/t/innodb-blob.test b/mysql-test/suite/innodb/t/innodb-blob.test index ea50af4a7fceb..d2484e2175d45 100644 --- a/mysql-test/suite/innodb/t/innodb-blob.test +++ b/mysql-test/suite/innodb/t/innodb-blob.test @@ -70,7 +70,8 @@ SELECT a FROM t1; BEGIN; INSERT INTO t2 VALUES (42); ---source include/kill_and_restart_mysqld.inc +--let $shutdown_timeout=0 +--source include/restart_mysqld.inc disconnect con1; disconnect con2; @@ -138,7 +139,8 @@ SET DEBUG_SYNC='now WAIT_FOR have_latch'; SELECT info FROM information_schema.processlist WHERE state = 'debug sync point: before_row_upd_extern'; ---source include/kill_and_restart_mysqld.inc +--let $shutdown_timeout=0 +--source include/restart_mysqld.inc disconnect con2; connection default; @@ -177,7 +179,8 @@ SET DEBUG_SYNC='now WAIT_FOR have_latch'; SELECT info FROM information_schema.processlist WHERE state = 'debug sync point: after_row_upd_extern'; ---source include/kill_and_restart_mysqld.inc +--let $shutdown_timeout=0 +--source include/restart_mysqld.inc disconnect con2; connection default; diff --git a/mysql-test/suite/innodb/t/innodb_bug53756.test b/mysql-test/suite/innodb/t/innodb_bug53756.test index d6bccf70147bb..a676868aea794 100644 --- a/mysql-test/suite/innodb/t/innodb_bug53756.test +++ b/mysql-test/suite/innodb/t/innodb_bug53756.test @@ -84,7 +84,8 @@ SELECT * FROM bug_53756; START TRANSACTION; INSERT INTO bug_53756 VALUES (666,666); ---source include/kill_and_restart_mysqld.inc +--let $shutdown_timeout=0 +--source include/restart_mysqld.inc --disconnect con1 --disconnect con2 --disconnect con3 diff 
--git a/mysql-test/suite/innodb/t/innodb_bug59641.test b/mysql-test/suite/innodb/t/innodb_bug59641.test index 5f7528cf01af5..e0d3431e45b0c 100644 --- a/mysql-test/suite/innodb/t/innodb_bug59641.test +++ b/mysql-test/suite/innodb/t/innodb_bug59641.test @@ -33,7 +33,8 @@ XA PREPARE '789'; CONNECT (con3,localhost,root,,); ---source include/kill_and_restart_mysqld.inc +--let $shutdown_timeout=0 +--source include/restart_mysqld.inc SET TRANSACTION ISOLATION LEVEL READ UNCOMMITTED; SELECT * FROM t; COMMIT; diff --git a/mysql-test/suite/innodb/t/log_alter_table.test b/mysql-test/suite/innodb/t/log_alter_table.test index f479c6695aa97..6f12dfaf0b9b0 100644 --- a/mysql-test/suite/innodb/t/log_alter_table.test +++ b/mysql-test/suite/innodb/t/log_alter_table.test @@ -4,6 +4,9 @@ # Embedded server does not support crashing --source include/not_embedded.inc +# start afresh +--source include/restart_mysqld.inc + --echo # --echo # Bug#21801423 INNODB REDO LOG DOES NOT INDICATE WHEN --echo # FILES ARE CREATED @@ -26,19 +29,12 @@ ALTER TABLE t1 DROP INDEX b, ADD INDEX (b); --let $restart_parameters= --debug=d,ib_log --source include/start_mysqld.inc -let SEARCH_RANGE = -50000; let SEARCH_FILE = $MYSQLTEST_VARDIR/log/mysqld.1.err; let SEARCH_ABORT=NOT FOUND; -# Look for at least one MLOG_FILE_CREATE2 in the error log. -# Theoretically, it may have been written by this test or an earlier test. -# FIXME: redirect the error log of the restart to a new file, -# and ensure that we have exactly 2 records there. +# ensure that we have exactly 2 records there. let SEARCH_PATTERN=scan .*: multi-log rec MLOG_FILE_CREATE2.*page .*:0; --source include/search_pattern_in_file.inc -# Look for at least one MLOG_INDEX_LOAD in the error log. -# Theoretically, it may have been written by this test or an earlier test. -# FIXME: redirect the error log of the restart to a new file, -# and ensure that we have exactly 3 records there. +# ensure that we have exactly 3 records there. 
let SEARCH_PATTERN=scan .*: log rec MLOG_INDEX_LOAD; --source include/search_pattern_in_file.inc diff --git a/mysql-test/suite/innodb/t/log_corruption.test b/mysql-test/suite/innodb/t/log_corruption.test index 7cfbda181e0dd..8013cc4583098 100644 --- a/mysql-test/suite/innodb/t/log_corruption.test +++ b/mysql-test/suite/innodb/t/log_corruption.test @@ -20,7 +20,6 @@ call mtr.add_suppression("InnoDB: Decrypting checkpoint failed"); let bugdir= $MYSQLTEST_VARDIR/tmp/log_corruption; --mkdir $bugdir ---let SEARCH_RANGE = -50000 --let SEARCH_FILE = $MYSQLTEST_VARDIR/log/mysqld.1.err let $check_no_innodb=SELECT * FROM INFORMATION_SCHEMA.ENGINES diff --git a/mysql-test/suite/innodb/t/log_file.test b/mysql-test/suite/innodb/t/log_file.test index e0f4b85c6825b..f5d46b1db108b 100644 --- a/mysql-test/suite/innodb/t/log_file.test +++ b/mysql-test/suite/innodb/t/log_file.test @@ -26,7 +26,6 @@ let bugdir= $MYSQLTEST_VARDIR/tmp/log_file; --mkdir $bugdir let SEARCH_FILE= $MYSQLTEST_VARDIR/log/mysqld.1.err; -let SEARCH_RANGE = -100000; let $check_no_innodb=SELECT * FROM INFORMATION_SCHEMA.ENGINES WHERE engine = 'innodb' AND support IN ('YES', 'DEFAULT', 'ENABLED'); diff --git a/mysql-test/suite/innodb/t/log_file_name.test b/mysql-test/suite/innodb/t/log_file_name.test index e528abc80d5a3..0a8dc3e1fc0a6 100644 --- a/mysql-test/suite/innodb/t/log_file_name.test +++ b/mysql-test/suite/innodb/t/log_file_name.test @@ -30,7 +30,6 @@ COMMIT; --copy_file $MYSQLD_DATADIR/test/t2.ibd $MYSQLD_DATADIR/test/t1.ibd let SEARCH_FILE= $MYSQLTEST_VARDIR/log/mysqld.1.err; -let SEARCH_RANGE= -50000; let $check_no_innodb=SELECT * FROM INFORMATION_SCHEMA.ENGINES WHERE engine = 'innodb' AND support IN ('YES', 'DEFAULT', 'ENABLED'); @@ -39,7 +38,7 @@ AND support IN ('YES', 'DEFAULT', 'ENABLED'); # checkpoint after the INSERT. That is what we checked above. --source include/start_mysqld.inc eval $check_no_innodb; -let SEARCH_PATTERN= InnoDB: Ignoring data file '.*t2.ibd' with space ID \d+. 
Another data file called .*t1.ibd exists with the same space ID; +let SEARCH_PATTERN= InnoDB: Ignoring data file '.*t2.ibd' with space ID \d+. Another data file called .*t1.ibd exists with the same space ID.*; --source include/search_pattern_in_file.inc --source include/shutdown_mysqld.inc @@ -54,10 +53,10 @@ let SEARCH_PATTERN= InnoDB: Ignoring data file '.*t2.ibd' with space ID \d+. Ano --source include/start_mysqld.inc eval $check_no_innodb; -let SEARCH_PATTERN= InnoDB: Ignoring data file '.*t1.ibd' with space ID; +let SEARCH_PATTERN= InnoDB: Ignoring data file '.*t1.ibd' with space ID.*; --source include/search_pattern_in_file.inc -let SEARCH_PATTERN= InnoDB: Tablespace \d+ was not found at.*t3.ibd; +let SEARCH_PATTERN= InnoDB: Tablespace \d+ was not found at.*t3.ibd.*; --source include/search_pattern_in_file.inc --source include/shutdown_mysqld.inc @@ -73,14 +72,14 @@ let SEARCH_PATTERN= InnoDB: Tablespace \d+ was not found at.*t3.ibd; --source include/start_mysqld.inc eval $check_no_innodb; -let SEARCH_PATTERN= InnoDB: Ignoring data file '.*t[23].ibd' with space ID; +let SEARCH_PATTERN= InnoDB: Ignoring data file '.*t[23].ibd' with space ID.*; --source include/search_pattern_in_file.inc -let SEARCH_PATTERN= InnoDB: Tablespace \d+ was not found at .*t1.ibd; +let SEARCH_PATTERN= InnoDB: Tablespace \d+ was not found at .*t1.ibd.*; --source include/search_pattern_in_file.inc -let SEARCH_PATTERN= InnoDB: Tablespace \d+ was not found at .*t3.ibd; +let SEARCH_PATTERN= InnoDB: Tablespace \d+ was not found at .*t3.ibd.*; --source include/search_pattern_in_file.inc -let SEARCH_PATTERN= InnoDB: Set innodb_force_recovery=1 to ignore this and to permanently lose all changes to the tablespace; +let SEARCH_PATTERN= InnoDB: Set innodb_force_recovery=1 to ignore this and to permanently lose all changes to the tablespace.*; --source include/search_pattern_in_file.inc --source include/shutdown_mysqld.inc @@ -96,7 +95,7 @@ eval $check_no_innodb; --source 
include/shutdown_mysqld.inc let SEARCH_PATTERN= InnoDB: Tablespace \d+ was not found at .*t[12].ibd. -.*InnoDB: Set innodb_force_recovery=1 to ignore this and to permanently lose all changes to the tablespace; +.*InnoDB: Set innodb_force_recovery=1 to ignore this and to permanently lose all changes to the tablespace.*; --source include/search_pattern_in_file.inc --echo # Fault 5: Wrong type of data file @@ -120,9 +119,9 @@ EOF eval $check_no_innodb; --source include/shutdown_mysqld.inc -let SEARCH_PATTERN= \[ERROR\] InnoDB: Cannot read first page of .*t2.ibd; +let SEARCH_PATTERN= \[ERROR\] InnoDB: Cannot read first page of .*t2.ibd.*; --source include/search_pattern_in_file.inc -let SEARCH_PATTERN= \[ERROR\] InnoDB: Datafile .*t2.*\. Cannot determine the space ID from the first 64 pages; +let SEARCH_PATTERN= \[ERROR\] InnoDB: Datafile .*t2.*\. Cannot determine the space ID from the first 64 pages.*; --source include/search_pattern_in_file.inc # Restore t2.ibd @@ -214,17 +213,17 @@ EOF --source include/start_mysqld.inc eval $check_no_innodb; -let SEARCH_PATTERN= \[ERROR\] InnoDB: Header page consists of zero bytes in datafile: .*u1.ibd; +let SEARCH_PATTERN= \[ERROR\] InnoDB: Header page consists of zero bytes in datafile: .*u1.ibd.*; --source include/search_pattern_in_file.inc -let SEARCH_PATTERN= \[ERROR\] InnoDB: Datafile .*u1.*\. Cannot determine the space ID from the first 64 pages; +let SEARCH_PATTERN= \[ERROR\] InnoDB: Datafile .*u1.*\. 
Cannot determine the space ID from the first 64 pages.*; --source include/search_pattern_in_file.inc # TODO: These errors should state the file name (u2.ibd) and be ignored # in innodb-force-recovery mode once # Bug#18131883 IMPROVE INNODB ERROR MESSAGES REGARDING FILES # has been fixed: -let SEARCH_PATTERN= \[ERROR\] InnoDB: Cannot read first page of .*u2.ibd; +let SEARCH_PATTERN= \[ERROR\] InnoDB: Cannot read first page of .*u2.ibd.*; --source include/search_pattern_in_file.inc --source include/shutdown_mysqld.inc @@ -239,26 +238,26 @@ let SEARCH_PATTERN= \[ERROR\] InnoDB: Cannot read first page of .*u2.ibd; --source include/start_mysqld.inc eval $check_no_innodb; -let SEARCH_PATTERN= \[ERROR\] InnoDB: Header page consists of zero bytes in datafile: .*u1.ibd; +let SEARCH_PATTERN= \[ERROR\] InnoDB: Header page consists of zero bytes in datafile: .*u1.ibd.*; --source include/search_pattern_in_file.inc -let SEARCH_PATTERN= InnoDB: At LSN: \d+: unable to open file .*u[1-5].ibd for tablespace; +let SEARCH_PATTERN= InnoDB: At LSN: \d+: unable to open file .*u[1-5].ibd for tablespace.*; --source include/search_pattern_in_file.inc -let SEARCH_PATTERN= \[ERROR\] InnoDB: Cannot rename '.*u5.ibd' to '.*u6.ibd' for space ID \d+ because the target file exists; +let SEARCH_PATTERN= \[ERROR\] InnoDB: Cannot rename '.*u5.ibd' to '.*u6.ibd' for space ID \d+ because the target file exists.*; --source include/search_pattern_in_file.inc --remove_file $MYSQLD_DATADIR/test/u6.ibd --source include/restart_mysqld.inc -let SEARCH_PATTERN= \[ERROR\] InnoDB: Header page consists of zero bytes in datafile: .*u1.ibd; +let SEARCH_PATTERN= \[ERROR\] InnoDB: Header page consists of zero bytes in datafile: .*u1.ibd.*; --source include/search_pattern_in_file.inc -let SEARCH_PATTERN= InnoDB: At LSN: \d+: unable to open file .*u[1-5].ibd for tablespace; +let SEARCH_PATTERN= InnoDB: At LSN: \d+: unable to open file .*u[1-5].ibd for tablespace.*; --source include/search_pattern_in_file.inc -let 
SEARCH_PATTERN= \[Warning\] InnoDB: Tablespace \d+ was not found at .*u[1-5].ibd, and innodb_force_recovery was set. All redo log for this tablespace will be ignored!; +let SEARCH_PATTERN= \[Warning\] InnoDB: Tablespace \d+ was not found at .*u[1-5].ibd, and innodb_force_recovery was set. All redo log for this tablespace will be ignored!.*; --source include/search_pattern_in_file.inc --let $restart_parameters= diff --git a/mysql-test/suite/innodb/t/log_file_name_debug.test b/mysql-test/suite/innodb/t/log_file_name_debug.test index 44012d38c8ee6..0aaf798e2b379 100644 --- a/mysql-test/suite/innodb/t/log_file_name_debug.test +++ b/mysql-test/suite/innodb/t/log_file_name_debug.test @@ -32,7 +32,6 @@ CREATE TABLE t1(a INT PRIMARY KEY) ENGINE=InnoDB; SELECT * FROM t1; --let SEARCH_FILE = $MYSQLTEST_VARDIR/log/mysqld.1.err ---let SEARCH_RANGE = -50000 --let SEARCH_PATTERN = InnoDB: Tablespace 4294967280 was not found at .*, but there were no modifications either --source include/search_pattern_in_file.inc diff --git a/mysql-test/suite/innodb/t/log_file_size.test b/mysql-test/suite/innodb/t/log_file_size.test index 4705ca680917c..ae6442a7994ba 100644 --- a/mysql-test/suite/innodb/t/log_file_size.test +++ b/mysql-test/suite/innodb/t/log_file_size.test @@ -29,7 +29,8 @@ BEGIN; INSERT INTO t1 VALUES (42); let $restart_parameters = --innodb-log-file-size=6M; ---source include/kill_and_restart_mysqld.inc +let $shutdown_timeout=0; +--source include/restart_mysqld.inc SELECT * FROM t1; @@ -38,7 +39,8 @@ BEGIN; DELETE FROM t1; let $restart_parameters = --innodb-log-files-in-group=3 --innodb-log-file-size=5M; ---source include/kill_and_restart_mysqld.inc +--source include/restart_mysqld.inc +let $shutdown_timeout=; SELECT * FROM t1; @@ -46,7 +48,6 @@ INSERT INTO t1 VALUES (0),(123); let MYSQLD_DATADIR= `select @@datadir`; let SEARCH_ABORT = NOT FOUND; -let SEARCH_RANGE= -50000; let SEARCH_FILE= $MYSQLTEST_VARDIR/log/mysqld.1.err; BEGIN; @@ -73,7 +74,7 @@ let SEARCH_PATTERN= syntax 
error in innodb_log_group_home_dir; --source include/restart_mysqld.inc --error ER_UNKNOWN_STORAGE_ENGINE SELECT * FROM t1; -let SEARCH_PATTERN= InnoDB: Starting crash recovery from checkpoint LSN=; +let SEARCH_PATTERN= InnoDB: Starting crash recovery from checkpoint LSN=.*; --source include/search_pattern_in_file.inc --let $restart_parameters= --debug=d,innodb_log_abort_3 diff --git a/mysql-test/suite/innodb/t/read_only_recovery.test b/mysql-test/suite/innodb/t/read_only_recovery.test index b111d96debebf..a1a69be724bac 100644 --- a/mysql-test/suite/innodb/t/read_only_recovery.test +++ b/mysql-test/suite/innodb/t/read_only_recovery.test @@ -20,7 +20,9 @@ BEGIN; INSERT INTO t VALUES(0); ROLLBACK; --let $restart_parameters= --innodb-force-recovery=3 ---source include/kill_and_restart_mysqld.inc +--let $shutdown_timeout= 0 +--source include/restart_mysqld.inc +--let $shutdown_timeout= 30 --disconnect con1 SELECT * FROM t; SET TRANSACTION ISOLATION LEVEL READ UNCOMMITTED; diff --git a/mysql-test/suite/innodb/t/temporary_table.test b/mysql-test/suite/innodb/t/temporary_table.test index 9f63fe52f3b50..f841acff1c083 100644 --- a/mysql-test/suite/innodb/t/temporary_table.test +++ b/mysql-test/suite/innodb/t/temporary_table.test @@ -122,7 +122,6 @@ create temporary table t1 (keyc int, c1 char(100), c2 char(100)) engine = innodb --echo # test various bad start-up parameters let SEARCH_FILE = $MYSQLTEST_VARDIR/log/mysqld.1.err; -let SEARCH_RANGE = -50000; let SEARCH_ABORT = NOT FOUND; let $check_no_innodb=SELECT * FROM INFORMATION_SCHEMA.ENGINES WHERE engine = 'innodb' AND support IN ('YES', 'DEFAULT', 'ENABLED'); diff --git a/mysql-test/suite/innodb/t/xa_recovery.test b/mysql-test/suite/innodb/t/xa_recovery.test index f5c2b6555454c..957b758d05cee 100644 --- a/mysql-test/suite/innodb/t/xa_recovery.test +++ b/mysql-test/suite/innodb/t/xa_recovery.test @@ -15,7 +15,8 @@ connect (con1,localhost,root); XA START 'x'; UPDATE t1 set a=2; XA END 'x'; XA PREPARE 'x'; connection 
default; ---source include/kill_and_restart_mysqld.inc +--let $shutdown_timeout=0 +--source include/restart_mysqld.inc disconnect con1; connect (con1,localhost,root); diff --git a/mysql-test/suite/innodb_fts/r/crash_recovery.result b/mysql-test/suite/innodb_fts/r/crash_recovery.result index 2ff867b70feb3..7bf86631d1e3d 100644 --- a/mysql-test/suite/innodb_fts/r/crash_recovery.result +++ b/mysql-test/suite/innodb_fts/r/crash_recovery.result @@ -23,7 +23,6 @@ DELETE FROM articles LIMIT 1; ROLLBACK; disconnect flush_redo_log; connection default; -# Kill and restart INSERT INTO articles (title,body) VALUES ('MySQL Tutorial','DBMS stands for DataBase ...'); CREATE FULLTEXT INDEX idx ON articles (title,body); @@ -52,7 +51,6 @@ DELETE FROM articles LIMIT 1; ROLLBACK; disconnect flush_redo_log; connection default; -# Kill and restart INSERT INTO articles (title,body) VALUES ('MySQL Tutorial','DBMS stands for DataBase ...'); SELECT * FROM articles @@ -83,7 +81,6 @@ INSERT INTO articles VALUES BEGIN; INSERT INTO articles VALUES (100, 200, 'MySQL Tutorial','DBMS stands for DataBase ...'); -# Kill and restart INSERT INTO articles VALUES (8, 12, 'MySQL Tutorial','DBMS stands for DataBase ...'); SELECT * FROM articles WHERE MATCH (title, body) AGAINST ('Tutorial' IN NATURAL LANGUAGE MODE); diff --git a/mysql-test/suite/innodb_fts/t/crash_recovery.test b/mysql-test/suite/innodb_fts/t/crash_recovery.test index 63c920a91ecdc..63843ef8511d2 100644 --- a/mysql-test/suite/innodb_fts/t/crash_recovery.test +++ b/mysql-test/suite/innodb_fts/t/crash_recovery.test @@ -47,7 +47,8 @@ ROLLBACK; --disconnect flush_redo_log --connection default ---source include/kill_and_restart_mysqld.inc +let $shutdown_timeout=0; +--source include/restart_mysqld.inc # This insert will re-initialize the Doc ID counter, it should not crash INSERT INTO articles (title,body) VALUES @@ -85,7 +86,7 @@ ROLLBACK; --disconnect flush_redo_log --connection default ---source include/kill_and_restart_mysqld.inc +--source 
include/restart_mysqld.inc # This insert will re-initialize the Doc ID counter, it should not crash INSERT INTO articles (title,body) VALUES @@ -126,7 +127,7 @@ BEGIN; INSERT INTO articles VALUES (100, 200, 'MySQL Tutorial','DBMS stands for DataBase ...'); ---source include/kill_and_restart_mysqld.inc +--source include/restart_mysqld.inc # This would re-initialize the FTS index and do the re-tokenization # of above records diff --git a/mysql-test/suite/innodb_zip/r/innochecksum.result b/mysql-test/suite/innodb_zip/r/innochecksum.result index ff1bccfb60cc9..31d9450df8021 100644 --- a/mysql-test/suite/innodb_zip/r/innochecksum.result +++ b/mysql-test/suite/innodb_zip/r/innochecksum.result @@ -14,16 +14,16 @@ insert into t1 values(3,"compressed table"); [2]: check the innochecksum with full form --strict-check=crc32 [3]: check the innochecksum with short form -C crc32 [4]: check the innochecksum with --no-check ignores algorithm check, warning is expected -FOUND /Error: --no-check must be associated with --write option./ in my_restart.err +FOUND 1 /Error: --no-check must be associated with --write option./ in my_restart.err [5]: check the innochecksum with short form --no-check ignores algorithm check, warning is expected -FOUND /Error: --no-check must be associated with --write option./ in my_restart.err +FOUND 1 /Error: --no-check must be associated with --write option./ in my_restart.err [6]: check the innochecksum with full form strict-check & no-check , an error is expected -FOUND /Error: --strict-check option cannot be used together with --no-check option./ in my_restart.err +FOUND 1 /Error: --strict-check option cannot be used together with --no-check option./ in my_restart.err [7]: check the innochecksum with short form strict-check & no-check , an error is expected -FOUND /Error: --strict-check option cannot be used together with --no-check option./ in my_restart.err +FOUND 1 /Error: --strict-check option cannot be used together with --no-check option./ in 
my_restart.err [8]: check the innochecksum with short & full form combination # strict-check & no-check, an error is expected -FOUND /Error: --strict-check option cannot be used together with --no-check option./ in my_restart.err +FOUND 1 /Error: --strict-check option cannot be used together with --no-check option./ in my_restart.err [9]: check the innochecksum with full form --strict-check=innodb [10]: check the innochecksum with full form --strict-check=none # when server Default checksum=crc32 @@ -32,16 +32,16 @@ FOUND /Error: --strict-check option cannot be used together with --no-check opti [12]: check the innochecksum with short form -C none # when server Default checksum=crc32 [13]: check strict-check with invalid values -FOUND /Error while setting value \'strict_innodb\' to \'strict-check\'/ in my_restart.err -FOUND /Error while setting value \'strict_innodb\' to \'strict-check\'/ in my_restart.err -FOUND /Error while setting value \'strict_crc32\' to \'strict-check\'/ in my_restart.err -FOUND /Error while setting value \'strict_crc32\' to \'strict-check\'/ in my_restart.err -FOUND /Error while setting value \'strict_none\' to \'strict-check\'/ in my_restart.err -FOUND /Error while setting value \'strict_none\' to \'strict-check\'/ in my_restart.err -FOUND /Error while setting value \'InnoBD\' to \'strict-check\'/ in my_restart.err -FOUND /Error while setting value \'InnoBD\' to \'strict-check\'/ in my_restart.err -FOUND /Error while setting value \'crc\' to \'strict-check\'/ in my_restart.err -FOUND /Error while setting value \'no\' to \'strict-check\'/ in my_restart.err +FOUND 1 /Error while setting value \'strict_innodb\' to \'strict-check\'/ in my_restart.err +FOUND 1 /Error while setting value \'strict_innodb\' to \'strict-check\'/ in my_restart.err +FOUND 1 /Error while setting value \'strict_crc32\' to \'strict-check\'/ in my_restart.err +FOUND 1 /Error while setting value \'strict_crc32\' to \'strict-check\'/ in my_restart.err +FOUND 1 /Error while 
setting value \'strict_none\' to \'strict-check\'/ in my_restart.err +FOUND 1 /Error while setting value \'strict_none\' to \'strict-check\'/ in my_restart.err +FOUND 1 /Error while setting value \'InnoBD\' to \'strict-check\'/ in my_restart.err +FOUND 1 /Error while setting value \'InnoBD\' to \'strict-check\'/ in my_restart.err +FOUND 1 /Error while setting value \'crc\' to \'strict-check\'/ in my_restart.err +FOUND 1 /Error while setting value \'no\' to \'strict-check\'/ in my_restart.err [14a]: when server default checksum=crc32 rewrite new checksum=crc32 with innochecksum # Also check the long form of write option. [14b]: when server default checksum=crc32 rewrite new checksum=innodb with innochecksum @@ -85,7 +85,7 @@ c1 c2 1 Innochecksum InnoDB1 # Stop server [18]:check Innochecksum with invalid write options -FOUND /Error while setting value \'strict_crc32\' to \'write\'/ in my_restart.err -FOUND /Error while setting value \'strict_innodb\' to \'write\'/ in my_restart.err -FOUND /Error while setting value \'crc23\' to \'write\'/ in my_restart.err +FOUND 1 /Error while setting value \'strict_crc32\' to \'write\'/ in my_restart.err +FOUND 1 /Error while setting value \'strict_innodb\' to \'write\'/ in my_restart.err +FOUND 1 /Error while setting value \'crc23\' to \'write\'/ in my_restart.err DROP TABLE tab1; diff --git a/mysql-test/suite/innodb_zip/r/innochecksum_3.result b/mysql-test/suite/innodb_zip/r/innochecksum_3.result index da7de031f421f..800556c4ff346 100644 --- a/mysql-test/suite/innodb_zip/r/innochecksum_3.result +++ b/mysql-test/suite/innodb_zip/r/innochecksum_3.result @@ -206,10 +206,10 @@ Filename::tab#.ibd # allow-mismatches,page,start-page,end-page [9]: check the both short and long options "page" and "start-page" when # seek value is larger than file size. 
-FOUND /Error: Unable to seek to necessary offset: Invalid argument/ in my_restart.err -FOUND /Error: Unable to seek to necessary offset: Invalid argument/ in my_restart.err -FOUND /Error: Unable to seek to necessary offset: Invalid argument/ in my_restart.err -FOUND /Error: Unable to seek to necessary offset: Invalid argument/ in my_restart.err +FOUND 1 /Error: Unable to seek to necessary offset: Invalid argument/ in my_restart.err +FOUND 1 /Error: Unable to seek to necessary offset: Invalid argument/ in my_restart.err +FOUND 1 /Error: Unable to seek to necessary offset: Invalid argument/ in my_restart.err +FOUND 1 /Error: Unable to seek to necessary offset: Invalid argument/ in my_restart.err [34]: check the invalid upper bound values for options, allow-mismatches, end-page, start-page and page. # innochecksum will fail with error code: 1 NOT FOUND /Incorrect unsigned integer value: '18446744073709551616'/ in my_restart.err diff --git a/mysql-test/suite/rpl/r/rpl_checksum.result b/mysql-test/suite/rpl/r/rpl_checksum.result index e74e5af9f84fa..a74b688d7222d 100644 --- a/mysql-test/suite/rpl/r/rpl_checksum.result +++ b/mysql-test/suite/rpl/r/rpl_checksum.result @@ -174,7 +174,7 @@ INSERT INTO t4 VALUES (2); connection slave; include/wait_for_slave_sql_error.inc [errno=1590] Last_SQL_Error = 'The incident LOST_EVENTS occurred on the master. Message: error writing to the binary log' -FOUND /Slave SQL: The incident LOST_EVENTS occurred on the master\. Message: error writing to the binary log, Internal MariaDB error code: 1590/ in mysqld.2.err +FOUND 1 /Slave SQL: The incident LOST_EVENTS occurred on the master\. 
Message: error writing to the binary log, Internal MariaDB error code: 1590/ in mysqld.2.err SELECT * FROM t4 ORDER BY a; a 1 diff --git a/mysql-test/suite/rpl/r/rpl_gtid_errorlog.result b/mysql-test/suite/rpl/r/rpl_gtid_errorlog.result index db80abf1df236..593f83a79467f 100644 --- a/mysql-test/suite/rpl/r/rpl_gtid_errorlog.result +++ b/mysql-test/suite/rpl/r/rpl_gtid_errorlog.result @@ -49,8 +49,8 @@ a 3 4 5 -FOUND /Slave SQL: Error 'Duplicate entry .* on query\. .*Query: '.*', Gtid 0-1-100, Internal MariaDB error code:|Slave SQL: Could not execute Write_rows.*table test.t1; Duplicate entry.*, Gtid 0-1-100, Internal MariaDB error/ in mysqld.2.err -FOUND /Slave SQL: The incident LOST_EVENTS occurred on the master\. Message: , Internal MariaDB error code: 1590/ in mysqld.2.err +FOUND 1 /Slave SQL: Error 'Duplicate entry .* on query\. .*Query: '.*', Gtid 0-1-100, Internal MariaDB error code:|Slave SQL: Could not execute Write_rows.*table test.t1; Duplicate entry.*, Gtid 0-1-100, Internal MariaDB error/ in mysqld.2.err +FOUND 1 /Slave SQL: The incident LOST_EVENTS occurred on the master\. Message: , Internal MariaDB error code: 1590/ in mysqld.2.err connection master; DROP TABLE t1; connection master; diff --git a/mysql-test/suite/rpl/t/rpl_gtid_errorlog.test b/mysql-test/suite/rpl/t/rpl_gtid_errorlog.test index 2ae910ff3e963..ea3210621001d 100644 --- a/mysql-test/suite/rpl/t/rpl_gtid_errorlog.test +++ b/mysql-test/suite/rpl/t/rpl_gtid_errorlog.test @@ -68,7 +68,6 @@ if(!$log_error_) let $log_error_ = $MYSQLTEST_VARDIR/log/mysqld.2.err; } --let SEARCH_FILE=$log_error_ ---let SEARCH_RANGE=-50000 --let SEARCH_PATTERN=Slave SQL: Error 'Duplicate entry .* on query\. .*Query: '.*', Gtid 0-1-100, Internal MariaDB error code:|Slave SQL: Could not execute Write_rows.*table test.t1; Duplicate entry.*, Gtid 0-1-100, Internal MariaDB error --source include/search_pattern_in_file.inc --let SEARCH_PATTERN=Slave SQL: The incident LOST_EVENTS occurred on the master\. 
Message: , Internal MariaDB error code: 1590 diff --git a/mysql-test/suite/rpl/t/rpl_stop_slave_error.test b/mysql-test/suite/rpl/t/rpl_stop_slave_error.test index a88981c15c4fd..10d7c7736f18e 100644 --- a/mysql-test/suite/rpl/t/rpl_stop_slave_error.test +++ b/mysql-test/suite/rpl/t/rpl_stop_slave_error.test @@ -9,7 +9,6 @@ sync_slave_with_master; source include/stop_slave.inc; let SEARCH_FILE=$MYSQLTEST_VARDIR/tmp/slave_log.err; let SEARCH_PATTERN=Error reading packet from server: Lost connection; -let SEARCH_RANGE= -50000; source include/search_pattern_in_file.inc; source include/start_slave.inc; diff --git a/mysql-test/t/ctype_collate.test b/mysql-test/t/ctype_collate.test index 23d34deb9816c..6704395a0a1d4 100644 --- a/mysql-test/t/ctype_collate.test +++ b/mysql-test/t/ctype_collate.test @@ -308,3 +308,34 @@ DROP FUNCTION getText; DROP DATABASE test1; USE test; SET NAMES latin1; + +--echo # +--echo # MDEV-11320, MySQL BUG#81810: Inconsistent sort order for blob/text between InnoDB and filesort +--echo # + +CREATE TABLE t1 ( + b LONGTEXT CHARACTER SET "latin1" COLLATE "latin1_bin", + KEY b (b(32)) +); +INSERT INTO t1 (b) VALUES ('a'), (_binary 0x1), (_binary 0x0), (''); + + +drop table t1; + +CREATE TABLE t1 ( + b LONGTEXT CHARACTER SET "latin1" COLLATE "latin1_bin", + PRIMARY KEY b (b(32)) +); + +INSERT INTO t1 (b) VALUES ('a'), (_binary 0x1), (_binary 0x0), (''); + +explain +select hex(b) from t1 force index (PRIMARY) where b<'zzz'; +select hex(b) from t1 force index (PRIMARY) where b<'zzz'; + +explain +select hex(b) from t1 where b<'zzz' order by b; +select hex(b) from t1 where b<'zzz' order by b; + +drop table t1; + diff --git a/mysql-test/t/named_pipe.test b/mysql-test/t/named_pipe.test index af74c200e9654..8503907b80801 100644 --- a/mysql-test/t/named_pipe.test +++ b/mysql-test/t/named_pipe.test @@ -28,6 +28,5 @@ let $MYSQLD_DATADIR= `select @@datadir`; --error 1 --exec $MYSQLD_CMD --enable-named-pipe --skip-networking --log-error=second-mysqld.err let 
SEARCH_FILE=$MYSQLD_DATADIR/second-mysqld.err; -let SEARCH_RANGE= -50; let SEARCH_PATTERN=\[ERROR\] Create named pipe failed; source include/search_pattern_in_file.inc; diff --git a/mysql-test/t/range.test b/mysql-test/t/range.test index 30f4419bd7e4b..ab951809b7a0b 100644 --- a/mysql-test/t/range.test +++ b/mysql-test/t/range.test @@ -1857,3 +1857,195 @@ DROP TABLE t1; --echo # --echo # End of 10.1 tests --echo # + +--echo # +--echo # MDEV-10454: range access keys extracted +--echo # from IN () +--echo # + +create table t1(a int, b int, c varchar(16), key idx(a,b)) engine=myisam; + +insert into t1 values + (1,1,'xx'), (2,2,'yyy'), (3,3,'zzzz'), (1,2,'zz'), (1,3,'x'), + (2,3,'yy'), (4,5,'ww'), (7,8,'xxxxx'), (4,3,'zyx'), (1,2,'uuu'), + (2,1,'w'), (5,5,'wx'), (2,3,'ww'), (7,7,'xxxyy'), (3,3,'zyxw'), + (3,2,'uuuw'), (2,2,'wxz'), (5,5,'xw'), (12,12,'xx'), (12,12,'y'), + (13,13,'z'), (11,12,'zz'), (11,13,'x'), (12,13,'y'), (14,15,'w'), + (17,18,'xx'), (14,13,'zx'), (11,12,'u'), (12,11,'w'), (5,5,'wx'), + (12,13,'ww'), (17,17,'xxxyy'), (13,13,'zyxw'), (13,12,'uuuw'), (12,12,'wxz'), + (15,15,'xw'), (1,1,'xa'), (2,2,'yya'), (3,3,'zzza'), (1,2,'za'), + (1,3,'xb'), (2,3,'ya'), (4,5,'wa'), (7,8,'xxxxa'), (4,3,'zya'), + (1,2,'uua'), (2,1,'wb'), (5,5,'wc'), (2,3,'wa'), (7,7,'xxxya'), + (3,3,'zyxa'), (3,2,'uuua'), (2,2,'wxa'), (5,5,'xa'), (12,12,'xa'), + (22,12,'yb'), (23,13,'zb'), (21,12,'za'), (24,13,'c'), (32,13,'d'), + (34,15,'wd'), (47,18,'xa'), (54,13,'za'), (51,12,'ub'), (52,11,'wc'), + (5,5,'wd'), (62,13,'wa'), (67,17,'xxxya'), (63,13,'zyxa'), (73,12,'uuua'), + (82,12,'wxa'), (85,15,'xd'); + +--echo # range access to t1 by 2-component keys for index idx +let $q1= +select * from t1 where (a,b) IN ((2, 3),(3,3),(8,8),(7,7)); +eval explain $q1; +eval explain format=json $q1; +eval $q1; +eval prepare stmt from "$q1"; +execute stmt; +execute stmt; +deallocate prepare stmt; + +--echo # range access to t1 by 1-component keys for index idx +let $q2= +select * from t1 where 
(a,b+a) IN ((4,9),(8,8),(7,7)); +eval explain $q2; +eval explain format=json $q2; +eval $q2; + +--echo # range access to t1 by 1-component keys for index idx +let $q3= +select * from t1 where (a,b) IN ((4,a-1),(8,a+8),(7,a+7)); +eval explain $q3; +eval explain format=json $q3; +eval $q3; + +# this setting should be removed after fixes for mdev-12186, mdev-12187 +set @save_optimizer_switch=@@optimizer_switch; +set optimizer_switch='index_merge=off'; + +create table t2( + d int, e int, key idx1(d), key idx2(e), f varchar(32) +) engine=myisam; + +insert into t2 values + (9,5,'a'), (9,8,'b'), (9,3,'c'), (9,2,'d'), (9,1,'e'), + (6,5,'f'), (6,3,'g'), (6,7,'h'), (3,3,'i'), (6,2,'j'), + (9,5,'aa'), (9,8,'ba'), (9,3,'ca'), (2,2,'da'), (9,1,'ea'), + (6,5,'fa'), (6,3,'ga'), (6,7,'ha'), (9,3,'ia'), (6,2,'ja'); + +--echo # join order: (t2,t1) with ref access of t1 +--echo # range access to t1 by keys for index idx1 +let $q4= +select * from t1,t2 + where a = d and (a,e) in ((3,3),(7,7),(2,2)); +eval explain $q4; +eval explain format=json $q4; +eval $q4; + +insert into t2 values + (4,5,'a'), (7,8,'b'), (4,3,'c'), (1,2,'d'), (2,1,'e'), (5,5,'f'), + (2,3,'g'), (7,7,'h'), (3,3,'i'), (3,2,'j'), (2,2,'k'), (5,5,'l'), + (4,5,'aa'), (7,8,'bb'), (4,3,'cc'), (1,2,'dd'), (2,1,'ee'), (9,5,'ff'), + (2,3,'gg'), (7,7,'hh'), (3,3,'ii'), (3,2,'jj'), (2,2,'kk'), (9,5,'ll'), + (4,5,'aaa'), (7,8,'bbb'), (4,3,'ccc'), (1,2,'ddd'), (2,1,'eee'), (5,5,'fff'), + (2,3,'ggg'), (7,7,'hhh'), (3,3,'iii'), (3,2,'jjj'), (2,2,'kkk'), (5,5,'lll'), + (14,15,'a'), (17,18,'b'), (14,13,'c'), (11,12,'d'), (12,11,'e'), (15,15,'f'), + (12,13,'g'), (17,17,'h'), (13,13,'i'), (13,12,'j'), (12,12,'k'), (15,15,'l'), + (24,25,'a'), (27,28,'b'), (24,23,'c'), (21,22,'d'), (22,21,'e'), (25,25,'f'), + (22,23,'g'), (27,27,'h'), (23,23,'i'), (23,22,'j'), (22,22,'k'), (25,25,'l'), + (34,35,'a'), (37,38,'b'), (34,33,'c'), (31,32,'d'), (32,31,'e'), (35,35,'f'), + (32,33,'g'), (37,37,'h'), (33,33,'i'), (33,32,'j'), (32,32,'k'), 
(35,35,'l'), + (44,45,'a'), (47,48,'b'), (44,43,'c'), (41,42,'d'), (42,41,'e'), (45,45,'f'), + (42,43,'g'), (47,47,'h'), (43,43,'i'), (43,42,'j'), (42,42,'k'), (45,45,'l'); + +--echo # join order: (t1,t2) with ref access of t2 +--echo # range access to t1 by 1-component keys for index idx +let $q5= +select * from t1,t2 + where a = d and (a,e) in ((3,3),(7,7),(8,8)) and length(f) = 1; +eval explain $q5; +eval explain format=json $q5; +eval $q5; +eval prepare stmt from "$q5"; +execute stmt; +execute stmt; +deallocate prepare stmt; + +insert into t1 select * from t1; + +--echo # join order: (t2,t1) with ref access of t1 +--echo # range access to t2 by keys for index idx2 +let $q6= +select * from t1,t2 + where a = d and (a,e) in ((4,4),(7,7),(8,8)) and length(f) = 1; +eval explain $q6; +eval explain format=json $q6; +eval $q6; + +alter table t2 drop index idx1, drop index idx2, add index idx3(d,e); + +--echo # join order: (t2,t1) with ref access of t1 +--echo # range access to t2 by 2-component keys for index idx3 +let $q7= +select * from t1,t2 + where a = d and (a,e) in ((4,4),(7,7),(8,8)) and length(f) = 1; +eval explain $q7; +eval explain format=json $q7; +eval $q7; + +--echo # join order: (t1,t2) with ref access of t2 +--echo # range access to t1 by 1-component keys for index idx +let $q8= +select * from t1,t2 + where a = d and (a,e) in ((4,d+1),(7,d+1),(8,d+1)) and length(f) = 1; +eval explain $q8; +eval explain format=json $q8; +eval $q8; + +--echo # join order: (t1,t2) with ref access of t2 +--echo # no range access +let $q9= +select * from t1,t2 + where a = d and (a,e) in ((e,d+1),(7,7),(8,8)) and length(f) = 1; +eval explain $q9; +eval explain format=json $q9; +eval $q9; + +--echo # join order: (t1,t2) with ref access of t2 +--echo # range access to t1 by 1-component keys for index idx +let $q10= +select * from t1,t2 + where a = d and (a,2) in ((2,2),(7,7),(8,8)) and + length(c) = 1 and length(f) = 1; +eval explain $q10; +eval explain format=json $q10; +eval 
$q10; +eval prepare stmt from "$q10"; +execute stmt; +execute stmt; +deallocate prepare stmt; + +create table t3 (id int primary key, v int) engine=myisam; + +insert into t3 values + (3,2), (1,1), (4,12), (2,15); + +--echo # join order: (t3,t1,t2) with const t3 and ref access of t2 +--echo # range access to t1 by 1-component keys for index idx +let $q11= +select * from t1,t2,t3 + where id = 1 and a = d and + (a,v+1) in ((2,2),(7,7),(8,8)) and + length(c) = 1 and length(f) = 1; +eval explain $q11; +eval explain format=json $q11; +eval $q11; + +--echo # IN predicate is always FALSE +let $q12= +select * from t1,t2,t3 + where id = 1 and a = d and + (a,v+1) in ((9,9),(7,7),(8,8)) and + length(c) = 1 and length(f) = 1; +eval explain $q12; +eval prepare stmt from "$q12"; +execute stmt; +execute stmt; +deallocate prepare stmt; + +set optimizer_switch=@save_optimizer_switch; + +drop table t1,t2,t3; + +--echo # +--echo # End of 10.2 tests +--echo # + diff --git a/mysql-test/t/shutdown.test b/mysql-test/t/shutdown.test index 7080f9a1a71a6..775628e441d4a 100644 --- a/mysql-test/t/shutdown.test +++ b/mysql-test/t/shutdown.test @@ -34,6 +34,5 @@ drop user user1@localhost; --echo # MDEV-8491 - On shutdown, report the user and the host executed that. --echo # --let SEARCH_FILE= $MYSQLTEST_VARDIR/log/mysqld.1.err ---let SEARCH_RANGE= -50000 --let SEARCH_PATTERN=mysqld(\.exe)? 
\(root\[root\] @ localhost \[(::1)?\]\): Normal shutdown --source include/search_pattern_in_file.inc diff --git a/mysql-test/t/view.test b/mysql-test/t/view.test index eaaebba166c41..df5c7d3495da8 100644 --- a/mysql-test/t/view.test +++ b/mysql-test/t/view.test @@ -5387,6 +5387,7 @@ create view v1 as select 1; --let $MYSQLD_DATADIR= `select @@datadir` --let SEARCH_FILE= $MYSQLD_DATADIR/test/v1.frm +--let SEARCH_RANGE= 50000 --let SEARCH_PATTERN=mariadb-version --source include/search_pattern_in_file.inc diff --git a/mysql-test/t/wait_timeout_not_windows.test b/mysql-test/t/wait_timeout_not_windows.test index de4904fada2e5..5073177984586 100644 --- a/mysql-test/t/wait_timeout_not_windows.test +++ b/mysql-test/t/wait_timeout_not_windows.test @@ -10,7 +10,6 @@ set @@wait_timeout=1; sleep 2; connection default; let SEARCH_FILE=$MYSQLTEST_VARDIR/log/mysqld.1.err; -let SEARCH_RANGE= -50; let SEARCH_PATTERN= Aborted.*Got timeout reading communication packets; source include/search_pattern_in_file.inc; set global log_warnings=@@log_warnings; diff --git a/mysys/my_bit.c b/mysys/my_bit.c index d36f52bb3c0fb..9ceb083cd4870 100644 --- a/mysys/my_bit.c +++ b/mysys/my_bit.c @@ -17,24 +17,6 @@ #include -const char _my_bits_nbits[256] = { - 0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4, - 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5, - 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5, - 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, - 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5, - 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, - 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, - 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7, - 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5, - 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, - 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, - 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7, - 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, - 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7, - 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7, - 4, 
5, 5, 6, 5, 6, 6, 7, 5, 6, 6, 7, 6, 7, 7, 8, -}; /* perl -e 'print map{", 0x".unpack H2,pack B8,unpack b8,chr$_}(0..255)' diff --git a/plugin/aws_key_management/CMakeLists.txt b/plugin/aws_key_management/CMakeLists.txt index d83c483018333..66b8074406f40 100644 --- a/plugin/aws_key_management/CMakeLists.txt +++ b/plugin/aws_key_management/CMakeLists.txt @@ -51,10 +51,11 @@ ENDIF() FIND_LIBRARY(AWS_CPP_SDK_CORE NAMES aws-cpp-sdk-core PATH_SUFFIXES "${SDK_INSTALL_BINARY_PREFIX}") FIND_LIBRARY(AWS_CPP_SDK_KMS NAMES aws-cpp-sdk-kms PATH_SUFFIXES "${SDK_INSTALL_BINARY_PREFIX}") SET(CMAKE_REQUIRED_FLAGS ${CXX11_FLAGS}) -CHECK_INCLUDE_FILE_CXX(aws/kms/KMSClient.h HAVE_AWS_HEADERS) +FIND_PATH(AWS_CPP_SDK_INCLUDE_DIR NAMES aws/kms/KMSClient.h) -IF(AWS_CPP_SDK_CORE AND AWS_CPP_SDK_KMS AND HAVE_AWS_HEADERS) - # AWS C++ SDK installed +IF(AWS_CPP_SDK_CORE AND AWS_CPP_SDK_KMS AND AWS_CPP_SDK_INCLUDE_DIR) + # AWS C++ SDK installed + INCLUDE_DIRECTORIES(${AWS_CPP_SDK_INCLUDE_DIR}) SET(AWS_SDK_LIBS ${AWS_CPP_SDK_CORE} ${AWS_CPP_SDK_KMS}) ELSE() OPTION(AWS_SDK_EXTERNAL_PROJECT "Allow download and build AWS C++ SDK" OFF) @@ -95,14 +96,28 @@ ELSE() SET(EXTRA_SDK_CMAKE_FLAGS ${EXTRA_SDK_CMAKE_FLAGS} -DCMAKE_CXX_COMPILER=${CMAKE_CXX_COMPILER}) ENDIF() + SET(byproducts ) + # We do not need to build the whole SDK , just 2 of its libs + set(AWS_SDK_LIBS aws-cpp-sdk-core aws-cpp-sdk-kms) + FOREACH(lib ${AWS_SDK_LIBS}) + ADD_LIBRARY(${lib} STATIC IMPORTED GLOBAL) + ADD_DEPENDENCIES(${lib} aws_sdk_cpp) + SET(loc "${CMAKE_CURRENT_BINARY_DIR}/aws_sdk_cpp/lib/${CMAKE_STATIC_LIBRARY_PREFIX}${lib}${CMAKE_STATIC_LIBRARY_SUFFIX}") + IF(CMAKE_VERSION VERSION_GREATER "3.1") + SET(byproducts ${byproducts} BUILD_BYPRODUCTS ${loc}) + ENDIF() + SET_TARGET_PROPERTIES(${lib} PROPERTIES IMPORTED_LOCATION ${loc}) + ENDFOREACH() + SET(AWS_SDK_PATCH_COMMAND ) ExternalProject_Add( aws_sdk_cpp GIT_REPOSITORY "https://github.com/awslabs/aws-sdk-cpp.git" GIT_TAG "1.0.8" UPDATE_COMMAND "" - SOURCE_DIR 
"${CMAKE_BINARY_DIR}/aws-sdk-cpp" - CMAKE_ARGS + SOURCE_DIR "${CMAKE_CURRENT_BINARY_DIR}/aws-sdk-cpp" + ${byproducts} + CMAKE_ARGS -DBUILD_ONLY=kms -DBUILD_SHARED_LIBS=OFF -DFORCE_SHARED_CRT=OFF @@ -111,34 +126,28 @@ ELSE() "-DCMAKE_CXX_FLAGS_RELEASE=${CMAKE_CXX_FLAGS_RELEASE} ${PIC_FLAG}" "-DCMAKE_CXX_FLAGS_MINSIZEREL=${CMAKE_CXX_FLAGS_MINSIZEREL} ${PIC_FLAG}" ${EXTRA_SDK_CMAKE_FLAGS} - -DCMAKE_INSTALL_PREFIX=${CMAKE_BINARY_DIR}/aws_sdk_cpp + -DCMAKE_INSTALL_PREFIX=${CMAKE_CURRENT_BINARY_DIR}/aws_sdk_cpp TEST_COMMAND "" ) SET_TARGET_PROPERTIES(aws_sdk_cpp PROPERTIES EXCLUDE_FROM_ALL TRUE) - # We do not need to build the whole SDK , just 2 of its libs - set(AWS_SDK_LIBS aws-cpp-sdk-core aws-cpp-sdk-kms) - FOREACH(lib ${AWS_SDK_LIBS}) - ADD_LIBRARY(${lib} STATIC IMPORTED GLOBAL) - ADD_DEPENDENCIES(${lib} aws_sdk_cpp) - SET(loc "${CMAKE_BINARY_DIR}/aws_sdk_cpp/lib/${CMAKE_STATIC_LIBRARY_PREFIX}${lib}${CMAKE_STATIC_LIBRARY_SUFFIX}") - SET_TARGET_PROPERTIES(${lib} PROPERTIES IMPORTED_LOCATION ${loc}) - IF(WIN32) - SET_TARGET_PROPERTIES(${lib} PROPERTIES IMPORTED_LINK_INTERFACE_LIBRARIES "bcrypt;winhttp;wininet;userenv") - ELSE() - SET_TARGET_PROPERTIES(${lib} PROPERTIES IMPORTED_LINK_INTERFACE_LIBRARIES "${SSL_LIBRARIES};${CURL_LIBRARIES};${UUID_LIBRARIES}") - ENDIF() - ENDFOREACH() IF(CMAKE_SYSTEM_NAME MATCHES "Linux") # Need whole-archive , otherwise static libraries are not linked SET(AWS_SDK_LIBS -Wl,--whole-archive ${AWS_SDK_LIBS} -Wl,--no-whole-archive) ENDIF() SET_TARGET_PROPERTIES(aws_sdk_cpp PROPERTIES EXCLUDE_FROM_ALL TRUE) - INCLUDE_DIRECTORIES(${CMAKE_BINARY_DIR}/aws_sdk_cpp/include) + INCLUDE_DIRECTORIES(${CMAKE_CURRENT_BINARY_DIR}/aws_sdk_cpp/include) ENDIF() ADD_DEFINITIONS(${SSL_DEFINES}) # Need to know whether openssl should be initialized SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${CXX11_FLAGS}") +IF(WIN32) + SET(AWS_CPP_SDK_DEPENDENCIES bcrypt winhttp wininet userenv version) +ELSE() + SET(AWS_CPP_SDK_DEPENDENCIES ${SSL_LIBRARIES} ${CURL_LIBRARIES} 
${UUID_LIBRARIES}) +ENDIF() MYSQL_ADD_PLUGIN(aws_key_management aws_key_management_plugin.cc - LINK_LIBRARIES ${AWS_SDK_LIBS} + LINK_LIBRARIES ${AWS_SDK_LIBS} ${AWS_CPP_SDK_DEPENDENCIES} COMPONENT aws-key-management) + + diff --git a/sql/field.cc b/sql/field.cc index 362c49b0abf76..40bd6a8a2ddc1 100644 --- a/sql/field.cc +++ b/sql/field.cc @@ -8203,7 +8203,7 @@ void Field_blob::sort_string(uchar *to,uint length) uchar *blob; uint blob_length=get_length(); - if (!blob_length) + if (!blob_length && field_charset->pad_char == 0) bzero(to,length); else { diff --git a/sql/field.h b/sql/field.h index 2ed84b82aee03..caacb5f00b8fb 100644 --- a/sql/field.h +++ b/sql/field.h @@ -3312,7 +3312,7 @@ class Field_blob :public Field_longstr { memcpy(ptr,length,packlength); memcpy(ptr+packlength, &data,sizeof(char*)); } - void set_ptr_offset(my_ptrdiff_t ptr_diff, uint32 length, uchar *data) + void set_ptr_offset(my_ptrdiff_t ptr_diff, uint32 length, const uchar *data) { uchar *ptr_ofs= ADD_TO_PTR(ptr,ptr_diff,uchar*); store_length(ptr_ofs, packlength, length); diff --git a/sql/handler.cc b/sql/handler.cc index e07d656c98605..a6016646d3c8b 100644 --- a/sql/handler.cc +++ b/sql/handler.cc @@ -2212,7 +2212,8 @@ static my_bool snapshot_handlerton(THD *thd, plugin_ref plugin, if (hton->state == SHOW_OPTION_YES && hton->start_consistent_snapshot) { - hton->start_consistent_snapshot(hton, thd); + if (hton->start_consistent_snapshot(hton, thd)) + return TRUE; *((bool *)arg)= false; } return FALSE; @@ -2220,7 +2221,7 @@ static my_bool snapshot_handlerton(THD *thd, plugin_ref plugin, int ha_start_consistent_snapshot(THD *thd) { - bool warn= true; + bool err, warn= true; /* Holding the LOCK_commit_ordered mutex ensures that we get the same @@ -2230,9 +2231,15 @@ int ha_start_consistent_snapshot(THD *thd) have a consistent binlog position. 
*/ mysql_mutex_lock(&LOCK_commit_ordered); - plugin_foreach(thd, snapshot_handlerton, MYSQL_STORAGE_ENGINE_PLUGIN, &warn); + err= plugin_foreach(thd, snapshot_handlerton, MYSQL_STORAGE_ENGINE_PLUGIN, &warn); mysql_mutex_unlock(&LOCK_commit_ordered); + if (err) + { + ha_rollback_trans(thd, true); + return 1; + } + /* Same idea as when one wants to CREATE TABLE in one engine which does not exist: @@ -5428,7 +5435,7 @@ int handler::compare_key(key_range *range) This is used by index condition pushdown implementation. */ -int handler::compare_key2(key_range *range) +int handler::compare_key2(key_range *range) const { int cmp; if (!range) diff --git a/sql/handler.h b/sql/handler.h index 2af3158f8401c..43c8b5fd0e08e 100644 --- a/sql/handler.h +++ b/sql/handler.h @@ -3209,7 +3209,7 @@ class handler :public Sql_alloc virtual int read_range_next(); void set_end_range(const key_range *end_key); int compare_key(key_range *range); - int compare_key2(key_range *range); + int compare_key2(key_range *range) const; virtual int ft_init() { return HA_ERR_WRONG_COMMAND; } void ft_end() { ft_handler=NULL; } virtual FT_INFO *ft_init_ext(uint flags, uint inx,String *key) @@ -3959,10 +3959,16 @@ class handler :public Sql_alloc void mark_trx_read_write_internal(); bool check_table_binlog_row_based_internal(bool binlog_row); - /* Private helpers */ +protected: + /* + These are intended to be used only by handler::ha_xxxx() functions + However, engines that implement read_range_XXX() (like MariaRocks) + or embed other engines (like ha_partition) may need to call these also + */ inline void increment_statistics(ulong SSV::*offset) const; inline void decrement_statistics(ulong SSV::*offset) const; +private: /* Low-level primitives for storage engines. These should be overridden by the storage engine class. 
To call these methods, use diff --git a/sql/item_cmpfunc.h b/sql/item_cmpfunc.h index 9ebfa7fa22893..2af4a32d844b3 100644 --- a/sql/item_cmpfunc.h +++ b/sql/item_cmpfunc.h @@ -2108,6 +2108,7 @@ class Item_func_in :public Item_func_opt_neg, void add_key_fields(JOIN *join, KEY_FIELD **key_fields, uint *and_level, table_map usable_tables, SARGABLE_PARAM **sargables); SEL_TREE *get_mm_tree(RANGE_OPT_PARAM *param, Item **cond_ptr); + SEL_TREE *get_func_row_mm_tree(RANGE_OPT_PARAM *param, Item_row *key_row); Item* propagate_equal_fields(THD *thd, const Context &ctx, COND_EQUAL *cond) { /* @@ -2169,6 +2170,7 @@ class cmp_item_row :public cmp_item cmp_item *make_same(); void store_value_by_template(THD *thd, cmp_item *tmpl, Item *); friend class Item_func_in; + cmp_item *get_comparator(uint i) { return comparators[i]; } }; @@ -2182,6 +2184,7 @@ class in_row :public in_vector uchar *get_value(Item *item); friend class Item_func_in; Item_result result_type() { return ROW_RESULT; } + cmp_item *get_cmp_item() { return &tmp; } }; /* Functions used by where clause */ diff --git a/sql/item_row.h b/sql/item_row.h index f8858738b7829..5084f042473f5 100644 --- a/sql/item_row.h +++ b/sql/item_row.h @@ -120,6 +120,13 @@ class Item_row: public Item, bool check_cols(uint c); bool null_inside() { return with_null; }; void bring_value(); + + Item* propagate_equal_fields(THD *thd, const Context &ctx, COND_EQUAL *cond) + { + Item_args::propagate_equal_fields(thd, Context_identity(), cond); + return this; + } + bool check_vcol_func_processor(void *arg) {return FALSE; } Item *get_copy(THD *thd, MEM_ROOT *mem_root) { return get_item_copy(thd, mem_root, this); } diff --git a/sql/key.cc b/sql/key.cc index bb10e902b8b16..0c931184da7f5 100644 --- a/sql/key.cc +++ b/sql/key.cc @@ -176,7 +176,7 @@ void key_copy(uchar *to_key, uchar *from_record, KEY *key_info, @param key_length specifies length of all keyparts that will be restored */ -void key_restore(uchar *to_record, uchar *from_key, KEY 
*key_info, +void key_restore(uchar *to_record, const uchar *from_key, KEY *key_info, uint key_length) { uint length; diff --git a/sql/key.h b/sql/key.h index 47b981f529811..f2521e4a665a9 100644 --- a/sql/key.h +++ b/sql/key.h @@ -29,7 +29,7 @@ int find_ref_key(KEY *key, uint key_count, uchar *record, Field *field, uint *key_length, uint *keypart); void key_copy(uchar *to_key, uchar *from_record, KEY *key_info, uint key_length, bool with_zerofill= FALSE); -void key_restore(uchar *to_record, uchar *from_key, KEY *key_info, +void key_restore(uchar *to_record, const uchar *from_key, KEY *key_info, uint key_length); bool key_cmp_if_same(TABLE *form,const uchar *key,uint index,uint key_length); void key_unpack(String *to, TABLE *table, KEY *key); diff --git a/sql/opt_range.cc b/sql/opt_range.cc index 6d088cad91ee5..fbdbf76ffd912 100644 --- a/sql/opt_range.cc +++ b/sql/opt_range.cc @@ -7210,6 +7210,215 @@ SEL_TREE *Item_func_in::get_func_mm_tree(RANGE_OPT_PARAM *param, } +/* + The structure Key_col_info is purely auxiliary and is used + only in the method Item_func_in::get_func_row_mm_tree +*/ +struct Key_col_info { + Field *field; /* If != NULL the column can be used for keys */ + cmp_item *comparator; /* If != 0 the column can be evaluated */ +}; + +/** + Build SEL_TREE for the IN predicate whose arguments are rows + + @param param PARAM from SQL_SELECT::test_quick_select + @param key_row First operand of the IN predicate + + @note + The function builds a SEL_TREE for in IN predicate in the case + when the predicate uses row arguments. First the function + detects among the components of the key_row (c[1],...,c[n]) taken + from in the left part the predicate those that can be usable + for building SEL_TREE (c[i1],...,c[ik]). They have to contain + items whose real items are field items referring to the current + table or equal to the items referring to the current table. + For the remaining components of the row it checks whether they + can be evaluated. 
The result of the analysis is put into the + array of structures of the type Key_row_col_info. + + After this the function builds the SEL_TREE for the following + formula that can be inferred from the given IN predicate: + c[i11]=a[1][i11] AND ... AND c[i1k1]=a[1][i1k1] + OR + ... + OR + c[im1]=a[m][im1] AND ... AND c[imkm]=a[m][imkm]. + Here a[1],...,a[m] are all arguments of the IN predicate from + the right part and for each j ij1,...,ijkj is a subset of + i1,...,ik such that a[j][ij1],...,a[j][ijkj] can be evaluated. + + If for some j there no a[j][i1],...,a[j][ik] can be evaluated + then no SEL_TREE can be built for this predicate and the + function immediately returns 0. + + If for some j by using evaluated values of key_row it can be + proven that c[ij1]=a[j][ij1] AND ... AND c[ijkj]=a[j][ijkj] + is always FALSE then this disjunct is omitted. + + @returns + the built SEL_TREE if it can be constructed + 0 - otherwise. +*/ + +SEL_TREE *Item_func_in::get_func_row_mm_tree(RANGE_OPT_PARAM *param, + Item_row *key_row) +{ + DBUG_ENTER("Item_func_in::get_func_row_mm_tree"); + + if (negated) + DBUG_RETURN(0); + + SEL_TREE *res_tree= 0; + uint used_key_cols= 0; + uint col_comparators= 0; + table_map param_comp= ~(param->prev_tables | param->read_tables | + param->current_table); + uint row_cols= key_row->cols(); + Dynamic_array key_cols_info(row_cols); + cmp_item_row *row_cmp_item; + + if (array) + { + in_row *row= static_cast(array); + row_cmp_item= static_cast(row->get_cmp_item()); + } + else + { + DBUG_ASSERT(get_comparator_type_handler(0) == &type_handler_row); + row_cmp_item= static_cast(get_comparator_cmp_item(0)); + } + DBUG_ASSERT(row_cmp_item); + + Item **key_col_ptr= key_row->addr(0); + for(uint i= 0; i < row_cols; i++, key_col_ptr++) + { + Key_col_info key_col_info= {0, NULL}; + Item *key_col= *key_col_ptr; + if (key_col->real_item()->type() == Item::FIELD_ITEM) + { + /* + The i-th component of key_row can be used for key access if + key_col->real_item() 
points to a field of the current table or + if it is equal to a field item pointing to such a field. + */ + Item_field *col_field_item= (Item_field *) (key_col->real_item()); + Field *key_col_field= col_field_item->field; + if (key_col_field->table->map != param->current_table) + { + Item_equal *item_equal= col_field_item->item_equal; + if (item_equal) + { + Item_equal_fields_iterator it(*item_equal); + while (it++) + { + key_col_field= it.get_curr_field(); + if (key_col_field->table->map == param->current_table) + break; + } + } + } + if (key_col_field->table->map == param->current_table) + { + key_col_info.field= key_col_field; + used_key_cols++; + } + } + else if (!(key_col->used_tables() & (param_comp | param->current_table)) + && !key_col->is_expensive()) + { + /* The i-th component of key_row can be evaluated */ + + /* See the comment in Item::get_mm_tree_for_const */ + MEM_ROOT *tmp_root= param->mem_root; + param->thd->mem_root= param->old_root; + + key_col->bring_value(); + key_col_info.comparator= row_cmp_item->get_comparator(i); + key_col_info.comparator->store_value(key_col); + col_comparators++; + + param->thd->mem_root= tmp_root; + } + key_cols_info.push(key_col_info); + } + + if (!used_key_cols) + DBUG_RETURN(0); + + uint omitted_tuples= 0; + Item **arg_start= arguments() + 1; + Item **arg_end= arg_start + argument_count() - 1; + for (Item **arg= arg_start ; arg < arg_end; arg++) + { + uint i; + + /* + First check whether the disjunct constructed for *arg + is really needed + */ + Item_row *arg_tuple= (Item_row *) (*arg); + if (col_comparators) + { + MEM_ROOT *tmp_root= param->mem_root; + param->thd->mem_root= param->old_root; + for (i= 0; i < row_cols; i++) + { + Key_col_info *key_col_info= &key_cols_info.at(i); + if (key_col_info->comparator) + { + Item *arg_col= arg_tuple->element_index(i); + if (!(arg_col->used_tables() & (param_comp | param->current_table)) && + !arg_col->is_expensive() && + key_col_info->comparator->cmp(arg_col)) + { + 
omitted_tuples++; + break; + } + } + } + param->thd->mem_root= tmp_root; + if (i < row_cols) + continue; + } + + /* The disjunct for *arg is needed: build it. */ + SEL_TREE *and_tree= 0; + Item **arg_col_ptr= arg_tuple->addr(0); + for (uint i= 0; i < row_cols; i++, arg_col_ptr++) + { + Key_col_info *key_col_info= &key_cols_info.at(i); + if (!key_col_info->field) + continue; + Item *arg_col= *arg_col_ptr; + if (!(arg_col->used_tables() & (param_comp | param->current_table)) && + !arg_col->is_expensive()) + { + and_tree= tree_and(param, and_tree, + get_mm_parts(param, + key_col_info->field, + Item_func::EQ_FUNC, + arg_col->real_item())); + } + } + if (!and_tree) + { + res_tree= 0; + break; + } + /* Join the disjunct the the OR tree that is being constructed */ + res_tree= !res_tree ? and_tree : tree_or(param, res_tree, and_tree); + } + if (omitted_tuples == argument_count() - 1) + { + /* It's turned out that all disjuncts are always FALSE */ + res_tree= new (param->mem_root) SEL_TREE(SEL_TREE::IMPOSSIBLE, + param->mem_root, param->keys); + } + DBUG_RETURN(res_tree); +} + + /* Build conjunction of all SEL_TREEs for a simple predicate applying equalities @@ -7544,12 +7753,22 @@ SEL_TREE *Item_func_in::get_mm_tree(RANGE_OPT_PARAM *param, Item **cond_ptr) if (const_item()) DBUG_RETURN(get_mm_tree_for_const(param)); - if (key_item()->real_item()->type() != Item::FIELD_ITEM) + SEL_TREE *tree= 0; + switch (key_item()->real_item()->type()) { + case Item::FIELD_ITEM: + tree= get_full_func_mm_tree(param, + (Item_field*) (key_item()->real_item()), + NULL); + break; + case Item::ROW_ITEM: + tree= get_func_row_mm_tree(param, + (Item_row *) (key_item()->real_item())); + break; + default: DBUG_RETURN(0); - Item_field *field= (Item_field*) (key_item()->real_item()); - SEL_TREE *tree= get_full_func_mm_tree(param, field, NULL); + } DBUG_RETURN(tree); -} +} SEL_TREE *Item_equal::get_mm_tree(RANGE_OPT_PARAM *param, Item **cond_ptr) diff --git a/sql/sql_hset.h b/sql/sql_hset.h index 
dc3bd487ce567..4dfddf898f05f 100644 --- a/sql/sql_hset.h +++ b/sql/sql_hset.h @@ -32,10 +32,12 @@ class Hash_set Constructs an empty hash. Does not allocate memory, it is done upon the first insert. Thus does not cause or return errors. */ - Hash_set(uchar *(*K)(const T *, size_t *, my_bool)) + Hash_set(uchar *(*K)(const T *, size_t *, my_bool), + CHARSET_INFO *cs= &my_charset_bin) { my_hash_clear(&m_hash); m_hash.get_key= (my_hash_get_key)K; + m_hash.charset= cs; } /** Destroy the hash by freeing the buckets table. Does @@ -56,7 +58,7 @@ class Hash_set */ bool insert(T *value) { - my_hash_init_opt(&m_hash, &my_charset_bin, START_SIZE, 0, 0, + my_hash_init_opt(&m_hash, m_hash.charset, START_SIZE, 0, 0, m_hash.get_key, 0, MYF(0)); size_t key_len; uchar *v= reinterpret_cast(value); @@ -65,6 +67,10 @@ class Hash_set return my_hash_insert(&m_hash, v); return FALSE; } + bool remove(T *value) + { + return my_hash_delete(&m_hash, reinterpret_cast(value)); + } T *find(const void *key, size_t klen) const { return (T*)my_hash_search(&m_hash, reinterpret_cast(key), klen); @@ -73,6 +79,10 @@ class Hash_set bool is_empty() const { return m_hash.records == 0; } /** Returns the number of unique elements. */ size_t size() const { return static_cast(m_hash.records); } + const T* at(size_t i) const + { + return reinterpret_cast(my_hash_element(const_cast(&m_hash), i)); + } /** An iterator over hash elements. Is not insert-stable. 
*/ class Iterator { diff --git a/sql/sql_select.cc b/sql/sql_select.cc index dd1fa06709ff6..915c7230ed783 100644 --- a/sql/sql_select.cc +++ b/sql/sql_select.cc @@ -4709,6 +4709,8 @@ static uint get_semi_join_select_list_index(Field *field) @param num_values Number of values[] that we are comparing against @param usable_tables Tables which can be used for key optimization @param sargables IN/OUT Array of found sargable candidates + @param row_col_no if = n that > 0 then field is compared only + against the n-th component of row values @note If we are doing a NOT NULL comparison on a NOT NULL field in a outer join @@ -4722,7 +4724,8 @@ static void add_key_field(JOIN *join, KEY_FIELD **key_fields,uint and_level, Item_bool_func *cond, Field *field, bool eq_func, Item **value, uint num_values, - table_map usable_tables, SARGABLE_PARAM **sargables) + table_map usable_tables, SARGABLE_PARAM **sargables, + uint row_col_no= 0) { uint optimize= 0; if (eq_func && @@ -4751,7 +4754,15 @@ add_key_field(JOIN *join, bool optimizable=0; for (uint i=0; iused_tables(); + Item *curr_val; + if (row_col_no && value[i]->real_item()->type() == Item::ROW_ITEM) + { + Item_row *value_tuple= (Item_row *) (value[i]->real_item()); + curr_val= value_tuple->element_index(row_col_no - 1); + } + else + curr_val= value[i]; + table_map value_used_tables= curr_val->used_tables(); used_tables|= value_used_tables; if (!(value_used_tables & (field->table->map | RAND_TABLE_BIT))) optimizable=1; @@ -4789,7 +4800,15 @@ add_key_field(JOIN *join, bool is_const=1; for (uint i=0; iconst_item())) + Item *curr_val; + if (row_col_no && value[i]->real_item()->type() == Item::ROW_ITEM) + { + Item_row *value_tuple= (Item_row *) (value[i]->real_item()); + curr_val= value_tuple->element_index(row_col_no - 1); + } + else + curr_val= value[i]; + if (!(is_const&= curr_val->const_item())) break; } if (is_const) @@ -4856,12 +4875,14 @@ add_key_field(JOIN *join, @param key_fields Pointer to add key, if usable @param 
and_level And level, to be stored in KEY_FIELD @param cond Condition predicate - @param field Field used in comparision + @param field_item Field item used for comparison @param eq_func True if we used =, <=> or IS NULL - @param value Value used for comparison with field - Is NULL for BETWEEN and IN + @param value Value used for comparison with field_item + @param num_values Number of values[] that we are comparing against @param usable_tables Tables which can be used for key optimization @param sargables IN/OUT Array of found sargable candidates + @param row_col_no if = n that > 0 then field is compared only + against the n-th component of row values @note If field items f1 and f2 belong to the same multiple equality and @@ -4876,11 +4897,12 @@ add_key_equal_fields(JOIN *join, KEY_FIELD **key_fields, uint and_level, Item_bool_func *cond, Item *field_item, bool eq_func, Item **val, uint num_values, table_map usable_tables, - SARGABLE_PARAM **sargables) + SARGABLE_PARAM **sargables, uint row_col_no= 0) { Field *field= ((Item_field *) (field_item->real_item()))->field; add_key_field(join, key_fields, and_level, cond, field, - eq_func, val, num_values, usable_tables, sargables); + eq_func, val, num_values, usable_tables, sargables, + row_col_no); Item_equal *item_equal= field_item->get_item_equal(); if (item_equal) { @@ -4896,7 +4918,7 @@ add_key_equal_fields(JOIN *join, KEY_FIELD **key_fields, uint and_level, { add_key_field(join, key_fields, and_level, cond, equal_field, eq_func, val, num_values, usable_tables, - sargables); + sargables, row_col_no); } } } @@ -5078,6 +5100,24 @@ Item_func_in::add_key_fields(JOIN *join, KEY_FIELD **key_fields, (Item_field*) (args[0]->real_item()), false, args + 1, arg_count - 1, usable_tables, sargables); } + else if (key_item()->type() == Item::ROW_ITEM && + !(used_tables() & OUTER_REF_TABLE_BIT)) + { + Item_row *key_row= (Item_row *) key_item(); + Item **key_col= key_row->addr(0); + uint row_cols= key_row->cols(); + for (uint i= 0; 
i < row_cols; i++, key_col++) + { + if (is_local_field(*key_col)) + { + Item_field *field_item= (Item_field *)((*key_col)->real_item()); + add_key_equal_fields(join, key_fields, *and_level, this, + field_item, false, args + 1, arg_count - 1, + usable_tables, sargables, i + 1); + } + } + } + } @@ -20590,7 +20630,7 @@ static int test_if_order_by_key(JOIN *join, key_parts= (uint) (key_part - table->key_info[idx].key_part); if (reverse == -1 && - !(table->file->index_flags(idx, user_defined_kp, 1) & HA_READ_PREV)) + !(table->file->index_flags(idx, user_defined_kp-1, 1) & HA_READ_PREV)) reverse= 0; // Index can't be used if (have_pk_suffix && reverse == -1) diff --git a/sql/sql_show.cc b/sql/sql_show.cc index 6564504eff471..d1d1410d51ac4 100644 --- a/sql/sql_show.cc +++ b/sql/sql_show.cc @@ -3386,7 +3386,7 @@ static bool show_status_array(THD *thd, const char *wild, for (; variables->name; variables++) { - bool wild_checked; + bool wild_checked= false; strnmov(prefix_end, variables->name, len); name_buffer[sizeof(name_buffer)-1]=0; /* Safety */ @@ -3452,8 +3452,8 @@ static bool show_status_array(THD *thd, const char *wild, else { if ((wild_checked || - (wild && wild[0] && wild_case_compare(system_charset_info, - name_buffer, wild))) && + !(wild && wild[0] && wild_case_compare(system_charset_info, + name_buffer, wild))) && (!cond || cond->val_int())) { const char *pos; // We assign a lot of const's diff --git a/storage/innobase/btr/btr0scrub.cc b/storage/innobase/btr/btr0scrub.cc index 4cb46dd415f4f..0d59481c4b542 100644 --- a/storage/innobase/btr/btr0scrub.cc +++ b/storage/innobase/btr/btr0scrub.cc @@ -136,15 +136,15 @@ btr_scrub_lock_dict_func(ulint space_id, bool lock_to_close_table, * if we don't lock to close a table, we check if space * is closing, and then instead give up */ - if (lock_to_close_table == false) { - fil_space_t* space = fil_space_acquire(space_id); - if (!space || space->stop_new_ops) { - if (space) { - fil_space_release(space); - } + if 
(lock_to_close_table) { + } else if (fil_space_t* space = fil_space_acquire(space_id)) { + bool stopping = space->is_stopping(); + fil_space_release(space); + if (stopping) { return false; } - fil_space_release(space); + } else { + return false; } os_thread_sleep(250000); @@ -206,18 +206,15 @@ btr_scrub_table_close_for_thread( return; } - fil_space_t* space = fil_space_acquire(scrub_data->space); - - /* If tablespace is not marked as stopping perform - the actual close. */ - if (space && !space->is_stopping()) { - mutex_enter(&dict_sys->mutex); - /* perform the actual closing */ - btr_scrub_table_close(scrub_data->current_table); - mutex_exit(&dict_sys->mutex); - } - - if (space) { + if (fil_space_t* space = fil_space_acquire(scrub_data->space)) { + /* If tablespace is not marked as stopping perform + the actual close. */ + if (!space->is_stopping()) { + mutex_enter(&dict_sys->mutex); + /* perform the actual closing */ + btr_scrub_table_close(scrub_data->current_table); + mutex_exit(&dict_sys->mutex); + } fil_space_release(space); } diff --git a/storage/innobase/buf/buf0buf.cc b/storage/innobase/buf/buf0buf.cc index 11fe77d75de70..a6ed277a90e04 100644 --- a/storage/innobase/buf/buf0buf.cc +++ b/storage/innobase/buf/buf0buf.cc @@ -7535,12 +7535,12 @@ buf_page_decrypt_after_read(buf_page_t* bpage) return (true); } - FilSpace space(bpage->id.space()); + FilSpace space(bpage->id.space(), true); /* Page is encrypted if encryption information is found from tablespace and page contains used key_version. This is true also for pages first compressed and then encrypted. 
*/ - if (!space()->crypt_data) { + if (!space() || !space()->crypt_data) { key_version = 0; } diff --git a/storage/innobase/buf/buf0dump.cc b/storage/innobase/buf/buf0dump.cc index f7883ded07080..9b86b1c16da1f 100644 --- a/storage/innobase/buf/buf0dump.cc +++ b/storage/innobase/buf/buf0dump.cc @@ -540,7 +540,7 @@ buf_load() f = fopen(full_filename, "r"); if (f == NULL) { - buf_load_status(STATUS_ERR, + buf_load_status(STATUS_INFO, "Cannot open '%s' for reading: %s", full_filename, strerror(errno)); return; diff --git a/storage/innobase/fil/fil0fil.cc b/storage/innobase/fil/fil0fil.cc index b38899e6de49b..06ec31a7ed42b 100644 --- a/storage/innobase/fil/fil0fil.cc +++ b/storage/innobase/fil/fil0fil.cc @@ -2268,12 +2268,13 @@ Used by background threads that do not necessarily hold proper locks for concurrency control. @param[in] id tablespace ID @param[in] silent whether to silently ignore missing tablespaces -@return the tablespace, or NULL if missing or being deleted */ +@param[in] for_io whether to look up the tablespace while performing I/O + (possibly executing TRUNCATE) +@return the tablespace +@retval NULL if missing or being deleted or truncated */ inline fil_space_t* -fil_space_acquire_low( - ulint id, - bool silent) +fil_space_acquire_low(ulint id, bool silent, bool for_io = false) { fil_space_t* space; @@ -2286,7 +2287,7 @@ fil_space_acquire_low( ib::warn() << "Trying to access missing" " tablespace " << id; } - } else if (space->stop_new_ops || space->is_being_truncated) { + } else if (!for_io && space->is_stopping()) { space = NULL; } else { space->n_pending_ops++; @@ -2301,22 +2302,24 @@ fil_space_acquire_low( Used by background threads that do not necessarily hold proper locks for concurrency control. 
@param[in] id tablespace ID -@return the tablespace, or NULL if missing or being deleted */ +@param[in] for_io whether to look up the tablespace while performing I/O + (possibly executing TRUNCATE) +@return the tablespace +@retval NULL if missing or being deleted or truncated */ fil_space_t* -fil_space_acquire( - ulint id) +fil_space_acquire(ulint id, bool for_io) { - return(fil_space_acquire_low(id, false)); + return(fil_space_acquire_low(id, false, for_io)); } /** Acquire a tablespace that may not exist. Used by background threads that do not necessarily hold proper locks for concurrency control. @param[in] id tablespace ID -@return the tablespace, or NULL if missing or being deleted */ +@return the tablespace +@retval NULL if missing or being deleted */ fil_space_t* -fil_space_acquire_silent( - ulint id) +fil_space_acquire_silent(ulint id) { return(fil_space_acquire_low(id, true)); } @@ -2324,8 +2327,7 @@ fil_space_acquire_silent( /** Release a tablespace acquired with fil_space_acquire(). @param[in,out] space tablespace to release */ void -fil_space_release( - fil_space_t* space) +fil_space_release(fil_space_t* space) { mutex_enter(&fil_system->mutex); ut_ad(space->magic_n == FIL_SPACE_MAGIC_N); @@ -5479,8 +5481,7 @@ fil_flush( if (fil_space_t* space = fil_space_get_by_id(space_id)) { if (space->purpose != FIL_TYPE_TEMPORARY - && !space->stop_new_ops - && !space->is_being_truncated) { + && !space->is_stopping()) { fil_flush_low(space); } } @@ -5524,8 +5525,7 @@ fil_flush_file_spaces( space = UT_LIST_GET_NEXT(unflushed_spaces, space)) { if (space->purpose == purpose - && !space->stop_new_ops - && !space->is_being_truncated) { + && !space->is_stopping()) { space_ids[n_space_ids++] = space->id; } @@ -6701,8 +6701,7 @@ If NULL, use the first fil_space_t on fil_system->space_list. @return pointer to the next fil_space_t. 
@retval NULL if this was the last*/ fil_space_t* -fil_space_next( - fil_space_t* prev_space) +fil_space_next(fil_space_t* prev_space) { fil_space_t* space=prev_space; @@ -6725,8 +6724,8 @@ fil_space_next( fil_ibd_create(), or dropped, or !tablespace. */ while (space != NULL && (UT_LIST_GET_LEN(space->chain) == 0 - || space->stop_new_ops - || space->purpose != FIL_TYPE_TABLESPACE)) { + || space->is_stopping() + || space->purpose != FIL_TYPE_TABLESPACE)) { space = UT_LIST_GET_NEXT(space_list, space); } diff --git a/storage/innobase/include/fil0fil.h b/storage/innobase/include/fil0fil.h index abd9ff9a9ede6..e63550f26ff2f 100644 --- a/storage/innobase/include/fil0fil.h +++ b/storage/innobase/include/fil0fil.h @@ -735,27 +735,28 @@ MY_ATTRIBUTE((warn_unused_result)); Used by background threads that do not necessarily hold proper locks for concurrency control. @param[in] id tablespace ID -@return the tablespace, or NULL if missing or being deleted */ +@param[in] for_io whether to look up the tablespace while performing I/O + (possibly executing TRUNCATE) +@return the tablespace +@retval NULL if missing or being deleted or truncated */ fil_space_t* -fil_space_acquire( - ulint id) +fil_space_acquire(ulint id, bool for_io = false) MY_ATTRIBUTE((warn_unused_result)); /** Acquire a tablespace that may not exist. Used by background threads that do not necessarily hold proper locks for concurrency control. @param[in] id tablespace ID -@return the tablespace, or NULL if missing or being deleted */ +@return the tablespace +@retval NULL if missing or being deleted */ fil_space_t* -fil_space_acquire_silent( - ulint id) +fil_space_acquire_silent(ulint id) MY_ATTRIBUTE((warn_unused_result)); /** Release a tablespace acquired with fil_space_acquire(). @param[in,out] space tablespace to release */ void -fil_space_release( - fil_space_t* space); +fil_space_release(fil_space_t* space); /** Return the next fil_space_t. Once started, the caller must keep calling this until it returns NULL. 
@@ -792,17 +793,19 @@ class FilSpace FilSpace() : m_space(NULL) {} /** Constructor: Look up the tablespace and increment the - referece count if found. - @param[in] space_id tablespace ID */ - explicit FilSpace(ulint space_id) - : m_space(fil_space_acquire(space_id)) {} + reference count if found. + @param[in] space_id tablespace ID + @param[in] for_io whether to look up the tablespace + while performing I/O + (possibly executing TRUNCATE) */ + explicit FilSpace(ulint space_id, bool for_io = false) + : m_space(fil_space_acquire(space_id, for_io)) {} /** Assignment operator: This assumes that fil_space_acquire() has already been done for the fil_space_t. The caller must assign NULL if it calls fil_space_release(). @param[in] space tablespace to assign */ - class FilSpace& operator=( - fil_space_t* space) + class FilSpace& operator=(fil_space_t* space) { /* fil_space_acquire() must have been invoked. */ ut_ad(space == NULL || space->n_pending_ops > 0); diff --git a/storage/rocksdb/.clang-format b/storage/rocksdb/.clang-format new file mode 100644 index 0000000000000..d80b012dd4b03 --- /dev/null +++ b/storage/rocksdb/.clang-format @@ -0,0 +1,93 @@ +--- +Language: Cpp +# BasedOnStyle: LLVM +AccessModifierOffset: -2 +AlignAfterOpenBracket: Align +AlignConsecutiveAssignments: false +AlignConsecutiveDeclarations: false +AlignEscapedNewlinesLeft: false +AlignOperands: true +AlignTrailingComments: true +AllowAllParametersOfDeclarationOnNextLine: true +AllowShortBlocksOnASingleLine: false +AllowShortCaseLabelsOnASingleLine: false +AllowShortFunctionsOnASingleLine: All +AllowShortIfStatementsOnASingleLine: false +AllowShortLoopsOnASingleLine: false +AlwaysBreakAfterDefinitionReturnType: None +AlwaysBreakAfterReturnType: None +AlwaysBreakBeforeMultilineStrings: false +AlwaysBreakTemplateDeclarations: false +BinPackArguments: true +BinPackParameters: true +BraceWrapping: + AfterClass: false + AfterControlStatement: false + AfterEnum: false + AfterFunction: false + 
AfterNamespace: false + AfterObjCDeclaration: false + AfterStruct: false + AfterUnion: false + BeforeCatch: false + BeforeElse: false + IndentBraces: false +BreakBeforeBinaryOperators: None +BreakBeforeBraces: Attach +BreakBeforeTernaryOperators: true +BreakConstructorInitializersBeforeComma: false +BreakAfterJavaFieldAnnotations: false +BreakStringLiterals: true +ColumnLimit: 80 +CommentPragmas: '^ IWYU pragma:' +ConstructorInitializerAllOnOneLineOrOnePerLine: false +ConstructorInitializerIndentWidth: 4 +ContinuationIndentWidth: 4 +Cpp11BracedListStyle: true +DerivePointerAlignment: false +DisableFormat: false +ExperimentalAutoDetectBinPacking: false +ForEachMacros: [ foreach, Q_FOREACH, BOOST_FOREACH ] +IncludeCategories: + - Regex: '^"(llvm|llvm-c|clang|clang-c)/' + Priority: 2 + - Regex: '^(<|"(gtest|isl|json)/)' + Priority: 3 + - Regex: '.*' + Priority: 1 +IncludeIsMainRegex: '$' +IndentCaseLabels: false +IndentWidth: 2 +IndentWrappedFunctionNames: false +KeepEmptyLinesAtTheStartOfBlocks: true +MacroBlockBegin: '' +MacroBlockEnd: '' +MaxEmptyLinesToKeep: 1 +NamespaceIndentation: None +ObjCBlockIndentWidth: 2 +ObjCSpaceAfterProperty: false +ObjCSpaceBeforeProtocolList: true +PenaltyBreakBeforeFirstCallParameter: 19 +PenaltyBreakComment: 300 +PenaltyBreakFirstLessLess: 120 +PenaltyBreakString: 1000 +PenaltyExcessCharacter: 1000000 +PenaltyReturnTypeOnItsOwnLine: 60 +PointerAlignment: Right +ReflowComments: true +SortIncludes: true +SpaceAfterCStyleCast: false +SpaceBeforeAssignmentOperators: true +SpaceBeforeParens: ControlStatements +SpaceInEmptyParentheses: false +SpacesBeforeTrailingComments: 1 +SpacesInAngles: false +SpacesInContainerLiterals: true +SpacesInCStyleCastParentheses: false +SpacesInParentheses: false +SpacesInSquareBrackets: false +Standard: Cpp11 +TabWidth: 8 +UseTab: Never +JavaScriptQuotes: Leave +... 
diff --git a/storage/rocksdb/.gitignore b/storage/rocksdb/.gitignore new file mode 100644 index 0000000000000..adf3e154c3612 --- /dev/null +++ b/storage/rocksdb/.gitignore @@ -0,0 +1,2 @@ +build_version.cc +.* diff --git a/storage/rocksdb/CMakeLists.txt b/storage/rocksdb/CMakeLists.txt new file mode 100644 index 0000000000000..308bd26592e49 --- /dev/null +++ b/storage/rocksdb/CMakeLists.txt @@ -0,0 +1,178 @@ +# TODO: Copyrights + +MACRO(SKIP_ROCKSDB_PLUGIN msg) + MESSAGE_ONCE(SKIP_ROCKSDB_PLUGIN "Can't build rocksdb engine - ${msg}") + RETURN() +ENDMACRO() + +IF(NOT EXISTS ${CMAKE_CURRENT_SOURCE_DIR}/rocksdb/Makefile AND GIT_EXECUTABLE) + EXECUTE_PROCESS(COMMAND "${GIT_EXECUTABLE}" submodule init + WORKING_DIRECTORY "${CMAKE_CURRENT_SOURCE_DIR}") + EXECUTE_PROCESS(COMMAND "${GIT_EXECUTABLE}" submodule update + WORKING_DIRECTORY "${CMAKE_CURRENT_SOURCE_DIR}") +ENDIF() + +IF (NOT EXISTS "${CMAKE_CURRENT_SOURCE_DIR}/rocksdb/Makefile") + SKIP_ROCKSDB_PLUGIN("Missing Makefile in rocksdb directory. Try \"git submodule update\".") +ENDIF() + +# We've had our builders hang during the build process. This prevents MariaRocks +# to be built on 32 bit intel OS kernels. 
+IF(CMAKE_SYSTEM_PROCESSOR MATCHES "i[36]86") + SKIP_ROCKSDB_PLUGIN("Intel 32 bit not supported.") +ENDIF() + +# +# Also, disable building on 32-bit Windows +# +IF (WIN32 AND CMAKE_SIZEOF_VOID_P EQUAL 4) + SKIP_ROCKSDB_PLUGIN("32-Bit Windows are temporarily disabled") +ENDIF() + +# This plugin needs recent C++ compilers (it is using C++11 features) +# Skip build for the old compilers +SET(CXX11_FLAGS) +SET(OLD_COMPILER_MSG "requires c++11 -capable compiler (minimal supported versions are g++ 4.8, clang 3.3, VS2015)") + +IF(CMAKE_CXX_COMPILER_ID MATCHES "GNU") + EXECUTE_PROCESS(COMMAND ${CMAKE_CXX_COMPILER} -dumpversion OUTPUT_VARIABLE GCC_VERSION) + IF (GCC_VERSION VERSION_LESS 4.8) + SKIP_ROCKSDB_PLUGIN("${OLD_COMPILER_MSG}") + ENDIF() + SET(CXX11_FLAGS "-std=c++11") +ELSEIF (CMAKE_CXX_COMPILER_ID MATCHES "Clang") + IF ((CMAKE_CXX_COMPILER_VERSION AND CMAKE_CXX_COMPILER_VERSION VERSION_LESS 3.3) OR + (CLANG_VERSION_STRING AND CLANG_VERSION_STRING VERSION_LESS 3.3)) + SKIP_ROCKSDB_PLUGIN("${OLD_COMPILER_MSG}") + ENDIF() + SET(CXX11_FLAGS "-std=c++11 -stdlib=libstdc++") +ELSEIF(MSVC) + IF (MSVC_VERSION LESS 1900) + SKIP_ROCKSDB_PLUGIN("${OLD_COMPILER_MSG}") + ENDIF() +ELSE() + SKIP_ROCKSDB_PLUGIN("Compiler not supported") +ENDIF() + +IF(CXX11_FLAGS) + ADD_DEFINITIONS(${CXX11_FLAGS}) +ENDIF() + +SET(ROCKSDB_SE_SOURCES + rdb_mariadb_server_port.cc + rdb_mariadb_server_port.h + ha_rocksdb.cc + ha_rocksdb.h + rdb_i_s.cc + rdb_i_s.h + rdb_mutex_wrapper.cc + rdb_mutex_wrapper.h + rdb_index_merge.cc + rdb_index_merge.h + properties_collector.cc + properties_collector.h + rdb_datadic.cc + rdb_datadic.h + rdb_cf_manager.cc + rdb_cf_manager.h + rdb_utils.cc rdb_utils.h + rdb_threads.cc + rdb_threads.h + rdb_psi.h + rdb_psi.cc +) + +MYSQL_ADD_PLUGIN(rocksdb ${ROCKSDB_SE_SOURCES} STORAGE_ENGINE + MODULE_OUTPUT_NAME ha_rocksdb + COMPONENT rocksdb-engine) + +IF(NOT TARGET rocksdb) + # Bail out if compilation with rocksdb engine is not requested + RETURN() +ENDIF() + +# 
MARIAROCKS-TODO: ??? +CHECK_FUNCTION_EXISTS(fallocate HAVE_FALLOCATE) +IF(HAVE_FALLOCATE) + ADD_DEFINITIONS(-DROCKSDB_FALLOCATE_PRESENT) +ENDIF() + +INCLUDE(build_rocksdb.cmake) + +ADD_CONVENIENCE_LIBRARY(rocksdb_aux_lib + ha_rocksdb_proto.h + logger.h + rdb_comparator.h + rdb_cf_options.cc + rdb_cf_options.h + event_listener.cc + event_listener.h + rdb_perf_context.cc + rdb_perf_context.h + rdb_sst_info.cc + rdb_sst_info.h + rdb_buff.h + rdb_mariadb_port.h +) + +ADD_DEPENDENCIES(rocksdb_aux_lib GenError) + +TARGET_LINK_LIBRARIES(rocksdb_aux_lib rocksdblib ${ZLIB_LIBRARY}) +TARGET_LINK_LIBRARIES(rocksdb rocksdb_aux_lib) + +IF(CMAKE_CXX_COMPILER_ID MATCHES "GNU" OR CMAKE_CXX_COMPILER_ID MATCHES "Clang") + + # MARIAROCKS_NOT_YET: Add -frtti flag when compiling RocksDB files. + # TODO: is this the right way to do this? + # - SQL layer and storage/rocksdb/*.cc are compiled with -fnortti + # - RocksDB files are compiled with "-fnortti ... -frtti" + # - This causes RocksDB headers to be compiled with different settings: + # = with RTTI when compiling RocksDB + # = without RTTI when compiling storage/rocksdb/*.cc + # + # (facebook/mysql-5.6 just compiles everything without -f*rtti, which means + # everything is compiled with -frtti) + # + # (also had to add -frtti above, because something that event_listener.cc + # includes requires it. 
So, now everything in MariaRocks is compiled with + # -frtti) + set_source_files_properties(event_listener.cc rdb_cf_options.cc + PROPERTIES COMPILE_FLAGS -frtti) +ENDIF() + +CHECK_FUNCTION_EXISTS(sched_getcpu HAVE_SCHED_GETCPU) +IF(HAVE_SCHED_GETCPU) + ADD_DEFINITIONS(-DHAVE_SCHED_GETCPU=1) +ENDIF() + +# +# MariaDB: Dynamic plugin build is not suitable with unittest ATM +# +#IF(WITH_UNIT_TESTS AND WITH_EMBEDDED_SERVER) +# ADD_SUBDIRECTORY(unittest) +#ENDIF() + +ADD_LIBRARY(rocksdb_tools STATIC + rocksdb/tools/ldb_tool.cc + rocksdb/tools/ldb_cmd.cc + rocksdb/tools/sst_dump_tool.cc +) + +MYSQL_ADD_EXECUTABLE(sst_dump rocksdb/tools/sst_dump.cc COMPONENT rocksdb-engine) +TARGET_LINK_LIBRARIES(sst_dump rocksdblib) + +MYSQL_ADD_EXECUTABLE(mysql_ldb tools/mysql_ldb.cc COMPONENT rocksdb-engine) +TARGET_LINK_LIBRARIES(mysql_ldb rocksdb_tools rocksdb_aux_lib) + +IF(CMAKE_CXX_COMPILER_ID MATCHES "GNU" OR CMAKE_CXX_COMPILER_ID MATCHES "Clang") + SET_TARGET_PROPERTIES(rocksdb_tools sst_dump mysql_ldb PROPERTIES COMPILE_FLAGS -frtti) +ENDIF() +IF(MSVC) + # RocksDB, the storage engine, overdoes "const" by adding + # additional const qualifiers to parameters of the overriden virtual functions + # This creates a lot of warnings, that we silence here. + ADD_DEFINITIONS(/wd4373) + + # Some checks in C++ runtime that make debug build much slower + ADD_DEFINITIONS(-D_ITERATOR_DEBUG_LEVEL=0) +ENDIF() diff --git a/storage/rocksdb/README b/storage/rocksdb/README new file mode 100644 index 0000000000000..472b7986f9150 --- /dev/null +++ b/storage/rocksdb/README @@ -0,0 +1,38 @@ +== Summary == +This directory contains RocksDB-based Storage Engine (RDBSE) for MySQL = "MyRocks". 
+ +== Resources == +See https://github.com/facebook/mysql-5.6/wiki/Getting-Started-with-MyRocks +Facebook group: https://www.facebook.com/groups/mysqlonrocksdb/ + +== Coding Conventions == +The baseline for MyRocks coding conventions is the MySQL set, available at +http://dev.mysql.com/doc/internals/en/coding-guidelines.html. + +Several refinements: + 0. There is an umbrella C++ namespace named "myrocks" for all MyRocks code. + 1. We introduced "RDB" as the super-short abbreviation for "RocksDB". We will + use it as a name prefix, with different capitalization (see below), to ease + up code navigation with ctags and grep. + N.B. For ease of matching, we'll keep the variables and functions dealing + with sysvars as close as possible to the outside visible names of + sysvars, which start with "rocksdb_" prefix, the outward storage + engine name. + 2. The names for classes, interfaces, and C++ structures (which act as + classes), start with prefix "Rdb_". + NB: For historical reasons, we'll keep the "ha_" class + name for ha_rocksdb class, which is an exception to the rule. + 3. The names for global objects and functions start with prefix "rdb_". + 4. The names for macros and constants start with prefix "RDB_". + 5. Regular class member names start with "m_". + 6. Static class member names start with "s_". + 7. Given the 80 character per line limit, we'll not always use full English + words in names, when a well known or easily recognizable abbreviation + exists (like "tx" for "transaction" or "param" for "parameter" etc). + 8. When needing to disambiguate, we use different suffixes for that, like + "_arg" for a function argument/parameter, "_arr" for a C style array, and + "_vect" for a std::vector etc. 
+ +== Running Tests == +To run tests from rocksdb, rocksd_rpl or rocksdb_sys_vars packages, use the following parameters: +--mysqld=--default-storage-engine=rocksdb --mysqld=--skip-innodb --mysqld=--default-tmp-storage-engine=MyISAM --mysqld=--rocksdb \ No newline at end of file diff --git a/storage/rocksdb/atomic_stat.h b/storage/rocksdb/atomic_stat.h new file mode 100644 index 0000000000000..04e59bd9a8a81 --- /dev/null +++ b/storage/rocksdb/atomic_stat.h @@ -0,0 +1,94 @@ +/* This is an atomic integer abstract data type, for high-performance + tracking of a single stat. It intentionally permits inconsistent + atomic operations and reads, for better performance. This means + that, though no data should ever be lost by this stat, reads of it + at any time may not include all changes up to any particular point. + + So, values read from these may only be approximately correct. + + If your use-case will fail under these conditions, do not use this. + + Copyright (C) 2012 - 2014 Steaphan Greene + + This program is free software; you can redistribute it and/or + modify it under the terms of the GNU General Public License + as published by the Free Software Foundation; either version 2 + of the License, or (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the + Free Software Foundation, Inc. + 51 Franklin Street, Fifth Floor + Boston, MA 02110-1301, USA. 
+*/ + +#ifndef _atomic_stat_h_ +#define _atomic_stat_h_ + +#include <atomic> + +template < typename TYPE > +class atomic_stat { +public: + // Initialize value to the default for the type + atomic_stat() : value_(TYPE()) {}; + + // This enforces a strict order, as all absolute sets should + void clear() { + value_.store(TYPE(), std::memory_order_seq_cst); + }; + + // Reads can get any valid value, it doesn't matter which, exactly + TYPE load() const { + return value_.load(std::memory_order_relaxed); + }; + + // This only supplies relative arithmetic operations + // These are all done atomically, and so can show up in any order + void inc(const TYPE &other) { + value_.fetch_add(other, std::memory_order_relaxed); + }; + + void dec(const TYPE &other) { + value_.fetch_sub(other, std::memory_order_relaxed); + }; + + void inc() { + value_.fetch_add(1, std::memory_order_relaxed); + }; + + void dec() { + value_.fetch_sub(1, std::memory_order_relaxed); + }; + + // This will make one attempt to set the value to the max of + // the current value, and the passed-in value. It can fail + // for any reason, and we only try it once. + void set_max_maybe(const TYPE &new_val) { + TYPE old_val = value_; + if (new_val > old_val) { + value_.compare_exchange_weak(old_val, new_val, + std::memory_order_relaxed, + std::memory_order_relaxed); + } + }; + + // This will make one attempt to assign the value to the passed-in + // value. It can fail for any reason, and we only try it once. 
+ void set_maybe(const TYPE &new_val) { + TYPE old_val = value_; + value_.compare_exchange_weak(old_val, new_val, + std::memory_order_relaxed, + std::memory_order_relaxed); + }; + +private: + std::atomic<TYPE> value_; +}; + +#endif // _atomic_stat_h_ diff --git a/storage/rocksdb/build_rocksdb.cmake b/storage/rocksdb/build_rocksdb.cmake new file mode 100644 index 0000000000000..4b830bbdf32ec --- /dev/null +++ b/storage/rocksdb/build_rocksdb.cmake @@ -0,0 +1,370 @@ + +if(POLICY CMP0042) + cmake_policy(SET CMP0042 NEW) +endif() + +SET(ROCKSDB_SOURCE_DIR ${CMAKE_CURRENT_SOURCE_DIR}/rocksdb) + +INCLUDE_DIRECTORIES( + ${ROCKSDB_SOURCE_DIR} + ${ROCKSDB_SOURCE_DIR}/include + ${ROCKSDB_SOURCE_DIR}/third-party/gtest-1.7.0/fused-src +) + +list(APPEND CMAKE_MODULE_PATH "${ROCKSDB_SOURCE_DIR}/cmake/modules/") + +if(WIN32) + # include(${ROCKSDB_SOURCE_DIR}/thirdparty.inc) +else() + option(WITH_ROCKSDB_JEMALLOC "build RocksDB with JeMalloc" OFF) + if(WITH_ROCKSDB_JEMALLOC) + find_package(JeMalloc REQUIRED) + add_definitions(-DROCKSDB_JEMALLOC) + include_directories(${JEMALLOC_INCLUDE_DIR}) + endif() + if(CMAKE_SYSTEM_NAME STREQUAL "FreeBSD") + # FreeBSD has jemalloc as default malloc + add_definitions(-DROCKSDB_JEMALLOC) + set(WITH_JEMALLOC ON) + endif() +endif() + +include (CheckTypeSize) +check_type_size(size_t SIZEOF_SIZE_T) +set_property(SOURCE ha_rocksdb.cc APPEND PROPERTY COMPILE_DEFINITIONS + SIZEOF_SIZE_T=${SIZEOF_SIZE_T} SIZEOF_UINT64_T=8) + +# Optional compression libraries. + +foreach(compression_lib LZ4 BZIP2 ZSTD snappy) + FIND_PACKAGE(${compression_lib} QUIET) + + SET(WITH_ROCKSDB_${compression_lib} AUTO CACHE STRING + "Build RocksDB with ${compression_lib} compression. 
Possible values are 'ON', 'OFF', 'AUTO' and default is 'AUTO'") + + if(${WITH_ROCKSDB_${compression_lib}} STREQUAL "ON" AND NOT ${${compression_lib}_FOUND}) + MESSAGE(FATAL_ERROR + "${compression_lib} library was not found, but WITH_ROCKSDB${compression_lib} option is ON.\ + Either set WITH_ROCKSDB${compression_lib} to OFF, or make sure ${compression_lib} is installed") + endif() +endforeach() + +if(LZ4_FOUND AND (NOT WITH_ROCKSDB_LZ4 STREQUAL "OFF")) + add_definitions(-DLZ4) + include_directories(${LZ4_INCLUDE_DIR}) + list(APPEND THIRDPARTY_LIBS ${LZ4_LIBRARY}) +endif() + +if(BZIP2_FOUND AND (NOT WITH_ROCKSDB_BZIP2 STREQUAL "OFF")) + add_definitions(-DBZIP2) + include_directories(${BZIP2_INCLUDE_DIR}) + list(APPEND THIRDPARTY_LIBS ${BZIP2_LIBRARIES}) +endif() + +if(SNAPPY_FOUND AND (NOT WITH_ROCKSDB_SNAPPY STREQUAL "OFF")) + add_definitions(-DSNAPPY) + include_directories(${SNAPPY_INCLUDE_DIR}) + list(APPEND THIRDPARTY_LIBS ${SNAPPY_LIBRARIES}) +endif() + +if(ZSTD_FOUND AND (NOT WITH_ROCKSDB_ZSTD STREQUAL "OFF")) + add_definitions(-DZSTD) + include_directories(${ZSTD_INCLUDE_DIR}) + list(APPEND THIRDPARTY_LIBS ${ZSTD_LIBRARY}) +endif() + +add_definitions(-DZLIB) +list(APPEND THIRDPARTY_LIBS ${ZLIB_LIBRARY}) + +if(CMAKE_SYSTEM_NAME MATCHES "Cygwin") + add_definitions(-fno-builtin-memcmp -DCYGWIN) +elseif(CMAKE_SYSTEM_NAME MATCHES "Darwin") + add_definitions(-DOS_MACOSX) +elseif(CMAKE_SYSTEM_NAME MATCHES "Linux") + add_definitions(-DOS_LINUX) +elseif(CMAKE_SYSTEM_NAME MATCHES "SunOS") + add_definitions(-DOS_SOLARIS) +elseif(CMAKE_SYSTEM_NAME MATCHES "FreeBSD") + add_definitions(-DOS_FREEBSD) +elseif(CMAKE_SYSTEM_NAME MATCHES "NetBSD") + add_definitions(-DOS_NETBSD) +elseif(CMAKE_SYSTEM_NAME MATCHES "OpenBSD") + add_definitions(-DOS_OPENBSD) +elseif(CMAKE_SYSTEM_NAME MATCHES "DragonFly") + add_definitions(-DOS_DRAGONFLYBSD) +elseif(CMAKE_SYSTEM_NAME MATCHES "Android") + add_definitions(-DOS_ANDROID) +elseif(CMAKE_SYSTEM_NAME MATCHES "Windows") + 
add_definitions(-DOS_WIN) +endif() + +IF(MSVC) + add_definitions(/wd4244) +ENDIF() +if(NOT WIN32) + add_definitions(-DROCKSDB_PLATFORM_POSIX -DROCKSDB_LIB_IO_POSIX) +endif() + +option(WITH_FALLOCATE "build with fallocate" ON) + +if(WITH_FALLOCATE AND UNIX) + include(CheckCSourceCompiles) + CHECK_C_SOURCE_COMPILES(" +#include <fcntl.h> +#include <linux/falloc.h> +int main() { + int fd = open(\"/dev/null\", 0); + fallocate(fd, FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE, 0, 1024); +} +" HAVE_FALLOCATE) + if(HAVE_FALLOCATE) + add_definitions(-DROCKSDB_FALLOCATE_PRESENT) + endif() +endif() + +include(CheckFunctionExists) +CHECK_FUNCTION_EXISTS(malloc_usable_size HAVE_MALLOC_USABLE_SIZE) +if(HAVE_MALLOC_USABLE_SIZE) + add_definitions(-DROCKSDB_MALLOC_USABLE_SIZE) +endif() + +include_directories(${ROCKSDB_SOURCE_DIR}) +include_directories(${ROCKSDB_SOURCE_DIR}/include) +include_directories(SYSTEM ${ROCKSDB_SOURCE_DIR}/third-party/gtest-1.7.0/fused-src) + +find_package(Threads REQUIRED) +if(WIN32) + set(SYSTEM_LIBS ${SYSTEM_LIBS} Shlwapi.lib Rpcrt4.lib) +else() + set(SYSTEM_LIBS ${CMAKE_THREAD_LIBS_INIT}) +endif() + +set(ROCKSDB_LIBS rocksdblib) +set(LIBS ${ROCKSDB_LIBS} ${THIRDPARTY_LIBS} ${SYSTEM_LIBS}) + +#add_subdirectory(${ROCKSDB_SOURCE_DIR}/tools) + +# Main library source code + +set(ROCKSDB_SOURCES + db/auto_roll_logger.cc + db/builder.cc + db/c.cc + db/column_family.cc + db/compacted_db_impl.cc + db/compaction.cc + db/compaction_iterator.cc + db/compaction_job.cc + db/compaction_picker.cc + db/convenience.cc + db/dbformat.cc + db/db_filesnapshot.cc + db/db_impl.cc + db/db_impl_debug.cc + db/db_impl_experimental.cc + db/db_impl_readonly.cc + db/db_info_dumper.cc + db/db_iter.cc + db/event_helpers.cc + db/external_sst_file_ingestion_job.cc + db/experimental.cc + db/filename.cc + db/file_indexer.cc + db/flush_job.cc + db/flush_scheduler.cc + db/forward_iterator.cc + db/internal_stats.cc + db/log_reader.cc + db/log_writer.cc + db/managed_iterator.cc + db/memtable.cc + db/memtable_allocator.cc 
+ db/memtable_list.cc + db/merge_helper.cc + db/merge_operator.cc + db/range_del_aggregator.cc + db/repair.cc + db/snapshot_impl.cc + db/table_cache.cc + db/table_properties_collector.cc + db/transaction_log_impl.cc + db/version_builder.cc + db/version_edit.cc + db/version_set.cc + db/wal_manager.cc + db/write_batch.cc + db/write_batch_base.cc + db/write_controller.cc + db/write_thread.cc + memtable/hash_cuckoo_rep.cc + memtable/hash_linklist_rep.cc + memtable/hash_skiplist_rep.cc + memtable/skiplistrep.cc + memtable/vectorrep.cc + port/stack_trace.cc + table/adaptive_table_factory.cc + table/block.cc + table/block_based_filter_block.cc + table/block_based_table_builder.cc + table/block_based_table_factory.cc + table/block_based_table_reader.cc + table/block_builder.cc + table/block_prefix_index.cc + table/bloom_block.cc + table/cuckoo_table_builder.cc + table/cuckoo_table_factory.cc + table/cuckoo_table_reader.cc + table/flush_block_policy.cc + table/format.cc + table/full_filter_block.cc + table/get_context.cc + table/iterator.cc + table/merging_iterator.cc + table/sst_file_writer.cc + table/meta_blocks.cc + table/plain_table_builder.cc + table/plain_table_factory.cc + table/plain_table_index.cc + table/plain_table_key_coding.cc + table/plain_table_reader.cc + table/persistent_cache_helper.cc + table/table_properties.cc + table/two_level_iterator.cc + tools/sst_dump_tool.cc + tools/db_bench_tool.cc + tools/dump/db_dump_tool.cc + util/arena.cc + util/bloom.cc + util/cf_options.cc + util/clock_cache.cc + util/coding.cc + util/compaction_job_stats_impl.cc + util/comparator.cc + util/concurrent_arena.cc + util/crc32c.cc + util/db_options.cc + util/delete_scheduler.cc + util/dynamic_bloom.cc + util/env.cc + util/env_chroot.cc + util/env_hdfs.cc + util/event_logger.cc + util/file_util.cc + util/file_reader_writer.cc + util/sst_file_manager_impl.cc + util/filter_policy.cc + util/hash.cc + util/histogram.cc + util/histogram_windowing.cc + util/instrumented_mutex.cc + 
util/iostats_context.cc + + util/lru_cache.cc + tools/ldb_cmd.cc + tools/ldb_tool.cc + util/logging.cc + util/log_buffer.cc + util/memenv.cc + util/murmurhash.cc + util/options.cc + util/options_helper.cc + util/options_parser.cc + util/options_sanity_check.cc + util/perf_context.cc + util/perf_level.cc + util/random.cc + util/rate_limiter.cc + util/sharded_cache.cc + util/slice.cc + util/statistics.cc + util/status.cc + util/status_message.cc + util/string_util.cc + util/sync_point.cc + util/testutil.cc + util/thread_local.cc + util/threadpool_imp.cc + util/thread_status_impl.cc + util/thread_status_updater.cc + util/thread_status_util.cc + util/thread_status_util_debug.cc + util/transaction_test_util.cc + util/xxhash.cc + utilities/backupable/backupable_db.cc + utilities/blob_db/blob_db.cc + utilities/checkpoint/checkpoint.cc + utilities/compaction_filters/remove_emptyvalue_compactionfilter.cc + utilities/date_tiered/date_tiered_db_impl.cc + utilities/document/document_db.cc + utilities/document/json_document.cc + utilities/document/json_document_builder.cc + utilities/env_mirror.cc + utilities/geodb/geodb_impl.cc + utilities/leveldb_options/leveldb_options.cc + utilities/lua/rocks_lua_compaction_filter.cc + utilities/memory/memory_util.cc + utilities/merge_operators/string_append/stringappend.cc + utilities/merge_operators/string_append/stringappend2.cc + utilities/merge_operators/put.cc + utilities/merge_operators/max.cc + utilities/merge_operators/uint64add.cc + utilities/option_change_migration/option_change_migration.cc + utilities/options/options_util.cc + utilities/persistent_cache/block_cache_tier.cc + utilities/persistent_cache/block_cache_tier_file.cc + utilities/persistent_cache/block_cache_tier_metadata.cc + utilities/persistent_cache/persistent_cache_tier.cc + utilities/persistent_cache/volatile_tier_impl.cc + utilities/redis/redis_lists.cc + utilities/simulator_cache/sim_cache.cc + utilities/spatialdb/spatial_db.cc + 
utilities/table_properties_collectors/compact_on_deletion_collector.cc + utilities/transactions/optimistic_transaction_impl.cc + utilities/transactions/optimistic_transaction_db_impl.cc + utilities/transactions/transaction_base.cc + utilities/transactions/transaction_impl.cc + utilities/transactions/transaction_db_impl.cc + utilities/transactions/transaction_db_mutex_impl.cc + utilities/transactions/transaction_lock_mgr.cc + utilities/transactions/transaction_util.cc + utilities/ttl/db_ttl_impl.cc + utilities/write_batch_with_index/write_batch_with_index.cc + utilities/write_batch_with_index/write_batch_with_index_internal.cc + utilities/col_buf_encoder.cc + utilities/col_buf_decoder.cc + utilities/column_aware_encoding_util.cc +) + +if(WIN32) + list(APPEND ROCKSDB_SOURCES + port/win/io_win.cc + port/win/env_win.cc + port/win/env_default.cc + port/win/port_win.cc + port/win/win_logger.cc + port/win/win_thread.cc + port/win/xpress_win.cc) +else() + list(APPEND ROCKSDB_SOURCES + port/port_posix.cc + util/env_posix.cc + util/io_posix.cc) +endif() +SET(SOURCES) +FOREACH(s ${ROCKSDB_SOURCES}) + list(APPEND SOURCES ${ROCKSDB_SOURCE_DIR}/${s}) +ENDFOREACH() + +IF(CMAKE_VERSION VERSION_GREATER "2.8.10") + STRING(TIMESTAMP GIT_DATE_TIME "%Y-%m-%d %H:%M:%S") +ENDIF() + +CONFIGURE_FILE(${ROCKSDB_SOURCE_DIR}/util/build_version.cc.in build_version.cc @ONLY) +INCLUDE_DIRECTORIES(${ROCKSDB_SOURCE_DIR}/util) +list(APPEND SOURCES ${CMAKE_CURRENT_BINARY_DIR}/build_version.cc) + +ADD_CONVENIENCE_LIBRARY(rocksdblib ${SOURCES}) +target_link_libraries(rocksdblib ${THIRDPARTY_LIBS} ${SYSTEM_LIBS}) +IF(CMAKE_CXX_COMPILER_ID MATCHES "GNU" OR CMAKE_CXX_COMPILER_ID MATCHES "Clang") + set_target_properties(rocksdblib PROPERTIES COMPILE_FLAGS "-fPIC -fno-builtin-memcmp -frtti") +endif() diff --git a/storage/rocksdb/event_listener.cc b/storage/rocksdb/event_listener.cc new file mode 100644 index 0000000000000..5854da54210a6 --- /dev/null +++ b/storage/rocksdb/event_listener.cc @@ -0,0 +1,86 @@ 
+/* + Copyright (c) 2015, Facebook, Inc. + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; version 2 of the License. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ + +#include + +/* The C++ file's header */ +#include "./event_listener.h" + +/* C++ standard header files */ +#include +#include + +/* MySQL includes */ +#include +#include + +/* MyRocks includes */ +#include "./ha_rocksdb.h" +#include "./properties_collector.h" +#include "./rdb_datadic.h" + +namespace myrocks { + +static std::vector +extract_index_stats(const std::vector &files, + const rocksdb::TablePropertiesCollection &props) { + std::vector ret; + for (auto fn : files) { + const auto it = props.find(fn); + DBUG_ASSERT(it != props.end()); + std::vector stats; + Rdb_tbl_prop_coll::read_stats_from_tbl_props(it->second, &stats); + ret.insert(ret.end(), stats.begin(), stats.end()); + } + return ret; +} + +void Rdb_event_listener::update_index_stats( + const rocksdb::TableProperties &props) { + DBUG_ASSERT(m_ddl_manager != nullptr); + const auto tbl_props = + std::make_shared(props); + + std::vector stats; + Rdb_tbl_prop_coll::read_stats_from_tbl_props(tbl_props, &stats); + + m_ddl_manager->adjust_stats(stats); +} + +void Rdb_event_listener::OnCompactionCompleted( + rocksdb::DB *db, const rocksdb::CompactionJobInfo &ci) { + DBUG_ASSERT(db != nullptr); + DBUG_ASSERT(m_ddl_manager != nullptr); + + if (ci.status.ok()) { + m_ddl_manager->adjust_stats( + 
extract_index_stats(ci.output_files, ci.table_properties), + extract_index_stats(ci.input_files, ci.table_properties)); + } +} + +void Rdb_event_listener::OnFlushCompleted( + rocksdb::DB *db, const rocksdb::FlushJobInfo &flush_job_info) { + DBUG_ASSERT(db != nullptr); + update_index_stats(flush_job_info.table_properties); +} + +void Rdb_event_listener::OnExternalFileIngested( + rocksdb::DB *db, const rocksdb::ExternalFileIngestionInfo &info) { + DBUG_ASSERT(db != nullptr); + update_index_stats(info.table_properties); +} +} // namespace myrocks diff --git a/storage/rocksdb/event_listener.h b/storage/rocksdb/event_listener.h new file mode 100644 index 0000000000000..d535031644bc7 --- /dev/null +++ b/storage/rocksdb/event_listener.h @@ -0,0 +1,46 @@ +/* + Copyright (c) 2015, Facebook, Inc. + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; version 2 of the License. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. 
+ + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ +#pragma once + +#include "rocksdb/listener.h" + +namespace myrocks { + +class Rdb_ddl_manager; + +class Rdb_event_listener : public rocksdb::EventListener { +public: + Rdb_event_listener(const Rdb_event_listener &) = delete; + Rdb_event_listener &operator=(const Rdb_event_listener &) = delete; + + explicit Rdb_event_listener(Rdb_ddl_manager *const ddl_manager) + : m_ddl_manager(ddl_manager) {} + + void OnCompactionCompleted(rocksdb::DB *db, + const rocksdb::CompactionJobInfo &ci) override; + void OnFlushCompleted(rocksdb::DB *db, + const rocksdb::FlushJobInfo &flush_job_info) override; + void OnExternalFileIngested( + rocksdb::DB *db, + const rocksdb::ExternalFileIngestionInfo &ingestion_info) override; + +private: + Rdb_ddl_manager *m_ddl_manager; + + void update_index_stats(const rocksdb::TableProperties &props); +}; + +} // namespace myrocks diff --git a/storage/rocksdb/get_rocksdb_files.sh b/storage/rocksdb/get_rocksdb_files.sh new file mode 100755 index 0000000000000..bd5128a86094e --- /dev/null +++ b/storage/rocksdb/get_rocksdb_files.sh @@ -0,0 +1,27 @@ +#!/bin/bash +MKFILE=`mktemp` +# create and run a simple makefile +# include rocksdb make file relative to the path of this script +echo "include ./storage/rocksdb/rocksdb/src.mk +all: + @echo \$(LIB_SOURCES)" > $MKFILE +for f in `make --makefile $MKFILE` +do + echo ./rocksdb/$f +done +rm $MKFILE + +# create build_version.cc file. Only create one if it doesn't exists or if it is different +# this is so that we don't rebuild mysqld every time +bv=storage/rocksdb/rocksdb/util/build_version.cc +date=$(date +%F) +git_sha=$(pushd storage/rocksdb/rocksdb >/dev/null && git rev-parse HEAD 2>/dev/null && popd >/dev/null) +if [ ! -f $bv ] || [ -z $git_sha ] || [ ! 
`grep $git_sha $bv` ] +then +echo "#include \"build_version.h\" +const char* rocksdb_build_git_sha = +\"rocksdb_build_git_sha:$git_sha\"; +const char* rocksdb_build_git_date = +\"rocksdb_build_git_date:$date\"; +const char* rocksdb_build_compile_date = __DATE__;" > $bv +fi diff --git a/storage/rocksdb/ha_rocksdb.cc b/storage/rocksdb/ha_rocksdb.cc new file mode 100644 index 0000000000000..981acdf22b064 --- /dev/null +++ b/storage/rocksdb/ha_rocksdb.cc @@ -0,0 +1,10648 @@ +/* + Copyright (c) 2012, Monty Program Ab + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; version 2 of the License. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ + +#ifdef USE_PRAGMA_IMPLEMENTATION +#pragma implementation // gcc: Class implementation +#endif + +#define MYSQL_SERVER 1 + +/* For use of 'PRIu64': */ +#define __STDC_FORMAT_MACROS + +#include + +#include + +/* The C++ file's header */ +#include "./ha_rocksdb.h" + +/* C++ standard header files */ +#include +#include +#include +#include +#include + +/* MySQL includes */ +#include "./debug_sync.h" +#include "./my_bit.h" +#include "./my_stacktrace.h" +#include "./sql_audit.h" +#include "./sql_table.h" +#include "./sql_hset.h" +#include +#ifdef MARIAROCKS_NOT_YET +#include +#endif +#include + +/* RocksDB includes */ +#include "rocksdb/compaction_filter.h" +#include "rocksdb/persistent_cache.h" +#include "rocksdb/rate_limiter.h" +#include "rocksdb/slice_transform.h" +#include "rocksdb/utilities/checkpoint.h" +#include 
"rocksdb/utilities/convenience.h" +#include "rocksdb/utilities/memory_util.h" + +/* MyRocks includes */ +#include "./event_listener.h" +#include "./ha_rocksdb_proto.h" +#include "./logger.h" +#include "./rdb_cf_manager.h" +#include "./rdb_cf_options.h" +#include "./rdb_datadic.h" +#include "./rdb_i_s.h" +#include "./rdb_index_merge.h" +#include "./rdb_mutex_wrapper.h" +#include "./rdb_psi.h" +#include "./rdb_threads.h" +#include "./rdb_mariadb_server_port.h" + +// Internal MySQL APIs not exposed in any header. +extern "C" { +/** + Mark transaction to rollback and mark error as fatal to a sub-statement. + @param thd Thread handle + @param all TRUE <=> rollback main transaction. +*/ +void thd_mark_transaction_to_rollback(MYSQL_THD thd, bool all); + +/** + * Get the user thread's binary logging format + * @param thd user thread + * @return Value to be used as index into the binlog_format_names array +*/ +int thd_binlog_format(const MYSQL_THD thd); + +/** + * Check if binary logging is filtered for thread's current db. + * @param thd Thread handle + * @retval 1 the query is not filtered, 0 otherwise. +*/ +bool thd_binlog_filter_ok(const MYSQL_THD thd); +} + +namespace myrocks { + +static st_global_stats global_stats; +static st_export_stats export_stats; + +/** + Updates row counters based on the table type and operation type. +*/ +void ha_rocksdb::update_row_stats(const operation_type &type) { + DBUG_ASSERT(type < ROWS_MAX); + // Find if we are modifying system databases. 
+ if (table->s && m_tbl_def->m_is_mysql_system_table) + global_stats.system_rows[type].inc(); + else + global_stats.rows[type].inc(); +} + +void dbug_dump_database(rocksdb::DB *db); +static handler *rocksdb_create_handler(my_core::handlerton *hton, + my_core::TABLE_SHARE *table_arg, + my_core::MEM_ROOT *mem_root); + +bool can_use_bloom_filter(THD *thd, const Rdb_key_def &kd, + const rocksdb::Slice &eq_cond, + const bool use_all_keys, bool is_ascending); + +/////////////////////////////////////////////////////////// +// Parameters and settings +/////////////////////////////////////////////////////////// +static char *rocksdb_default_cf_options; +static char *rocksdb_override_cf_options; +Rdb_cf_options rocksdb_cf_options_map; + +/////////////////////////////////////////////////////////// +// Globals +/////////////////////////////////////////////////////////// +handlerton *rocksdb_hton; + +rocksdb::TransactionDB *rdb = nullptr; + +static std::shared_ptr rocksdb_stats; +static std::unique_ptr flashcache_aware_env; +static std::shared_ptr properties_collector_factory; + +Rdb_dict_manager dict_manager; +Rdb_cf_manager cf_manager; +Rdb_ddl_manager ddl_manager; +const char *m_mysql_gtid; +Rdb_binlog_manager binlog_manager; + +/** + MyRocks background thread control + N.B. This is besides RocksDB's own background threads + (@see rocksdb::CancelAllBackgroundWork()) +*/ + +static Rdb_background_thread rdb_bg_thread; + +// List of table names (using regex) that are exceptions to the strict +// collation check requirement. +Regex_list_handler *rdb_collation_exceptions; + +static const char *const ERRSTR_ROLLBACK_ONLY = + "This transaction was rolled back and cannot be " + "committed. Only supported operation is to roll it back, " + "so all pending changes will be discarded. 
" + "Please restart another transaction."; + +static void rocksdb_flush_all_memtables() { + const Rdb_cf_manager &cf_manager = rdb_get_cf_manager(); + for (const auto &cf_handle : cf_manager.get_all_cf()) { + rdb->Flush(rocksdb::FlushOptions(), cf_handle); + } +} + +static void rocksdb_compact_column_family_stub( + THD *const thd, struct st_mysql_sys_var *const var, void *const var_ptr, + const void *const save) {} + +static int rocksdb_compact_column_family(THD *const thd, + struct st_mysql_sys_var *const var, + void *const var_ptr, + struct st_mysql_value *const value) { + char buff[STRING_BUFFER_USUAL_SIZE]; + int len = sizeof(buff); + + DBUG_ASSERT(value != nullptr); + + if (const char *const cf = value->val_str(value, buff, &len)) { + bool is_automatic; + auto cfh = cf_manager.get_cf(cf, "", nullptr, &is_automatic); + if (cfh != nullptr && rdb != nullptr) { + sql_print_information("RocksDB: Manual compaction of column family: %s\n", + cf); + rdb->CompactRange(rocksdb::CompactRangeOptions(), cfh, nullptr, nullptr); + } + } + return HA_EXIT_SUCCESS; +} + +/////////////////////////////////////////////////////////// +// Hash map: table name => open table handler +/////////////////////////////////////////////////////////// + +namespace // anonymous namespace = not visible outside this source file +{ + +const ulong TABLE_HASH_SIZE = 32; +typedef Hash_set Rdb_table_set; + +struct Rdb_open_tables_map { + /* Hash table used to track the handlers of open tables */ + Rdb_table_set m_hash; + /* The mutex used to protect the hash table */ + mutable mysql_mutex_t m_mutex; + + static uchar *get_hash_key(const Rdb_table_handler *const table_handler, + size_t *const length, + my_bool not_used MY_ATTRIBUTE((__unused__))); + + Rdb_table_handler *get_table_handler(const char *const table_name); + void release_table_handler(Rdb_table_handler *const table_handler); + + Rdb_open_tables_map() : m_hash(get_hash_key, system_charset_info) { } + + std::vector get_table_names(void) const; 
+}; + +} // anonymous namespace + +static Rdb_open_tables_map rdb_open_tables; + +static std::string rdb_normalize_dir(std::string dir) { + while (dir.size() > 0 && dir.back() == '/') { + dir.resize(dir.size() - 1); + } + return dir; +} + +static int rocksdb_create_checkpoint( + THD *const thd MY_ATTRIBUTE((__unused__)), + struct st_mysql_sys_var *const var MY_ATTRIBUTE((__unused__)), + void *const save MY_ATTRIBUTE((__unused__)), + struct st_mysql_value *const value) { + char buf[FN_REFLEN]; + int len = sizeof(buf); + const char *const checkpoint_dir_raw = value->val_str(value, buf, &len); + if (checkpoint_dir_raw) { + if (rdb != nullptr) { + std::string checkpoint_dir = rdb_normalize_dir(checkpoint_dir_raw); + // NO_LINT_DEBUG + sql_print_information("RocksDB: creating checkpoint in directory : %s\n", + checkpoint_dir.c_str()); + rocksdb::Checkpoint *checkpoint; + auto status = rocksdb::Checkpoint::Create(rdb, &checkpoint); + if (status.ok()) { + status = checkpoint->CreateCheckpoint(checkpoint_dir.c_str()); + if (status.ok()) { + sql_print_information( + "RocksDB: created checkpoint in directory : %s\n", + checkpoint_dir.c_str()); + } else { + my_printf_error( + ER_UNKNOWN_ERROR, + "RocksDB: Failed to create checkpoint directory. status %d %s", + MYF(0), status.code(), status.ToString().c_str()); + } + delete checkpoint; + } else { + const std::string err_text(status.ToString()); + my_printf_error( + ER_UNKNOWN_ERROR, + "RocksDB: failed to initialize checkpoint. 
status %d %s\n", MYF(0), + status.code(), err_text.c_str()); + } + return status.code(); + } + } + return HA_ERR_INTERNAL_ERROR; +} + +/* This method is needed to indicate that the + ROCKSDB_CREATE_CHECKPOINT command is not read-only */ +static void rocksdb_create_checkpoint_stub(THD *const thd, + struct st_mysql_sys_var *const var, + void *const var_ptr, + const void *const save) {} + +static void rocksdb_force_flush_memtable_now_stub( + THD *const thd, struct st_mysql_sys_var *const var, void *const var_ptr, + const void *const save) {} + +static int rocksdb_force_flush_memtable_now( + THD *const thd, struct st_mysql_sys_var *const var, void *const var_ptr, + struct st_mysql_value *const value) { + sql_print_information("RocksDB: Manual memtable flush\n"); + rocksdb_flush_all_memtables(); + return HA_EXIT_SUCCESS; +} + +static void rocksdb_drop_index_wakeup_thread( + my_core::THD *const thd MY_ATTRIBUTE((__unused__)), + struct st_mysql_sys_var *const var MY_ATTRIBUTE((__unused__)), + void *const var_ptr MY_ATTRIBUTE((__unused__)), const void *const save); + +static my_bool rocksdb_pause_background_work = 0; +static mysql_mutex_t rdb_sysvars_mutex; + +static void rocksdb_set_pause_background_work( + my_core::THD *const thd MY_ATTRIBUTE((__unused__)), + struct st_mysql_sys_var *const var MY_ATTRIBUTE((__unused__)), + void *const var_ptr MY_ATTRIBUTE((__unused__)), const void *const save) { + RDB_MUTEX_LOCK_CHECK(rdb_sysvars_mutex); + const bool pause_requested = *static_cast(save); + if (rocksdb_pause_background_work != pause_requested) { + if (pause_requested) { + rdb->PauseBackgroundWork(); + } else { + rdb->ContinueBackgroundWork(); + } + rocksdb_pause_background_work = pause_requested; + } + RDB_MUTEX_UNLOCK_CHECK(rdb_sysvars_mutex); +} + +static void rocksdb_set_compaction_options(THD *thd, + struct st_mysql_sys_var *var, + void *var_ptr, const void *save); + +static void rocksdb_set_table_stats_sampling_pct(THD *thd, + struct st_mysql_sys_var *var, + void 
*var_ptr, + const void *save); + +static void rocksdb_set_rate_limiter_bytes_per_sec(THD *thd, + struct st_mysql_sys_var *var, + void *var_ptr, + const void *save); + +static void rocksdb_set_delayed_write_rate(THD *thd, + struct st_mysql_sys_var *var, + void *var_ptr, const void *save); + +static void rdb_set_collation_exception_list(const char *exception_list); +static void rocksdb_set_collation_exception_list(THD *thd, + struct st_mysql_sys_var *var, + void *var_ptr, + const void *save); + +static void +rocksdb_set_bulk_load(THD *thd, + struct st_mysql_sys_var *var MY_ATTRIBUTE((__unused__)), + void *var_ptr, const void *save); + +static void rocksdb_set_max_background_compactions( + THD *thd, struct st_mysql_sys_var *const var, void *const var_ptr, + const void *const save); +////////////////////////////////////////////////////////////////////////////// +// Options definitions +////////////////////////////////////////////////////////////////////////////// +static long long rocksdb_block_cache_size; +/* Use unsigned long long instead of uint64_t because of MySQL compatibility */ +static unsigned long long // NOLINT(runtime/int) + rocksdb_rate_limiter_bytes_per_sec; +static unsigned long long rocksdb_delayed_write_rate; +static unsigned long // NOLINT(runtime/int) + rocksdb_persistent_cache_size_mb; +static ulong rocksdb_info_log_level; +static char *rocksdb_wal_dir; +static char *rocksdb_persistent_cache_path; +static ulong rocksdb_index_type; +static char rocksdb_background_sync; +static uint32_t rocksdb_debug_optimizer_n_rows; +static my_bool rocksdb_force_compute_memtable_stats; +static my_bool rocksdb_debug_optimizer_no_zero_cardinality; +static uint32_t rocksdb_wal_recovery_mode; +static uint32_t rocksdb_access_hint_on_compaction_start; +static char *rocksdb_compact_cf_name; +static char *rocksdb_checkpoint_name; +static my_bool rocksdb_signal_drop_index_thread; +static my_bool rocksdb_strict_collation_check = 1; +static my_bool rocksdb_enable_2pc = 0; 
+static char *rocksdb_strict_collation_exceptions; +static my_bool rocksdb_collect_sst_properties = 1; +static my_bool rocksdb_force_flush_memtable_now_var = 0; +static uint64_t rocksdb_number_stat_computes = 0; +static uint32_t rocksdb_seconds_between_stat_computes = 3600; +static long long rocksdb_compaction_sequential_deletes = 0l; +static long long rocksdb_compaction_sequential_deletes_window = 0l; +static long long rocksdb_compaction_sequential_deletes_file_size = 0l; +static uint32_t rocksdb_validate_tables = 1; +static char *rocksdb_datadir; +static uint32_t rocksdb_table_stats_sampling_pct; +static my_bool rocksdb_enable_bulk_load_api = 1; +static my_bool rocksdb_print_snapshot_conflict_queries = 0; + +char *compression_types_val= + const_cast(get_rocksdb_supported_compression_types()); + +std::atomic rocksdb_snapshot_conflict_errors(0); +std::atomic rocksdb_wal_group_syncs(0); + +static rocksdb::DBOptions rdb_init_rocksdb_db_options(void) { + rocksdb::DBOptions o; + + o.create_if_missing = true; + o.listeners.push_back(std::make_shared(&ddl_manager)); + o.info_log_level = rocksdb::InfoLogLevel::INFO_LEVEL; + o.max_subcompactions = DEFAULT_SUBCOMPACTIONS; + + return o; +} + +static rocksdb::DBOptions rocksdb_db_options = rdb_init_rocksdb_db_options(); +static rocksdb::BlockBasedTableOptions rocksdb_tbl_options; + +static std::shared_ptr rocksdb_rate_limiter; + +/* This enum needs to be kept up to date with rocksdb::InfoLogLevel */ +static const char *info_log_level_names[] = {"debug_level", "info_level", + "warn_level", "error_level", + "fatal_level", NullS}; + +static TYPELIB info_log_level_typelib = { + array_elements(info_log_level_names) - 1, "info_log_level_typelib", + info_log_level_names, nullptr}; + +static void rocksdb_set_rocksdb_info_log_level( + THD *const thd, struct st_mysql_sys_var *const var, void *const var_ptr, + const void *const save) { + DBUG_ASSERT(save != nullptr); + + RDB_MUTEX_LOCK_CHECK(rdb_sysvars_mutex); + rocksdb_info_log_level 
= *static_cast(save); + rocksdb_db_options.info_log->SetInfoLogLevel( + static_cast(rocksdb_info_log_level)); + RDB_MUTEX_UNLOCK_CHECK(rdb_sysvars_mutex); +} + +static const char *index_type_names[] = {"kBinarySearch", "kHashSearch", NullS}; + +static TYPELIB index_type_typelib = {array_elements(index_type_names) - 1, + "index_type_typelib", index_type_names, + nullptr}; + +const ulong RDB_MAX_LOCK_WAIT_SECONDS = 1024 * 1024 * 1024; +const ulong RDB_MAX_ROW_LOCKS = 1024 * 1024 * 1024; +const ulong RDB_DEFAULT_BULK_LOAD_SIZE = 1000; +const ulong RDB_MAX_BULK_LOAD_SIZE = 1024 * 1024 * 1024; +const size_t RDB_DEFAULT_MERGE_BUF_SIZE = 64 * 1024 * 1024; +const size_t RDB_MIN_MERGE_BUF_SIZE = 100; +const size_t RDB_DEFAULT_MERGE_COMBINE_READ_SIZE = 1024 * 1024 * 1024; +const size_t RDB_MIN_MERGE_COMBINE_READ_SIZE = 100; +const int64 RDB_DEFAULT_BLOCK_CACHE_SIZE = 512 * 1024 * 1024; +const int64 RDB_MIN_BLOCK_CACHE_SIZE = 1024; +const int RDB_MAX_CHECKSUMS_PCT = 100; + +// TODO: 0 means don't wait at all, and we don't support it yet? +static MYSQL_THDVAR_ULONG(lock_wait_timeout, PLUGIN_VAR_RQCMDARG, + "Number of seconds to wait for lock", nullptr, + nullptr, /*default*/ 1, /*min*/ 1, + /*max*/ RDB_MAX_LOCK_WAIT_SECONDS, 0); + +static MYSQL_THDVAR_BOOL(deadlock_detect, PLUGIN_VAR_RQCMDARG, + "Enables deadlock detection", nullptr, nullptr, FALSE); + +static MYSQL_THDVAR_BOOL( + trace_sst_api, PLUGIN_VAR_RQCMDARG, + "Generate trace output in the log for each call to the SstFileWriter", + nullptr, nullptr, FALSE); + +static MYSQL_THDVAR_BOOL( + bulk_load, PLUGIN_VAR_RQCMDARG, + "Use bulk-load mode for inserts. 
This disables " + "unique_checks and enables rocksdb_commit_in_the_middle.", + nullptr, rocksdb_set_bulk_load, FALSE); + +static MYSQL_SYSVAR_BOOL(enable_bulk_load_api, rocksdb_enable_bulk_load_api, + PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, + "Enables using SstFileWriter for bulk loading", + nullptr, nullptr, rocksdb_enable_bulk_load_api); + +static MYSQL_THDVAR_STR(tmpdir, PLUGIN_VAR_OPCMDARG | PLUGIN_VAR_MEMALLOC, + "Directory for temporary files during DDL operations.", + nullptr, nullptr, ""); + +static MYSQL_THDVAR_STR( + skip_unique_check_tables, PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_MEMALLOC, + "Skip unique constraint checking for the specified tables", nullptr, + nullptr, ".*"); + +static MYSQL_THDVAR_BOOL( + commit_in_the_middle, PLUGIN_VAR_RQCMDARG, + "Commit rows implicitly every rocksdb_bulk_load_size, on bulk load/insert, " + "update and delete", + nullptr, nullptr, FALSE); + +static MYSQL_THDVAR_BOOL( + blind_delete_primary_key, PLUGIN_VAR_RQCMDARG, + "Deleting rows by primary key lookup, without reading rows (Blind Deletes)." + " Blind delete is disabled if the table has secondary key", + nullptr, nullptr, FALSE); + +static MYSQL_THDVAR_STR( + read_free_rpl_tables, PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_MEMALLOC, + "List of tables that will use read-free replication on the slave " + "(i.e. 
not lookup a row during replication)", + nullptr, nullptr, ""); + +static MYSQL_THDVAR_BOOL(skip_bloom_filter_on_read, PLUGIN_VAR_RQCMDARG, + "Skip using bloom filter for reads", nullptr, nullptr, + FALSE); + +static MYSQL_THDVAR_ULONG(max_row_locks, PLUGIN_VAR_RQCMDARG, + "Maximum number of locks a transaction can have", + nullptr, nullptr, + /*default*/ RDB_MAX_ROW_LOCKS, + /*min*/ 1, + /*max*/ RDB_MAX_ROW_LOCKS, 0); + +static MYSQL_THDVAR_BOOL( + lock_scanned_rows, PLUGIN_VAR_RQCMDARG, + "Take and hold locks on rows that are scanned but not updated", nullptr, + nullptr, FALSE); + +static MYSQL_THDVAR_ULONG(bulk_load_size, PLUGIN_VAR_RQCMDARG, + "Max #records in a batch for bulk-load mode", nullptr, + nullptr, + /*default*/ RDB_DEFAULT_BULK_LOAD_SIZE, + /*min*/ 1, + /*max*/ RDB_MAX_BULK_LOAD_SIZE, 0); + +static MYSQL_THDVAR_ULONGLONG( + merge_buf_size, PLUGIN_VAR_RQCMDARG, + "Size to allocate for merge sort buffers written out to disk " + "during inplace index creation.", + nullptr, nullptr, + /* default (64MB) */ RDB_DEFAULT_MERGE_BUF_SIZE, + /* min (100B) */ RDB_MIN_MERGE_BUF_SIZE, + /* max */ SIZE_T_MAX, 1); + +static MYSQL_THDVAR_ULONGLONG( + merge_combine_read_size, PLUGIN_VAR_RQCMDARG, + "Size that we have to work with during combine (reading from disk) phase " + "of " + "external sort during fast index creation.", + nullptr, nullptr, + /* default (1GB) */ RDB_DEFAULT_MERGE_COMBINE_READ_SIZE, + /* min (100B) */ RDB_MIN_MERGE_COMBINE_READ_SIZE, + /* max */ SIZE_T_MAX, 1); + +static MYSQL_SYSVAR_BOOL( + create_if_missing, + *reinterpret_cast(&rocksdb_db_options.create_if_missing), + PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, + "DBOptions::create_if_missing for RocksDB", nullptr, nullptr, + rocksdb_db_options.create_if_missing); + +static MYSQL_SYSVAR_BOOL( + create_missing_column_families, + *reinterpret_cast( + &rocksdb_db_options.create_missing_column_families), + PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, + "DBOptions::create_missing_column_families for 
RocksDB", nullptr, nullptr, + rocksdb_db_options.create_missing_column_families); + +static MYSQL_SYSVAR_BOOL( + error_if_exists, + *reinterpret_cast(&rocksdb_db_options.error_if_exists), + PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, + "DBOptions::error_if_exists for RocksDB", nullptr, nullptr, + rocksdb_db_options.error_if_exists); + +static MYSQL_SYSVAR_BOOL( + paranoid_checks, + *reinterpret_cast(&rocksdb_db_options.paranoid_checks), + PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, + "DBOptions::paranoid_checks for RocksDB", nullptr, nullptr, + rocksdb_db_options.paranoid_checks); + +static MYSQL_SYSVAR_ULONGLONG( + rate_limiter_bytes_per_sec, rocksdb_rate_limiter_bytes_per_sec, + PLUGIN_VAR_RQCMDARG, "DBOptions::rate_limiter bytes_per_sec for RocksDB", + nullptr, rocksdb_set_rate_limiter_bytes_per_sec, /* default */ 0L, + /* min */ 0L, /* max */ MAX_RATE_LIMITER_BYTES_PER_SEC, 0); + +static MYSQL_SYSVAR_ULONGLONG(delayed_write_rate, rocksdb_delayed_write_rate, + PLUGIN_VAR_RQCMDARG, + "DBOptions::delayed_write_rate", nullptr, + rocksdb_set_delayed_write_rate, + rocksdb_db_options.delayed_write_rate, 0, + UINT64_MAX, 0); + +static MYSQL_SYSVAR_ENUM( + info_log_level, rocksdb_info_log_level, PLUGIN_VAR_RQCMDARG, + "Filter level for info logs to be written mysqld error log. " + "Valid values include 'debug_level', 'info_level', 'warn_level'" + "'error_level' and 'fatal_level'.", + nullptr, rocksdb_set_rocksdb_info_log_level, + rocksdb::InfoLogLevel::ERROR_LEVEL, &info_log_level_typelib); + +static MYSQL_THDVAR_INT( + perf_context_level, PLUGIN_VAR_RQCMDARG, + "Perf Context Level for rocksdb internal timer stat collection", nullptr, + nullptr, + /* default */ rocksdb::PerfLevel::kUninitialized, + /* min */ rocksdb::PerfLevel::kUninitialized, + /* max */ rocksdb::PerfLevel::kOutOfBounds - 1, 0); + +static MYSQL_SYSVAR_UINT( + wal_recovery_mode, rocksdb_wal_recovery_mode, PLUGIN_VAR_RQCMDARG, + "DBOptions::wal_recovery_mode for RocksDB. 
Default is kAbsoluteConsistency", + nullptr, nullptr, + /* default */ (uint)rocksdb::WALRecoveryMode::kAbsoluteConsistency, + /* min */ (uint)rocksdb::WALRecoveryMode::kTolerateCorruptedTailRecords, + /* max */ (uint)rocksdb::WALRecoveryMode::kSkipAnyCorruptedRecords, 0); + +static MYSQL_SYSVAR_SIZE_T(compaction_readahead_size, + rocksdb_db_options.compaction_readahead_size, + PLUGIN_VAR_RQCMDARG, + "DBOptions::compaction_readahead_size for RocksDB", + nullptr, nullptr, + rocksdb_db_options.compaction_readahead_size, + /* min */ 0L, /* max */ SIZE_T_MAX, 0); + +static MYSQL_SYSVAR_BOOL( + new_table_reader_for_compaction_inputs, + *reinterpret_cast( + &rocksdb_db_options.new_table_reader_for_compaction_inputs), + PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, + "DBOptions::new_table_reader_for_compaction_inputs for RocksDB", nullptr, + nullptr, rocksdb_db_options.new_table_reader_for_compaction_inputs); + +static MYSQL_SYSVAR_UINT( + access_hint_on_compaction_start, rocksdb_access_hint_on_compaction_start, + PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, + "DBOptions::access_hint_on_compaction_start for RocksDB", nullptr, nullptr, + /* default */ (uint)rocksdb::Options::AccessHint::NORMAL, + /* min */ (uint)rocksdb::Options::AccessHint::NONE, + /* max */ (uint)rocksdb::Options::AccessHint::WILLNEED, 0); + +static MYSQL_SYSVAR_BOOL( + allow_concurrent_memtable_write, + *reinterpret_cast( + &rocksdb_db_options.allow_concurrent_memtable_write), + PLUGIN_VAR_RQCMDARG, + "DBOptions::allow_concurrent_memtable_write for RocksDB", nullptr, nullptr, + false); + +static MYSQL_SYSVAR_BOOL( + enable_write_thread_adaptive_yield, + *reinterpret_cast( + &rocksdb_db_options.enable_write_thread_adaptive_yield), + PLUGIN_VAR_RQCMDARG, + "DBOptions::enable_write_thread_adaptive_yield for RocksDB", nullptr, + nullptr, false); + +static MYSQL_SYSVAR_INT(max_open_files, rocksdb_db_options.max_open_files, + PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, + "DBOptions::max_open_files for RocksDB", 
nullptr, + nullptr, rocksdb_db_options.max_open_files, + /* min */ -1, /* max */ INT_MAX, 0); + +static MYSQL_SYSVAR_UINT64_T(max_total_wal_size, + rocksdb_db_options.max_total_wal_size, + PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, + "DBOptions::max_total_wal_size for RocksDB", nullptr, + nullptr, rocksdb_db_options.max_total_wal_size, + /* min */ 0, /* max */ LONGLONG_MAX, 0); + +static MYSQL_SYSVAR_BOOL( + use_fsync, *reinterpret_cast(&rocksdb_db_options.use_fsync), + PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, + "DBOptions::use_fsync for RocksDB", nullptr, nullptr, + rocksdb_db_options.use_fsync); + +static MYSQL_SYSVAR_STR(wal_dir, rocksdb_wal_dir, + PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, + "DBOptions::wal_dir for RocksDB", nullptr, nullptr, + rocksdb_db_options.wal_dir.c_str()); + +static MYSQL_SYSVAR_STR( + persistent_cache_path, rocksdb_persistent_cache_path, + PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, + "Path for BlockBasedTableOptions::persistent_cache for RocksDB", nullptr, + nullptr, ""); + +static MYSQL_SYSVAR_ULONG( + persistent_cache_size_mb, rocksdb_persistent_cache_size_mb, + PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, + "Size of cache in MB for BlockBasedTableOptions::persistent_cache " + "for RocksDB", nullptr, nullptr, rocksdb_persistent_cache_size_mb, + /* min */ 0L, /* max */ ULONG_MAX, 0); + +static MYSQL_SYSVAR_UINT64_T( + delete_obsolete_files_period_micros, + rocksdb_db_options.delete_obsolete_files_period_micros, + PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, + "DBOptions::delete_obsolete_files_period_micros for RocksDB", nullptr, + nullptr, rocksdb_db_options.delete_obsolete_files_period_micros, + /* min */ 0, /* max */ LONGLONG_MAX, 0); + +static MYSQL_SYSVAR_INT(base_background_compactions, + rocksdb_db_options.base_background_compactions, + PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, + "DBOptions::base_background_compactions for RocksDB", + nullptr, nullptr, + rocksdb_db_options.base_background_compactions, + /* min */ -1, /* max */ 
MAX_BACKGROUND_COMPACTIONS, 0); + +static MYSQL_SYSVAR_INT(max_background_compactions, + rocksdb_db_options.max_background_compactions, + PLUGIN_VAR_RQCMDARG, + "DBOptions::max_background_compactions for RocksDB", + nullptr, rocksdb_set_max_background_compactions, + rocksdb_db_options.max_background_compactions, + /* min */ 1, /* max */ MAX_BACKGROUND_COMPACTIONS, 0); + +static MYSQL_SYSVAR_INT(max_background_flushes, + rocksdb_db_options.max_background_flushes, + PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, + "DBOptions::max_background_flushes for RocksDB", + nullptr, nullptr, + rocksdb_db_options.max_background_flushes, + /* min */ 1, /* max */ MAX_BACKGROUND_FLUSHES, 0); + +static MYSQL_SYSVAR_UINT(max_subcompactions, + rocksdb_db_options.max_subcompactions, + PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, + "DBOptions::max_subcompactions for RocksDB", nullptr, + nullptr, rocksdb_db_options.max_subcompactions, + /* min */ 1, /* max */ MAX_SUBCOMPACTIONS, 0); + +static MYSQL_SYSVAR_SIZE_T(max_log_file_size, + rocksdb_db_options.max_log_file_size, + PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, + "DBOptions::max_log_file_size for RocksDB", nullptr, + nullptr, rocksdb_db_options.max_log_file_size, + /* min */ 0L, /* max */ SIZE_T_MAX, 0); + +static MYSQL_SYSVAR_SIZE_T(log_file_time_to_roll, + rocksdb_db_options.log_file_time_to_roll, + PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, + "DBOptions::log_file_time_to_roll for RocksDB", + nullptr, nullptr, + rocksdb_db_options.log_file_time_to_roll, + /* min */ 0L, /* max */ SIZE_T_MAX, 0); + +static MYSQL_SYSVAR_SIZE_T(keep_log_file_num, + rocksdb_db_options.keep_log_file_num, + PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, + "DBOptions::keep_log_file_num for RocksDB", nullptr, + nullptr, rocksdb_db_options.keep_log_file_num, + /* min */ 0L, /* max */ SIZE_T_MAX, 0); + +static MYSQL_SYSVAR_UINT64_T(max_manifest_file_size, + rocksdb_db_options.max_manifest_file_size, + PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, + 
"DBOptions::max_manifest_file_size for RocksDB", + nullptr, nullptr, + rocksdb_db_options.max_manifest_file_size, + /* min */ 0L, /* max */ ULONGLONG_MAX, 0); + +static MYSQL_SYSVAR_INT(table_cache_numshardbits, + rocksdb_db_options.table_cache_numshardbits, + PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, + "DBOptions::table_cache_numshardbits for RocksDB", + nullptr, nullptr, + rocksdb_db_options.table_cache_numshardbits, + /* min */ 0, /* max */ INT_MAX, 0); + +static MYSQL_SYSVAR_UINT64_T(wal_ttl_seconds, rocksdb_db_options.WAL_ttl_seconds, + PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, + "DBOptions::WAL_ttl_seconds for RocksDB", nullptr, + nullptr, rocksdb_db_options.WAL_ttl_seconds, + /* min */ 0L, /* max */ LONGLONG_MAX, 0); + +static MYSQL_SYSVAR_UINT64_T(wal_size_limit_mb, + rocksdb_db_options.WAL_size_limit_MB, + PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, + "DBOptions::WAL_size_limit_MB for RocksDB", nullptr, + nullptr, rocksdb_db_options.WAL_size_limit_MB, + /* min */ 0L, /* max */ LONGLONG_MAX, 0); + +static MYSQL_SYSVAR_SIZE_T(manifest_preallocation_size, + rocksdb_db_options.manifest_preallocation_size, + PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, + "DBOptions::manifest_preallocation_size for RocksDB", + nullptr, nullptr, + rocksdb_db_options.manifest_preallocation_size, + /* min */ 0L, /* max */ SIZE_T_MAX, 0); + +static MYSQL_SYSVAR_BOOL( + use_direct_reads, + *reinterpret_cast(&rocksdb_db_options.use_direct_reads), + PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, + "DBOptions::use_direct_reads for RocksDB", nullptr, nullptr, + rocksdb_db_options.use_direct_reads); + +static MYSQL_SYSVAR_BOOL( + use_direct_writes, + *reinterpret_cast(&rocksdb_db_options.use_direct_writes), + PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, + "DBOptions::use_direct_writes for RocksDB", nullptr, nullptr, + rocksdb_db_options.use_direct_writes); + +static MYSQL_SYSVAR_BOOL( + allow_mmap_reads, + *reinterpret_cast(&rocksdb_db_options.allow_mmap_reads), + PLUGIN_VAR_RQCMDARG | 
PLUGIN_VAR_READONLY, + "DBOptions::allow_mmap_reads for RocksDB", nullptr, nullptr, + rocksdb_db_options.allow_mmap_reads); + +static MYSQL_SYSVAR_BOOL( + allow_mmap_writes, + *reinterpret_cast(&rocksdb_db_options.allow_mmap_writes), + PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, + "DBOptions::allow_mmap_writes for RocksDB", nullptr, nullptr, + rocksdb_db_options.allow_mmap_writes); + +static MYSQL_SYSVAR_BOOL( + is_fd_close_on_exec, + *reinterpret_cast(&rocksdb_db_options.is_fd_close_on_exec), + PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, + "DBOptions::is_fd_close_on_exec for RocksDB", nullptr, nullptr, + rocksdb_db_options.is_fd_close_on_exec); + +static MYSQL_SYSVAR_UINT(stats_dump_period_sec, + rocksdb_db_options.stats_dump_period_sec, + PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, + "DBOptions::stats_dump_period_sec for RocksDB", + nullptr, nullptr, + rocksdb_db_options.stats_dump_period_sec, + /* min */ 0, /* max */ INT_MAX, 0); + +static MYSQL_SYSVAR_BOOL( + advise_random_on_open, + *reinterpret_cast(&rocksdb_db_options.advise_random_on_open), + PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, + "DBOptions::advise_random_on_open for RocksDB", nullptr, nullptr, + rocksdb_db_options.advise_random_on_open); + +static MYSQL_SYSVAR_SIZE_T(db_write_buffer_size, + rocksdb_db_options.db_write_buffer_size, + PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, + "DBOptions::db_write_buffer_size for RocksDB", + nullptr, nullptr, + rocksdb_db_options.db_write_buffer_size, + /* min */ 0L, /* max */ SIZE_T_MAX, 0); + +static MYSQL_SYSVAR_BOOL( + use_adaptive_mutex, + *reinterpret_cast(&rocksdb_db_options.use_adaptive_mutex), + PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, + "DBOptions::use_adaptive_mutex for RocksDB", nullptr, nullptr, + rocksdb_db_options.use_adaptive_mutex); + +static MYSQL_SYSVAR_UINT64_T(bytes_per_sync, rocksdb_db_options.bytes_per_sync, + PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, + "DBOptions::bytes_per_sync for RocksDB", nullptr, + nullptr, 
rocksdb_db_options.bytes_per_sync, + /* min */ 0L, /* max */ ULONGLONG_MAX, 0); + +static MYSQL_SYSVAR_UINT64_T(wal_bytes_per_sync, + rocksdb_db_options.wal_bytes_per_sync, + PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, + "DBOptions::wal_bytes_per_sync for RocksDB", nullptr, + nullptr, rocksdb_db_options.wal_bytes_per_sync, + /* min */ 0L, /* max */ ULONGLONG_MAX, 0); + +static MYSQL_SYSVAR_BOOL( + enable_thread_tracking, + *reinterpret_cast(&rocksdb_db_options.enable_thread_tracking), + PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, + "DBOptions::enable_thread_tracking for RocksDB", nullptr, nullptr, + rocksdb_db_options.enable_thread_tracking); + +static MYSQL_SYSVAR_LONGLONG(block_cache_size, rocksdb_block_cache_size, + PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, + "block_cache size for RocksDB", nullptr, nullptr, + /* default */ RDB_DEFAULT_BLOCK_CACHE_SIZE, + /* min */ RDB_MIN_BLOCK_CACHE_SIZE, + /* max */ LONGLONG_MAX, + /* Block size */ RDB_MIN_BLOCK_CACHE_SIZE); + +static MYSQL_SYSVAR_BOOL( + cache_index_and_filter_blocks, + *reinterpret_cast( + &rocksdb_tbl_options.cache_index_and_filter_blocks), + PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, + "BlockBasedTableOptions::cache_index_and_filter_blocks for RocksDB", + nullptr, nullptr, true); + +// When pin_l0_filter_and_index_blocks_in_cache is true, RocksDB will use the +// LRU cache, but will always keep the filter & idndex block's handle checked +// out (=won't call ShardedLRUCache::Release), plus the parsed out objects +// the LRU cache will never push flush them out, hence they're pinned. +// +// This fixes the mutex contention between :ShardedLRUCache::Lookup and +// ShardedLRUCache::Release which reduced the QPS ratio (QPS using secondary +// index / QPS using PK). 
+static MYSQL_SYSVAR_BOOL( + pin_l0_filter_and_index_blocks_in_cache, + *reinterpret_cast( + &rocksdb_tbl_options.pin_l0_filter_and_index_blocks_in_cache), + PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, + "pin_l0_filter_and_index_blocks_in_cache for RocksDB", nullptr, nullptr, + true); + +static MYSQL_SYSVAR_ENUM(index_type, rocksdb_index_type, + PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, + "BlockBasedTableOptions::index_type for RocksDB", + nullptr, nullptr, + (ulong)rocksdb_tbl_options.index_type, + &index_type_typelib); + +static MYSQL_SYSVAR_BOOL( + hash_index_allow_collision, + *reinterpret_cast( + &rocksdb_tbl_options.hash_index_allow_collision), + PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, + "BlockBasedTableOptions::hash_index_allow_collision for RocksDB", nullptr, + nullptr, rocksdb_tbl_options.hash_index_allow_collision); + +static MYSQL_SYSVAR_BOOL( + no_block_cache, + *reinterpret_cast(&rocksdb_tbl_options.no_block_cache), + PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, + "BlockBasedTableOptions::no_block_cache for RocksDB", nullptr, nullptr, + rocksdb_tbl_options.no_block_cache); + +static MYSQL_SYSVAR_SIZE_T(block_size, rocksdb_tbl_options.block_size, + PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, + "BlockBasedTableOptions::block_size for RocksDB", + nullptr, nullptr, rocksdb_tbl_options.block_size, + /* min */ 1L, /* max */ SIZE_T_MAX, 0); + +static MYSQL_SYSVAR_INT( + block_size_deviation, rocksdb_tbl_options.block_size_deviation, + PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, + "BlockBasedTableOptions::block_size_deviation for RocksDB", nullptr, + nullptr, rocksdb_tbl_options.block_size_deviation, + /* min */ 0, /* max */ INT_MAX, 0); + +static MYSQL_SYSVAR_INT( + block_restart_interval, rocksdb_tbl_options.block_restart_interval, + PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, + "BlockBasedTableOptions::block_restart_interval for RocksDB", nullptr, + nullptr, rocksdb_tbl_options.block_restart_interval, + /* min */ 1, /* max */ INT_MAX, 0); + +static 
MYSQL_SYSVAR_BOOL( + whole_key_filtering, + *reinterpret_cast(&rocksdb_tbl_options.whole_key_filtering), + PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, + "BlockBasedTableOptions::whole_key_filtering for RocksDB", nullptr, nullptr, + rocksdb_tbl_options.whole_key_filtering); + +static MYSQL_SYSVAR_STR(default_cf_options, rocksdb_default_cf_options, + PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, + "default cf options for RocksDB", nullptr, nullptr, ""); + +static MYSQL_SYSVAR_STR(override_cf_options, rocksdb_override_cf_options, + PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, + "option overrides per cf for RocksDB", nullptr, nullptr, + ""); + +static MYSQL_SYSVAR_BOOL(background_sync, rocksdb_background_sync, + PLUGIN_VAR_RQCMDARG, + "turns on background syncs for RocksDB", nullptr, + nullptr, FALSE); + +static MYSQL_THDVAR_UINT(flush_log_at_trx_commit, PLUGIN_VAR_RQCMDARG, + "Sync on transaction commit. Similar to " + "innodb_flush_log_at_trx_commit. 1: sync on commit, " + "0,2: not sync on commit", + nullptr, nullptr, 1, 0, 2, 0); + +static MYSQL_THDVAR_BOOL(write_disable_wal, PLUGIN_VAR_RQCMDARG, + "WriteOptions::disableWAL for RocksDB", nullptr, + nullptr, rocksdb::WriteOptions().disableWAL); + +static MYSQL_THDVAR_BOOL( + write_ignore_missing_column_families, PLUGIN_VAR_RQCMDARG, + "WriteOptions::ignore_missing_column_families for RocksDB", nullptr, + nullptr, rocksdb::WriteOptions().ignore_missing_column_families); + +static MYSQL_THDVAR_BOOL(skip_fill_cache, PLUGIN_VAR_RQCMDARG, + "Skip filling block cache on read requests", nullptr, + nullptr, FALSE); + +static MYSQL_THDVAR_BOOL( + unsafe_for_binlog, PLUGIN_VAR_RQCMDARG, + "Allowing statement based binary logging which may break consistency", + nullptr, nullptr, FALSE); + +static MYSQL_THDVAR_UINT(records_in_range, PLUGIN_VAR_RQCMDARG, + "Used to override the result of records_in_range(). 
" + "Set to a positive number to override", + nullptr, nullptr, 0, + /* min */ 0, /* max */ INT_MAX, 0); + +static MYSQL_THDVAR_UINT(force_index_records_in_range, PLUGIN_VAR_RQCMDARG, + "Used to override the result of records_in_range() " + "when FORCE INDEX is used.", + nullptr, nullptr, 0, + /* min */ 0, /* max */ INT_MAX, 0); + +static MYSQL_SYSVAR_UINT( + debug_optimizer_n_rows, rocksdb_debug_optimizer_n_rows, + PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY | PLUGIN_VAR_NOSYSVAR, + "Test only to override rocksdb estimates of table size in a memtable", + nullptr, nullptr, 0, /* min */ 0, /* max */ INT_MAX, 0); + +static MYSQL_SYSVAR_BOOL(force_compute_memtable_stats, + rocksdb_force_compute_memtable_stats, + PLUGIN_VAR_RQCMDARG, + "Force to always compute memtable stats", + nullptr, nullptr, TRUE); + +static MYSQL_SYSVAR_BOOL( + debug_optimizer_no_zero_cardinality, + rocksdb_debug_optimizer_no_zero_cardinality, PLUGIN_VAR_RQCMDARG, + "In case if cardinality is zero, overrides it with some value", nullptr, + nullptr, TRUE); + +static MYSQL_SYSVAR_STR(compact_cf, rocksdb_compact_cf_name, + PLUGIN_VAR_RQCMDARG, "Compact column family", + rocksdb_compact_column_family, + rocksdb_compact_column_family_stub, ""); + +static MYSQL_SYSVAR_STR(create_checkpoint, rocksdb_checkpoint_name, + PLUGIN_VAR_RQCMDARG, "Checkpoint directory", + rocksdb_create_checkpoint, + rocksdb_create_checkpoint_stub, ""); + +static MYSQL_SYSVAR_BOOL(signal_drop_index_thread, + rocksdb_signal_drop_index_thread, PLUGIN_VAR_RQCMDARG, + "Wake up drop index thread", nullptr, + rocksdb_drop_index_wakeup_thread, FALSE); + +static MYSQL_SYSVAR_BOOL(pause_background_work, rocksdb_pause_background_work, + PLUGIN_VAR_RQCMDARG, + "Disable all rocksdb background operations", nullptr, + rocksdb_set_pause_background_work, FALSE); + +static MYSQL_SYSVAR_BOOL(enable_2pc, rocksdb_enable_2pc, PLUGIN_VAR_RQCMDARG, + "Enable two phase commit for MyRocks", nullptr, + nullptr, TRUE); + +static 
MYSQL_SYSVAR_BOOL(strict_collation_check, rocksdb_strict_collation_check, + PLUGIN_VAR_RQCMDARG, + "Enforce case sensitive collation for MyRocks indexes", + nullptr, nullptr, TRUE); + +static MYSQL_SYSVAR_STR(strict_collation_exceptions, + rocksdb_strict_collation_exceptions, + PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_MEMALLOC, + "List of tables (using regex) that are excluded " + "from the case sensitive collation enforcement", + nullptr, rocksdb_set_collation_exception_list, ""); + +static MYSQL_SYSVAR_BOOL(collect_sst_properties, rocksdb_collect_sst_properties, + PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, + "Enables collecting SST file properties on each flush", + nullptr, nullptr, rocksdb_collect_sst_properties); + +static MYSQL_SYSVAR_BOOL( + force_flush_memtable_now, rocksdb_force_flush_memtable_now_var, + PLUGIN_VAR_RQCMDARG, + "Forces memstore flush which may block all write requests so be careful", + rocksdb_force_flush_memtable_now, rocksdb_force_flush_memtable_now_stub, + FALSE); + +static MYSQL_THDVAR_BOOL( + flush_memtable_on_analyze, PLUGIN_VAR_RQCMDARG, + "Forces memtable flush on ANALZYE table to get accurate cardinality", + nullptr, nullptr, true); + +static MYSQL_SYSVAR_UINT( + seconds_between_stat_computes, rocksdb_seconds_between_stat_computes, + PLUGIN_VAR_RQCMDARG, + "Sets a number of seconds to wait between optimizer stats recomputation. 
" + "Only changed indexes will be refreshed.", + nullptr, nullptr, rocksdb_seconds_between_stat_computes, + /* min */ 0L, /* max */ UINT_MAX, 0); + +static MYSQL_SYSVAR_LONGLONG(compaction_sequential_deletes, + rocksdb_compaction_sequential_deletes, + PLUGIN_VAR_RQCMDARG, + "RocksDB will trigger compaction for the file if " + "it has more than this number sequential deletes " + "per window", + nullptr, rocksdb_set_compaction_options, + DEFAULT_COMPACTION_SEQUENTIAL_DELETES, + /* min */ 0L, + /* max */ MAX_COMPACTION_SEQUENTIAL_DELETES, 0); + +static MYSQL_SYSVAR_LONGLONG( + compaction_sequential_deletes_window, + rocksdb_compaction_sequential_deletes_window, PLUGIN_VAR_RQCMDARG, + "Size of the window for counting rocksdb_compaction_sequential_deletes", + nullptr, rocksdb_set_compaction_options, + DEFAULT_COMPACTION_SEQUENTIAL_DELETES_WINDOW, + /* min */ 0L, /* max */ MAX_COMPACTION_SEQUENTIAL_DELETES_WINDOW, 0); + +static MYSQL_SYSVAR_LONGLONG( + compaction_sequential_deletes_file_size, + rocksdb_compaction_sequential_deletes_file_size, PLUGIN_VAR_RQCMDARG, + "Minimum file size required for compaction_sequential_deletes", nullptr, + rocksdb_set_compaction_options, 0L, + /* min */ -1L, /* max */ LONGLONG_MAX, 0); + +static MYSQL_SYSVAR_BOOL( + compaction_sequential_deletes_count_sd, + rocksdb_compaction_sequential_deletes_count_sd, PLUGIN_VAR_RQCMDARG, + "Counting SingleDelete as rocksdb_compaction_sequential_deletes", nullptr, + nullptr, rocksdb_compaction_sequential_deletes_count_sd); + + +static MYSQL_SYSVAR_BOOL( + print_snapshot_conflict_queries, rocksdb_print_snapshot_conflict_queries, + PLUGIN_VAR_RQCMDARG, + "Logging queries that got snapshot conflict errors into *.err log", nullptr, + nullptr, rocksdb_print_snapshot_conflict_queries); + +static MYSQL_THDVAR_INT(checksums_pct, PLUGIN_VAR_RQCMDARG, + "How many percentages of rows to be checksummed", + nullptr, nullptr, RDB_MAX_CHECKSUMS_PCT, + /* min */ 0, /* max */ RDB_MAX_CHECKSUMS_PCT, 0); + +static 
MYSQL_THDVAR_BOOL(store_row_debug_checksums, PLUGIN_VAR_RQCMDARG, + "Include checksums when writing index/table records", + nullptr, nullptr, false /* default value */); + +static MYSQL_THDVAR_BOOL(verify_row_debug_checksums, PLUGIN_VAR_RQCMDARG, + "Verify checksums when reading index/table records", + nullptr, nullptr, false /* default value */); + +static MYSQL_THDVAR_BOOL(master_skip_tx_api, PLUGIN_VAR_RQCMDARG, + "Skipping holding any lock on row access. " + "Not effective on slave.", + nullptr, nullptr, false); + +static MYSQL_SYSVAR_UINT( + validate_tables, rocksdb_validate_tables, + PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, + "Verify all .frm files match all RocksDB tables (0 means no verification, " + "1 means verify and fail on error, and 2 means verify but continue", + nullptr, nullptr, 1 /* default value */, 0 /* min value */, + 2 /* max value */, 0); + +static MYSQL_SYSVAR_STR(datadir, rocksdb_datadir, + PLUGIN_VAR_OPCMDARG | PLUGIN_VAR_READONLY, + "RocksDB data directory", nullptr, nullptr, + "./.rocksdb"); + +static MYSQL_SYSVAR_STR(supported_compression_types, + compression_types_val, + PLUGIN_VAR_NOCMDOPT | PLUGIN_VAR_READONLY, + "Compression algorithms supported by RocksDB", + nullptr, nullptr, + compression_types_val); + +static MYSQL_SYSVAR_UINT( + table_stats_sampling_pct, rocksdb_table_stats_sampling_pct, + PLUGIN_VAR_RQCMDARG, + "Percentage of entries to sample when collecting statistics about table " + "properties. Specify either 0 to sample everything or percentage " + "[" STRINGIFY_ARG(RDB_TBL_STATS_SAMPLE_PCT_MIN) ".." STRINGIFY_ARG( + RDB_TBL_STATS_SAMPLE_PCT_MAX) "]. 
" + "By default " STRINGIFY_ARG( + RDB_DEFAULT_TBL_STATS_SAMPLE_PCT) "% " + "of" + " e" + "nt" + "ri" + "es" + " a" + "re" + " " + "sa" + "mp" + "le" + "d" + ".", + nullptr, rocksdb_set_table_stats_sampling_pct, /* default */ + RDB_DEFAULT_TBL_STATS_SAMPLE_PCT, /* everything */ 0, + /* max */ RDB_TBL_STATS_SAMPLE_PCT_MAX, 0); + +static const int ROCKSDB_ASSUMED_KEY_VALUE_DISK_SIZE = 100; + +static struct st_mysql_sys_var *rocksdb_system_variables[] = { + MYSQL_SYSVAR(lock_wait_timeout), + MYSQL_SYSVAR(deadlock_detect), + MYSQL_SYSVAR(max_row_locks), + MYSQL_SYSVAR(lock_scanned_rows), + MYSQL_SYSVAR(bulk_load), + MYSQL_SYSVAR(skip_unique_check_tables), + MYSQL_SYSVAR(trace_sst_api), + MYSQL_SYSVAR(commit_in_the_middle), + MYSQL_SYSVAR(blind_delete_primary_key), + MYSQL_SYSVAR(read_free_rpl_tables), + MYSQL_SYSVAR(bulk_load_size), + MYSQL_SYSVAR(merge_buf_size), + MYSQL_SYSVAR(enable_bulk_load_api), + MYSQL_SYSVAR(tmpdir), + MYSQL_SYSVAR(merge_combine_read_size), + MYSQL_SYSVAR(skip_bloom_filter_on_read), + + MYSQL_SYSVAR(create_if_missing), + MYSQL_SYSVAR(create_missing_column_families), + MYSQL_SYSVAR(error_if_exists), + MYSQL_SYSVAR(paranoid_checks), + MYSQL_SYSVAR(rate_limiter_bytes_per_sec), + MYSQL_SYSVAR(delayed_write_rate), + MYSQL_SYSVAR(info_log_level), + MYSQL_SYSVAR(max_open_files), + MYSQL_SYSVAR(max_total_wal_size), + MYSQL_SYSVAR(use_fsync), + MYSQL_SYSVAR(wal_dir), + MYSQL_SYSVAR(persistent_cache_path), + MYSQL_SYSVAR(persistent_cache_size_mb), + MYSQL_SYSVAR(delete_obsolete_files_period_micros), + MYSQL_SYSVAR(base_background_compactions), + MYSQL_SYSVAR(max_background_compactions), + MYSQL_SYSVAR(max_background_flushes), + MYSQL_SYSVAR(max_log_file_size), + MYSQL_SYSVAR(max_subcompactions), + MYSQL_SYSVAR(log_file_time_to_roll), + MYSQL_SYSVAR(keep_log_file_num), + MYSQL_SYSVAR(max_manifest_file_size), + MYSQL_SYSVAR(table_cache_numshardbits), + MYSQL_SYSVAR(wal_ttl_seconds), + MYSQL_SYSVAR(wal_size_limit_mb), + 
MYSQL_SYSVAR(manifest_preallocation_size), + MYSQL_SYSVAR(use_direct_reads), + MYSQL_SYSVAR(use_direct_writes), + MYSQL_SYSVAR(allow_mmap_reads), + MYSQL_SYSVAR(allow_mmap_writes), + MYSQL_SYSVAR(is_fd_close_on_exec), + MYSQL_SYSVAR(stats_dump_period_sec), + MYSQL_SYSVAR(advise_random_on_open), + MYSQL_SYSVAR(db_write_buffer_size), + MYSQL_SYSVAR(use_adaptive_mutex), + MYSQL_SYSVAR(bytes_per_sync), + MYSQL_SYSVAR(wal_bytes_per_sync), + MYSQL_SYSVAR(enable_thread_tracking), + MYSQL_SYSVAR(perf_context_level), + MYSQL_SYSVAR(wal_recovery_mode), + MYSQL_SYSVAR(access_hint_on_compaction_start), + MYSQL_SYSVAR(new_table_reader_for_compaction_inputs), + MYSQL_SYSVAR(compaction_readahead_size), + MYSQL_SYSVAR(allow_concurrent_memtable_write), + MYSQL_SYSVAR(enable_write_thread_adaptive_yield), + + MYSQL_SYSVAR(block_cache_size), + MYSQL_SYSVAR(cache_index_and_filter_blocks), + MYSQL_SYSVAR(pin_l0_filter_and_index_blocks_in_cache), + MYSQL_SYSVAR(index_type), + MYSQL_SYSVAR(hash_index_allow_collision), + MYSQL_SYSVAR(no_block_cache), + MYSQL_SYSVAR(block_size), + MYSQL_SYSVAR(block_size_deviation), + MYSQL_SYSVAR(block_restart_interval), + MYSQL_SYSVAR(whole_key_filtering), + + MYSQL_SYSVAR(default_cf_options), + MYSQL_SYSVAR(override_cf_options), + + MYSQL_SYSVAR(background_sync), + + MYSQL_SYSVAR(flush_log_at_trx_commit), + MYSQL_SYSVAR(write_disable_wal), + MYSQL_SYSVAR(write_ignore_missing_column_families), + + MYSQL_SYSVAR(skip_fill_cache), + MYSQL_SYSVAR(unsafe_for_binlog), + + MYSQL_SYSVAR(records_in_range), + MYSQL_SYSVAR(force_index_records_in_range), + MYSQL_SYSVAR(debug_optimizer_n_rows), + MYSQL_SYSVAR(force_compute_memtable_stats), + MYSQL_SYSVAR(debug_optimizer_no_zero_cardinality), + + MYSQL_SYSVAR(compact_cf), + MYSQL_SYSVAR(signal_drop_index_thread), + MYSQL_SYSVAR(pause_background_work), + MYSQL_SYSVAR(enable_2pc), + MYSQL_SYSVAR(strict_collation_check), + MYSQL_SYSVAR(strict_collation_exceptions), + MYSQL_SYSVAR(collect_sst_properties), + 
MYSQL_SYSVAR(force_flush_memtable_now), + MYSQL_SYSVAR(flush_memtable_on_analyze), + MYSQL_SYSVAR(seconds_between_stat_computes), + + MYSQL_SYSVAR(compaction_sequential_deletes), + MYSQL_SYSVAR(compaction_sequential_deletes_window), + MYSQL_SYSVAR(compaction_sequential_deletes_file_size), + MYSQL_SYSVAR(compaction_sequential_deletes_count_sd), + MYSQL_SYSVAR(print_snapshot_conflict_queries), + + MYSQL_SYSVAR(datadir), + MYSQL_SYSVAR(supported_compression_types), + MYSQL_SYSVAR(create_checkpoint), + + MYSQL_SYSVAR(checksums_pct), + MYSQL_SYSVAR(store_row_debug_checksums), + MYSQL_SYSVAR(verify_row_debug_checksums), + MYSQL_SYSVAR(master_skip_tx_api), + + MYSQL_SYSVAR(validate_tables), + MYSQL_SYSVAR(table_stats_sampling_pct), + nullptr}; + +static rocksdb::WriteOptions +rdb_get_rocksdb_write_options(my_core::THD *const thd) { + rocksdb::WriteOptions opt; + + opt.sync = THDVAR(thd, flush_log_at_trx_commit) == 1; + opt.disableWAL = THDVAR(thd, write_disable_wal); + opt.ignore_missing_column_families = + THDVAR(thd, write_ignore_missing_column_families); + + return opt; +} + +/////////////////////////////////////////////////////////////////////////////////////////// + +/** + @brief + Function we use in the creation of our hash to get key. 
+*/ + +uchar * +Rdb_open_tables_map::get_hash_key(const Rdb_table_handler *const table_handler, + size_t *const length, + my_bool not_used MY_ATTRIBUTE((__unused__))) { + *length = table_handler->m_table_name_length; + return reinterpret_cast(table_handler->m_table_name); +} + +/* + Drop index thread's control +*/ + +static Rdb_drop_index_thread rdb_drop_idx_thread; + +static void rocksdb_drop_index_wakeup_thread( + my_core::THD *const thd MY_ATTRIBUTE((__unused__)), + struct st_mysql_sys_var *const var MY_ATTRIBUTE((__unused__)), + void *const var_ptr MY_ATTRIBUTE((__unused__)), const void *const save) { + if (*static_cast(save)) { + rdb_drop_idx_thread.signal(); + } +} + +static inline uint32_t rocksdb_perf_context_level(THD *const thd) { + DBUG_ASSERT(thd != nullptr); + + const int session_perf_context_level = THDVAR(thd, perf_context_level); + if (session_perf_context_level > rocksdb::PerfLevel::kUninitialized) { + return session_perf_context_level; + } + + /* + Fallback to global thdvar, if session specific one was not set to a valid + value. + */ + + const int global_perf_context_level = THDVAR(nullptr, perf_context_level); + if (global_perf_context_level > rocksdb::PerfLevel::kUninitialized) { + return global_perf_context_level; + } + + return rocksdb::PerfLevel::kDisable; +} + +/* + Very short (functor-like) interface to be passed to + Rdb_transaction::walk_tx_list() +*/ + +interface Rdb_tx_list_walker { + virtual ~Rdb_tx_list_walker() {} + virtual void process_tran(const Rdb_transaction *const) = 0; +}; + +/* + This is a helper class that is passed to RocksDB to get notifications when + a snapshot gets created. 
+*/ + +class Rdb_snapshot_notifier : public rocksdb::TransactionNotifier { + Rdb_transaction *m_owning_tx; + + void SnapshotCreated(const rocksdb::Snapshot *snapshot) override; + +public: + Rdb_snapshot_notifier(const Rdb_snapshot_notifier &) = delete; + Rdb_snapshot_notifier &operator=(const Rdb_snapshot_notifier &) = delete; + + explicit Rdb_snapshot_notifier(Rdb_transaction *const owning_tx) + : m_owning_tx(owning_tx) {} + + // If the owning Rdb_transaction gets destructed we need to not reference + // it anymore. + void detach() { m_owning_tx = nullptr; } +}; + + +#ifdef MARIAROCKS_NOT_YET +// ER_LOCK_WAIT_TIMEOUT error also has a reason in facebook/mysql-5.6 +#endif +String timeout_message(const char *command, const char *name1, + const char *name2) +{ + String msg; + msg.append("Timeout on "); + msg.append(command); + msg.append(": "); + msg.append(name1); + if (name2 && name2[0]) + { + msg.append("."); + msg.append(name2); + } + return msg; +} + + +/* This is the base class for transactions when interacting with rocksdb. +*/ +class Rdb_transaction { +protected: + ulonglong m_write_count = 0; + ulonglong m_lock_count = 0; + + bool m_is_delayed_snapshot = false; + bool m_is_two_phase = false; + + THD *m_thd = nullptr; + + rocksdb::ReadOptions m_read_opts; + + static std::multiset s_tx_list; + static mysql_mutex_t s_tx_list_mutex; + + Rdb_io_perf *m_tbl_io_perf; + + bool m_tx_read_only = false; + + int m_timeout_sec; /* Cached value of @@rocksdb_lock_wait_timeout */ + + /* Maximum number of locks the transaction can have */ + ulonglong m_max_row_locks; + + bool m_is_tx_failed = false; + bool m_rollback_only = false; + + std::shared_ptr m_notifier; + + // This should be used only when updating binlog information. 
+ virtual rocksdb::WriteBatchBase *get_write_batch() = 0; + virtual bool commit_no_binlog() = 0; + virtual rocksdb::Iterator * + get_iterator(const rocksdb::ReadOptions &options, + rocksdb::ColumnFamilyHandle *column_family) = 0; + +public: + const char *m_mysql_log_file_name; + my_off_t m_mysql_log_offset; + const char *m_mysql_gtid; + const char *m_mysql_max_gtid; + String m_detailed_error; + int64_t m_snapshot_timestamp = 0; + bool m_ddl_transaction; + + /* + for distinction between rdb_transaction_impl and rdb_writebatch_impl + when using walk tx list + */ + virtual bool is_writebatch_trx() const = 0; + + static void init_mutex() { + mysql_mutex_init(key_mutex_tx_list, &s_tx_list_mutex, MY_MUTEX_INIT_FAST); + } + + static void term_mutex() { + DBUG_ASSERT(s_tx_list.size() == 0); + mysql_mutex_destroy(&s_tx_list_mutex); + } + + static void walk_tx_list(Rdb_tx_list_walker *walker) { + DBUG_ASSERT(walker != nullptr); + + RDB_MUTEX_LOCK_CHECK(s_tx_list_mutex); + + for (auto it : s_tx_list) + walker->process_tran(it); + + RDB_MUTEX_UNLOCK_CHECK(s_tx_list_mutex); + } + + int set_status_error(THD *const thd, const rocksdb::Status &s, + const Rdb_key_def &kd, Rdb_tbl_def *const tbl_def) { + DBUG_ASSERT(!s.ok()); + DBUG_ASSERT(tbl_def != nullptr); + + if (s.IsTimedOut()) { + /* + SQL layer has weird expectations. If we return an error when + doing a read in DELETE IGNORE, it will ignore the error ("because it's + an IGNORE command!) but then will fail an assert, because "error code + was returned, but no error happened". 
Do what InnoDB's + convert_error_code_to_mysql() does: force a statement + rollback before returning HA_ERR_LOCK_WAIT_TIMEOUT: + */ + my_core::thd_mark_transaction_to_rollback(thd, false /*just statement*/); + m_detailed_error.copy(timeout_message( + "index", tbl_def->full_tablename().c_str(), kd.get_name().c_str())); + + return HA_ERR_LOCK_WAIT_TIMEOUT; + } + + if (s.IsDeadlock()) { + my_core::thd_mark_transaction_to_rollback(thd, + false /* just statement */); + return HA_ERR_LOCK_DEADLOCK; + } else if (s.IsBusy()) { + rocksdb_snapshot_conflict_errors++; + if (rocksdb_print_snapshot_conflict_queries) { + char user_host_buff[MAX_USER_HOST_SIZE + 1]; + make_user_name(thd, user_host_buff); + // NO_LINT_DEBUG + sql_print_warning("Got snapshot conflict errors: User: %s " + "Query: %s", + user_host_buff, thd->query()); + } + return HA_ERR_LOCK_DEADLOCK; + } + + if (s.IsLockLimit()) { + return HA_ERR_ROCKSDB_TOO_MANY_LOCKS; + } + + if (s.IsIOError() || s.IsCorruption()) { + rdb_handle_io_error(s, RDB_IO_ERROR_GENERAL); + } + my_error(ER_INTERNAL_ERROR, MYF(0), s.ToString().c_str()); + return HA_ERR_INTERNAL_ERROR; + } + + THD *get_thd() const { return m_thd; } + + /* Used for tracking io_perf counters */ + void io_perf_start(Rdb_io_perf *const io_perf) { + /* + Since perf_context is tracked per thread, it is difficult and expensive + to maintain perf_context on a per table basis. Therefore, roll all + perf_context data into the first table used in a query. This works well + for single table queries and is probably good enough for queries that hit + multiple tables. + + perf_context stats gathering is started when the table lock is acquired + or when ha_rocksdb::start_stmt is called in case of LOCK TABLES. They + are recorded when the table lock is released, or when commit/rollback + is called on the transaction, whichever comes first. Table lock release + and commit/rollback can happen in different orders. 
In the case where + the lock is released before commit/rollback is called, an extra step to + gather stats during commit/rollback is needed. + */ + if (m_tbl_io_perf == nullptr && + io_perf->start(rocksdb_perf_context_level(m_thd))) { + m_tbl_io_perf = io_perf; + } + } + + void io_perf_end_and_record(void) { + if (m_tbl_io_perf != nullptr) { + m_tbl_io_perf->end_and_record(rocksdb_perf_context_level(m_thd)); + m_tbl_io_perf = nullptr; + } + } + + void io_perf_end_and_record(Rdb_io_perf *const io_perf) { + if (m_tbl_io_perf == io_perf) { + io_perf_end_and_record(); + } + } + + void set_params(int timeout_sec_arg, int max_row_locks_arg) { + m_timeout_sec = timeout_sec_arg; + m_max_row_locks = max_row_locks_arg; + set_lock_timeout(timeout_sec_arg); + } + + virtual void set_lock_timeout(int timeout_sec_arg) = 0; + + ulonglong get_write_count() const { return m_write_count; } + + int get_timeout_sec() const { return m_timeout_sec; } + + ulonglong get_lock_count() const { return m_lock_count; } + + virtual void set_sync(bool sync) = 0; + + virtual void release_lock(rocksdb::ColumnFamilyHandle *const column_family, + const std::string &rowkey) = 0; + + virtual bool prepare(const rocksdb::TransactionName &name) = 0; + + bool commit_or_rollback() { + bool res; + if (m_is_tx_failed) { + rollback(); + res = false; + } else + res = commit(); + return res; + } + + bool commit() { + if (get_write_count() == 0) { + rollback(); + return false; + } else if (m_rollback_only) { + /* + Transactions marked as rollback_only are expected to be rolled back at + prepare(). But there are some exceptions like below that prepare() is + never called and commit() is called instead. + 1. Binlog is disabled + 2. No modification exists in binlog cache for the transaction (#195) + In both cases, rolling back transaction is safe. Nothing is written to + binlog. 
+ */ + my_printf_error(ER_UNKNOWN_ERROR, ERRSTR_ROLLBACK_ONLY, MYF(0)); + rollback(); + return true; + } else { +#ifdef MARIAROCKS_NOT_YET + my_core::thd_binlog_pos(m_thd, &m_mysql_log_file_name, + &m_mysql_log_offset, &m_mysql_gtid, + &m_mysql_max_gtid); + binlog_manager.update(m_mysql_log_file_name, m_mysql_log_offset, + m_mysql_max_gtid, get_write_batch()); +#endif + return commit_no_binlog(); + } + } + + virtual void rollback() = 0; + + void snapshot_created(const rocksdb::Snapshot *const snapshot) { + DBUG_ASSERT(snapshot != nullptr); + + m_read_opts.snapshot = snapshot; + rdb->GetEnv()->GetCurrentTime(&m_snapshot_timestamp); + m_is_delayed_snapshot = false; + } + + virtual void acquire_snapshot(bool acquire_now) = 0; + virtual void release_snapshot() = 0; + + bool has_snapshot() const { return m_read_opts.snapshot != nullptr; } + +private: + // The tables we are currently loading. In a partitioned table this can + // have more than one entry + std::vector m_curr_bulk_load; + +public: + int finish_bulk_load() { + int rc = 0; + + std::vector::iterator it; + while ((it = m_curr_bulk_load.begin()) != m_curr_bulk_load.end()) { + int rc2 = (*it)->finalize_bulk_load(); + if (rc2 != 0 && rc == 0) { + rc = rc2; + } + } + + DBUG_ASSERT(m_curr_bulk_load.size() == 0); + + return rc; + } + + void start_bulk_load(ha_rocksdb *const bulk_load) { + /* + If we already have an open bulk load of a table and the name doesn't + match the current one, close out the currently running one. This allows + multiple bulk loads to occur on a partitioned table, but then closes + them all out when we switch to another table. 
+ */ + DBUG_ASSERT(bulk_load != nullptr); + + if (!m_curr_bulk_load.empty() && + !bulk_load->same_table(*m_curr_bulk_load[0])) { + const auto res = finish_bulk_load(); + SHIP_ASSERT(res == 0); + } + + m_curr_bulk_load.push_back(bulk_load); + } + + void end_bulk_load(ha_rocksdb *const bulk_load) { + for (auto it = m_curr_bulk_load.begin(); it != m_curr_bulk_load.end(); + it++) { + if (*it == bulk_load) { + m_curr_bulk_load.erase(it); + return; + } + } + + // Should not reach here + SHIP_ASSERT(0); + } + + int num_ongoing_bulk_load() const { return m_curr_bulk_load.size(); } + + /* + Flush the data accumulated so far. This assumes we're doing a bulk insert. + + @detail + This should work like transaction commit, except that we don't + synchronize with the binlog (there is no API that would allow to have + binlog flush the changes accumulated so far and return its current + position) + + @todo + Add test coverage for what happens when somebody attempts to do bulk + inserts while inside a multi-statement transaction. + */ + bool flush_batch() { + if (get_write_count() == 0) + return false; + + /* Commit the current transaction */ + if (commit_no_binlog()) + return true; + + /* Start another one */ + start_tx(); + return false; + } + + virtual rocksdb::Status put(rocksdb::ColumnFamilyHandle *const column_family, + const rocksdb::Slice &key, + const rocksdb::Slice &value) = 0; + virtual rocksdb::Status + delete_key(rocksdb::ColumnFamilyHandle *const column_family, + const rocksdb::Slice &key) = 0; + virtual rocksdb::Status + single_delete(rocksdb::ColumnFamilyHandle *const column_family, + const rocksdb::Slice &key) = 0; + + virtual bool has_modifications() const = 0; + + virtual rocksdb::WriteBatchBase *get_indexed_write_batch() = 0; + /* + Return a WriteBatch that one can write to. The writes will skip any + transaction locking. The writes will NOT be visible to the transaction. 
+ */ + rocksdb::WriteBatchBase *get_blind_write_batch() { + return get_indexed_write_batch()->GetWriteBatch(); + } + + virtual rocksdb::Status get(rocksdb::ColumnFamilyHandle *const column_family, + const rocksdb::Slice &key, + std::string *value) const = 0; + virtual rocksdb::Status + get_for_update(rocksdb::ColumnFamilyHandle *const column_family, + const rocksdb::Slice &key, std::string *const value, + bool exclusive) = 0; + + rocksdb::Iterator * + get_iterator(rocksdb::ColumnFamilyHandle *const column_family, + bool skip_bloom_filter, bool fill_cache, + bool read_current = false, bool create_snapshot = true) { + // Make sure we are not doing both read_current (which implies we don't + // want a snapshot) and create_snapshot which makes sure we create + // a snapshot + DBUG_ASSERT(column_family != nullptr); + DBUG_ASSERT(!read_current || !create_snapshot); + + if (create_snapshot) + acquire_snapshot(true); + + rocksdb::ReadOptions options = m_read_opts; + + if (skip_bloom_filter) { + options.total_order_seek = true; + } else { + // With this option, Iterator::Valid() returns false if key + // is outside of the prefix bloom filter range set at Seek(). + // Must not be set to true if not using bloom filter. 
+ options.prefix_same_as_start = true; + } + options.fill_cache = fill_cache; + if (read_current) { + options.snapshot = nullptr; + } + return get_iterator(options, column_family); + } + + virtual bool is_tx_started() const = 0; + virtual void start_tx() = 0; + virtual void start_stmt() = 0; + virtual void rollback_stmt() = 0; + + void set_tx_failed(bool failed_arg) { m_is_tx_failed = failed_arg; } + + bool can_prepare() const { + if (m_rollback_only) { + my_printf_error(ER_UNKNOWN_ERROR, ERRSTR_ROLLBACK_ONLY, MYF(0)); + return false; + } + return true; + } + + int rollback_to_savepoint(void *const savepoint) { + if (has_modifications()) { + my_printf_error(ER_UNKNOWN_ERROR, + "MyRocks currently does not support ROLLBACK TO " + "SAVEPOINT if modifying rows.", + MYF(0)); + m_rollback_only = true; + return HA_EXIT_FAILURE; + } + return HA_EXIT_SUCCESS; + } + + /* + This is used by transactions started with "START TRANSACTION WITH " + "CONSISTENT [ROCKSDB] SNAPSHOT". When tx_read_only is turned on, + snapshot has to be created via DB::GetSnapshot(), not via Transaction + API. + */ + bool is_tx_read_only() const { return m_tx_read_only; } + + bool is_two_phase() const { return m_is_two_phase; } + + void set_tx_read_only(bool val) { m_tx_read_only = val; } + + explicit Rdb_transaction(THD *const thd) + : m_thd(thd), m_tbl_io_perf(nullptr) { + RDB_MUTEX_LOCK_CHECK(s_tx_list_mutex); + s_tx_list.insert(this); + RDB_MUTEX_UNLOCK_CHECK(s_tx_list_mutex); + } + + virtual ~Rdb_transaction() { + RDB_MUTEX_LOCK_CHECK(s_tx_list_mutex); + s_tx_list.erase(this); + RDB_MUTEX_UNLOCK_CHECK(s_tx_list_mutex); + } +}; + +/* + This is a rocksdb transaction. Its members represent the current transaction, + which consists of: + - the snapshot + - the changes we've made but are not seeing yet. + + The changes are made to individual tables, which store them here and then + this object commits them on commit. 
+*/ +class Rdb_transaction_impl : public Rdb_transaction { + rocksdb::Transaction *m_rocksdb_tx = nullptr; + rocksdb::Transaction *m_rocksdb_reuse_tx = nullptr; + +public: + void set_lock_timeout(int timeout_sec_arg) override { + if (m_rocksdb_tx) + m_rocksdb_tx->SetLockTimeout(rdb_convert_sec_to_ms(m_timeout_sec)); + } + + void set_sync(bool sync) override { + m_rocksdb_tx->GetWriteOptions()->sync = sync; + } + + void release_lock(rocksdb::ColumnFamilyHandle *const column_family, + const std::string &rowkey) override { + if (!THDVAR(m_thd, lock_scanned_rows)) { + m_rocksdb_tx->UndoGetForUpdate(column_family, rocksdb::Slice(rowkey)); + } + } + + virtual bool is_writebatch_trx() const override { return false; } + +private: + void release_tx(void) { + // We are done with the current active transaction object. Preserve it + // for later reuse. + DBUG_ASSERT(m_rocksdb_reuse_tx == nullptr); + m_rocksdb_reuse_tx = m_rocksdb_tx; + m_rocksdb_tx = nullptr; + } + + bool prepare(const rocksdb::TransactionName &name) override { + rocksdb::Status s; + s = m_rocksdb_tx->SetName(name); + if (!s.ok()) { + rdb_handle_io_error(s, RDB_IO_ERROR_TX_COMMIT); + return false; + } + + s = m_rocksdb_tx->Prepare(); + if (!s.ok()) { + rdb_handle_io_error(s, RDB_IO_ERROR_TX_COMMIT); + return false; + } + return true; + } + + bool commit_no_binlog() override { + bool res = false; + release_snapshot(); + const rocksdb::Status s = m_rocksdb_tx->Commit(); + if (!s.ok()) { + rdb_handle_io_error(s, RDB_IO_ERROR_TX_COMMIT); + res = true; + } + + /* Save the transaction object to be reused */ + release_tx(); + + m_write_count = 0; + m_lock_count = 0; + set_tx_read_only(false); + m_rollback_only = false; + return res; + } + +public: + void rollback() override { + m_write_count = 0; + m_lock_count = 0; + m_ddl_transaction = false; + if (m_rocksdb_tx) { + release_snapshot(); + /* This will also release all of the locks: */ + m_rocksdb_tx->Rollback(); + + /* Save the transaction object to be reused */ + 
release_tx(); + + set_tx_read_only(false); + m_rollback_only = false; + } + } + + void acquire_snapshot(bool acquire_now) override { + if (m_read_opts.snapshot == nullptr) { + if (is_tx_read_only()) { + snapshot_created(rdb->GetSnapshot()); + } else if (acquire_now) { + m_rocksdb_tx->SetSnapshot(); + snapshot_created(m_rocksdb_tx->GetSnapshot()); + } else if (!m_is_delayed_snapshot) { + m_rocksdb_tx->SetSnapshotOnNextOperation(m_notifier); + m_is_delayed_snapshot = true; + } + } + } + + void release_snapshot() override { + bool need_clear = m_is_delayed_snapshot; + + if (m_read_opts.snapshot != nullptr) { + m_snapshot_timestamp = 0; + if (is_tx_read_only()) { + rdb->ReleaseSnapshot(m_read_opts.snapshot); + need_clear = false; + } else { + need_clear = true; + } + m_read_opts.snapshot = nullptr; + } + + if (need_clear && m_rocksdb_tx != nullptr) + m_rocksdb_tx->ClearSnapshot(); + } + + bool has_snapshot() { return m_read_opts.snapshot != nullptr; } + + rocksdb::Status put(rocksdb::ColumnFamilyHandle *const column_family, + const rocksdb::Slice &key, + const rocksdb::Slice &value) override { + ++m_write_count; + ++m_lock_count; + if (m_write_count > m_max_row_locks || m_lock_count > m_max_row_locks) + return rocksdb::Status::Aborted(rocksdb::Status::kLockLimit); + return m_rocksdb_tx->Put(column_family, key, value); + } + + rocksdb::Status delete_key(rocksdb::ColumnFamilyHandle *const column_family, + const rocksdb::Slice &key) override { + ++m_write_count; + ++m_lock_count; + if (m_write_count > m_max_row_locks || m_lock_count > m_max_row_locks) + return rocksdb::Status::Aborted(rocksdb::Status::kLockLimit); + return m_rocksdb_tx->Delete(column_family, key); + } + + rocksdb::Status + single_delete(rocksdb::ColumnFamilyHandle *const column_family, + const rocksdb::Slice &key) override { + ++m_write_count; + ++m_lock_count; + if (m_write_count > m_max_row_locks || m_lock_count > m_max_row_locks) + return rocksdb::Status::Aborted(rocksdb::Status::kLockLimit); + return 
m_rocksdb_tx->SingleDelete(column_family, key); + } + + bool has_modifications() const override { + return m_rocksdb_tx->GetWriteBatch() && + m_rocksdb_tx->GetWriteBatch()->GetWriteBatch() && + m_rocksdb_tx->GetWriteBatch()->GetWriteBatch()->Count() > 0; + } + + rocksdb::WriteBatchBase *get_write_batch() override { + if (is_two_phase()) { + return m_rocksdb_tx->GetCommitTimeWriteBatch(); + } + return m_rocksdb_tx->GetWriteBatch()->GetWriteBatch(); + } + + /* + Return a WriteBatch that one can write to. The writes will skip any + transaction locking. The writes WILL be visible to the transaction. + */ + rocksdb::WriteBatchBase *get_indexed_write_batch() override { + ++m_write_count; + return m_rocksdb_tx->GetWriteBatch(); + } + + rocksdb::Status get(rocksdb::ColumnFamilyHandle *const column_family, + const rocksdb::Slice &key, + std::string *value) const override { + return m_rocksdb_tx->Get(m_read_opts, column_family, key, value); + } + + rocksdb::Status + get_for_update(rocksdb::ColumnFamilyHandle *const column_family, + const rocksdb::Slice &key, std::string *const value, + bool exclusive) override { + if (++m_lock_count > m_max_row_locks) + return rocksdb::Status::Aborted(rocksdb::Status::kLockLimit); + + return m_rocksdb_tx->GetForUpdate(m_read_opts, column_family, key, value, + exclusive); + } + + rocksdb::Iterator * + get_iterator(const rocksdb::ReadOptions &options, + rocksdb::ColumnFamilyHandle *const column_family) override { + return m_rocksdb_tx->GetIterator(options, column_family); + } + + const rocksdb::Transaction *get_rdb_trx() const { return m_rocksdb_tx; } + + bool is_tx_started() const override { return (m_rocksdb_tx != nullptr); } + + void start_tx() override { + rocksdb::TransactionOptions tx_opts; + rocksdb::WriteOptions write_opts; + tx_opts.set_snapshot = false; + tx_opts.lock_timeout = rdb_convert_sec_to_ms(m_timeout_sec); + tx_opts.deadlock_detect = THDVAR(m_thd, deadlock_detect); + + write_opts.sync = THDVAR(m_thd, flush_log_at_trx_commit) 
== 1; + write_opts.disableWAL = THDVAR(m_thd, write_disable_wal); + write_opts.ignore_missing_column_families = + THDVAR(m_thd, write_ignore_missing_column_families); + m_is_two_phase = rocksdb_enable_2pc; + + /* + If m_rocksdb_reuse_tx is null this will create a new transaction object. + Otherwise it will reuse the existing one. + */ + m_rocksdb_tx = + rdb->BeginTransaction(write_opts, tx_opts, m_rocksdb_reuse_tx); + m_rocksdb_reuse_tx = nullptr; + + m_read_opts = rocksdb::ReadOptions(); + + m_ddl_transaction = false; + } + + /* + Start a statement inside a multi-statement transaction. + + @todo: are we sure this is called once (and not several times) per + statement start? + + For hooking to start of statement that is its own transaction, see + ha_rocksdb::external_lock(). + */ + void start_stmt() override { + // Set the snapshot to delayed acquisition (SetSnapshotOnNextOperation) + acquire_snapshot(false); + m_rocksdb_tx->SetSavePoint(); + } + + /* + This must be called when last statement is rolled back, but the transaction + continues + */ + void rollback_stmt() override { + /* TODO: here we must release the locks taken since the start_stmt() call */ + if (m_rocksdb_tx) { + const rocksdb::Snapshot *const org_snapshot = m_rocksdb_tx->GetSnapshot(); + m_rocksdb_tx->RollbackToSavePoint(); + + const rocksdb::Snapshot *const cur_snapshot = m_rocksdb_tx->GetSnapshot(); + if (org_snapshot != cur_snapshot) { + if (org_snapshot != nullptr) + m_snapshot_timestamp = 0; + + m_read_opts.snapshot = cur_snapshot; + if (cur_snapshot != nullptr) + rdb->GetEnv()->GetCurrentTime(&m_snapshot_timestamp); + else + m_is_delayed_snapshot = true; + } + } + } + + explicit Rdb_transaction_impl(THD *const thd) + : Rdb_transaction(thd), m_rocksdb_tx(nullptr) { + // Create a notifier that can be called when a snapshot gets generated. 
+ m_notifier = std::make_shared(this); + } + + virtual ~Rdb_transaction_impl() { + rollback(); + + // Theoretically the notifier could outlive the Rdb_transaction_impl + // (because of the shared_ptr), so let it know it can't reference + // the transaction anymore. + m_notifier->detach(); + + // Free any transaction memory that is still hanging around. + delete m_rocksdb_reuse_tx; + DBUG_ASSERT(m_rocksdb_tx == nullptr); + } +}; + +/* This is a rocksdb write batch. This class doesn't hold or wait on any + transaction locks (skips rocksdb transaction API) thus giving better + performance. The commit is done through rdb->GetBaseDB()->Commit(). + + Currently this is only used for replication threads which are guaranteed + to be non-conflicting. Any further usage of this class should completely + be thought thoroughly. +*/ +class Rdb_writebatch_impl : public Rdb_transaction { + rocksdb::WriteBatchWithIndex *m_batch; + rocksdb::WriteOptions write_opts; + // Called after commit/rollback. + void reset() { + m_batch->Clear(); + m_read_opts = rocksdb::ReadOptions(); + m_ddl_transaction = false; + } + +private: + bool prepare(const rocksdb::TransactionName &name) override { return true; } + + bool commit_no_binlog() override { + bool res = false; + release_snapshot(); + const rocksdb::Status s = + rdb->GetBaseDB()->Write(write_opts, m_batch->GetWriteBatch()); + if (!s.ok()) { + rdb_handle_io_error(s, RDB_IO_ERROR_TX_COMMIT); + res = true; + } + reset(); + + m_write_count = 0; + set_tx_read_only(false); + m_rollback_only = false; + return res; + } + +public: + bool is_writebatch_trx() const override { return true; } + + void set_lock_timeout(int timeout_sec_arg) override { + // Nothing to do here. + } + + void set_sync(bool sync) override { write_opts.sync = sync; } + + void release_lock(rocksdb::ColumnFamilyHandle *const column_family, + const std::string &rowkey) override { + // Nothing to do here since we don't hold any row locks. 
+ } + + void rollback() override { + m_write_count = 0; + m_lock_count = 0; + release_snapshot(); + + reset(); + set_tx_read_only(false); + m_rollback_only = false; + } + + void acquire_snapshot(bool acquire_now) override { + if (m_read_opts.snapshot == nullptr) + snapshot_created(rdb->GetSnapshot()); + } + + void release_snapshot() override { + if (m_read_opts.snapshot != nullptr) { + rdb->ReleaseSnapshot(m_read_opts.snapshot); + m_read_opts.snapshot = nullptr; + } + } + + rocksdb::Status put(rocksdb::ColumnFamilyHandle *const column_family, + const rocksdb::Slice &key, + const rocksdb::Slice &value) override { + ++m_write_count; + m_batch->Put(column_family, key, value); + // Note Put/Delete in write batch doesn't return any error code. We simply + // return OK here. + return rocksdb::Status::OK(); + } + + rocksdb::Status delete_key(rocksdb::ColumnFamilyHandle *const column_family, + const rocksdb::Slice &key) override { + ++m_write_count; + m_batch->Delete(column_family, key); + return rocksdb::Status::OK(); + } + + rocksdb::Status + single_delete(rocksdb::ColumnFamilyHandle *const column_family, + const rocksdb::Slice &key) override { + ++m_write_count; + m_batch->SingleDelete(column_family, key); + return rocksdb::Status::OK(); + } + + bool has_modifications() const override { + return m_batch->GetWriteBatch()->Count() > 0; + } + + rocksdb::WriteBatchBase *get_write_batch() override { return m_batch; } + + rocksdb::WriteBatchBase *get_indexed_write_batch() override { + ++m_write_count; + return m_batch; + } + + rocksdb::Status get(rocksdb::ColumnFamilyHandle *const column_family, + const rocksdb::Slice &key, + std::string *const value) const override { + return m_batch->GetFromBatchAndDB(rdb, m_read_opts, column_family, key, + value); + } + + rocksdb::Status + get_for_update(rocksdb::ColumnFamilyHandle *const column_family, + const rocksdb::Slice &key, std::string *const value, + bool exclusive) override { + return get(column_family, key, value); + } + + 
rocksdb::Iterator * + get_iterator(const rocksdb::ReadOptions &options, + rocksdb::ColumnFamilyHandle *const column_family) override { + const auto it = rdb->NewIterator(options); + return m_batch->NewIteratorWithBase(it); + } + + bool is_tx_started() const override { return (m_batch != nullptr); } + + void start_tx() override { + reset(); + write_opts.sync = THDVAR(m_thd, flush_log_at_trx_commit) == 1; + write_opts.disableWAL = THDVAR(m_thd, write_disable_wal); + write_opts.ignore_missing_column_families = + THDVAR(m_thd, write_ignore_missing_column_families); + } + + void start_stmt() override { m_batch->SetSavePoint(); } + + void rollback_stmt() override { + if (m_batch) + m_batch->RollbackToSavePoint(); + } + + explicit Rdb_writebatch_impl(THD *const thd) + : Rdb_transaction(thd), m_batch(nullptr) { + m_batch = new rocksdb::WriteBatchWithIndex(rocksdb::BytewiseComparator(), 0, + true); + } + + virtual ~Rdb_writebatch_impl() { + rollback(); + delete m_batch; + } +}; + +void Rdb_snapshot_notifier::SnapshotCreated( + const rocksdb::Snapshot *const snapshot) { + if (m_owning_tx != nullptr) { + m_owning_tx->snapshot_created(snapshot); + } +} + +std::multiset Rdb_transaction::s_tx_list; +mysql_mutex_t Rdb_transaction::s_tx_list_mutex; + +static Rdb_transaction *&get_tx_from_thd(THD *const thd) { + return *reinterpret_cast( + my_core::thd_ha_data(thd, rocksdb_hton)); +} + +namespace { + +class Rdb_perf_context_guard { + Rdb_io_perf m_io_perf; + THD *m_thd; + +public: + Rdb_perf_context_guard(const Rdb_perf_context_guard &) = delete; + Rdb_perf_context_guard &operator=(const Rdb_perf_context_guard &) = delete; + + explicit Rdb_perf_context_guard(THD *const thd) : m_thd(thd) { + Rdb_transaction *&tx = get_tx_from_thd(m_thd); + /* + if perf_context information is already being recorded, this becomes a + no-op + */ + if (tx != nullptr) { + tx->io_perf_start(&m_io_perf); + } + } + + ~Rdb_perf_context_guard() { + Rdb_transaction *&tx = get_tx_from_thd(m_thd); + if (tx != 
nullptr) { + tx->io_perf_end_and_record(); + } + } +}; + +} // anonymous namespace + +/* + TODO: maybe, call this in external_lock() and store in ha_rocksdb.. +*/ + +static Rdb_transaction *get_or_create_tx(THD *const thd) { + Rdb_transaction *&tx = get_tx_from_thd(thd); + // TODO: this is called too many times.. O(#rows) + if (tx == nullptr) { + bool rpl_skip_tx_api= false; // MARIAROCKS_NOT_YET. + if ((rpl_skip_tx_api && thd->rgi_slave) || + false /* MARIAROCKS_NOT_YET: THDVAR(thd, master_skip_tx_api) && !thd->rgi_slave)*/) + { + tx = new Rdb_writebatch_impl(thd); + } + else + { + tx = new Rdb_transaction_impl(thd); + } + tx->set_params(THDVAR(thd, lock_wait_timeout), THDVAR(thd, max_row_locks)); + tx->start_tx(); + } else { + tx->set_params(THDVAR(thd, lock_wait_timeout), THDVAR(thd, max_row_locks)); + if (!tx->is_tx_started()) { + tx->start_tx(); + } + } + + return tx; +} + +static int rocksdb_close_connection(handlerton *const hton, THD *const thd) { + Rdb_transaction *&tx = get_tx_from_thd(thd); + if (tx != nullptr) { + int rc = tx->finish_bulk_load(); + if (rc != 0) { + // NO_LINT_DEBUG + sql_print_error("RocksDB: Error %d finalizing last SST file while " + "disconnecting", + rc); + abort_with_stack_traces(); + } + + delete tx; + tx = nullptr; + } + return HA_EXIT_SUCCESS; +} + +/* + * Serializes an xid to a string so that it can + * be used as a rocksdb transaction name + */ +static std::string rdb_xid_to_string(const XID &src) { + DBUG_ASSERT(src.gtrid_length >= 0 && src.gtrid_length <= MAXGTRIDSIZE); + DBUG_ASSERT(src.bqual_length >= 0 && src.bqual_length <= MAXBQUALSIZE); + + std::string buf; + buf.reserve(RDB_XIDHDR_LEN + src.gtrid_length + src.bqual_length); + + /* + * expand formatID to fill 8 bytes if it doesn't already + * then reinterpret bit pattern as unsigned and store in network order + */ + uchar fidbuf[RDB_FORMATID_SZ]; + int64 signed_fid8 = src.formatID; + const uint64 raw_fid8 = *reinterpret_cast(&signed_fid8); + 
rdb_netbuf_store_uint64(fidbuf, raw_fid8); + buf.append(reinterpret_cast(fidbuf), RDB_FORMATID_SZ); + + buf.push_back(src.gtrid_length); + buf.push_back(src.bqual_length); + buf.append(src.data, (src.gtrid_length) + (src.bqual_length)); + return buf; +} + +#if 0 +// MARIAROCKS: MariaDB doesn't have flush_wal method +/** + Called by hton->flush_logs after MySQL group commit prepares a set of + transactions. +*/ +static bool rocksdb_flush_wal(handlerton* hton __attribute__((__unused__))) + DBUG_ASSERT(rdb != nullptr); + rocksdb_wal_group_syncs++; + const rocksdb::Status s = rdb->SyncWAL(); + if (!s.ok()) { + return HA_EXIT_FAILURE; + } + return HA_EXIT_SUCCESS; +} +#endif + +/** + For a slave, prepare() updates the slave_gtid_info table which tracks the + replication progress. +*/ +static int rocksdb_prepare(handlerton* hton, THD* thd, bool prepare_tx) +{ +#ifdef MARIAROCKS_NOT_YET +// This is "ASYNC_COMMIT" feature which is only in webscalesql + bool async=false; +#endif + + Rdb_transaction *&tx = get_tx_from_thd(thd); + if (!tx->can_prepare()) { + return HA_EXIT_FAILURE; + } +#ifdef MARIAROCKS_NOT_YET // disable prepare/commit + if (prepare_tx || + (!my_core::thd_test_options(thd, OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN))) { + /* We were instructed to prepare the whole transaction, or + this is an SQL statement end and autocommit is on */ + std::vector slave_gtid_info; + my_core::thd_slave_gtid_info(thd, &slave_gtid_info); + for (const auto &it : slave_gtid_info) { + rocksdb::WriteBatchBase *const write_batch = tx->get_blind_write_batch(); + binlog_manager.update_slave_gtid_info(it.id, it.db, it.gtid, write_batch); + } + + if (tx->is_two_phase()) { + if (thd->durability_property == HA_IGNORE_DURABILITY || async) { + tx->set_sync(false); + } + XID xid; + thd_get_xid(thd, reinterpret_cast(&xid)); + if (!tx->prepare(rdb_xid_to_string(xid))) { + return HA_EXIT_FAILURE; + } + if (thd->durability_property == HA_IGNORE_DURABILITY +#ifdef MARIAROCKS_NOT_YET + && + THDVAR(thd, 
flush_log_at_trx_commit)) { +#endif + { +#ifdef MARIAROCKS_NOT_YET + // MariaRocks: disable the + // "write/sync redo log before flushing binlog cache to file" + // feature. See a869c56d361bb44f46c0efeb11a8f03561676247 + /** + we set the log sequence as '1' just to trigger hton->flush_logs + */ + thd_store_lsn(thd, 1, DB_TYPE_ROCKSDB); +#endif + } + } + + DEBUG_SYNC(thd, "rocksdb.prepared"); + } +#endif + return HA_EXIT_SUCCESS; +} + +/** + do nothing for prepare/commit by xid + this is needed to avoid crashes in XA scenarios +*/ +static int rocksdb_commit_by_xid(handlerton *const hton, XID *const xid) { + const auto name = rdb_xid_to_string(*xid); + rocksdb::Transaction *const trx = rdb->GetTransactionByName(name); + if (trx == nullptr) { + return HA_EXIT_FAILURE; + } + const rocksdb::Status s = trx->Commit(); + if (!s.ok()) { + return HA_EXIT_FAILURE; + } + delete trx; + return HA_EXIT_SUCCESS; +} + +static int +rocksdb_rollback_by_xid(handlerton *const hton MY_ATTRIBUTE((__unused__)), + XID *const xid) { + const auto name = rdb_xid_to_string(*xid); + rocksdb::Transaction *const trx = rdb->GetTransactionByName(name); + if (trx == nullptr) { + return HA_EXIT_FAILURE; + } + const rocksdb::Status s = trx->Rollback(); + if (!s.ok()) { + return HA_EXIT_FAILURE; + } + delete trx; + return HA_EXIT_SUCCESS; +} + +/** + Rebuilds an XID from a serialized version stored in a string. 
+*/ +static void rdb_xid_from_string(const std::string &src, XID *const dst) { + DBUG_ASSERT(dst != nullptr); + uint offset = 0; + uint64 raw_fid8 = + rdb_netbuf_to_uint64(reinterpret_cast(src.data())); + const int64 signed_fid8 = *reinterpret_cast(&raw_fid8); + dst->formatID = signed_fid8; + offset += RDB_FORMATID_SZ; + dst->gtrid_length = src.at(offset); + offset += RDB_GTRID_SZ; + dst->bqual_length = src.at(offset); + offset += RDB_BQUAL_SZ; + + DBUG_ASSERT(dst->gtrid_length >= 0 && dst->gtrid_length <= MAXGTRIDSIZE); + DBUG_ASSERT(dst->bqual_length >= 0 && dst->bqual_length <= MAXBQUALSIZE); + + src.copy(dst->data, (dst->gtrid_length) + (dst->bqual_length), + RDB_XIDHDR_LEN); +} + +/** + Reading last committed binary log info from RocksDB system row. + The info is needed for crash safe slave/master to work. +*/ +static int rocksdb_recover(handlerton* hton, XID* xid_list, uint len) +#ifdef MARIAROCKS_NOT_YET + char* const binlog_file, + my_off_t *const binlog_pos, + Gtid *const binlog_max_gtid) { +#endif +{ +#ifdef MARIAROCKS_NOT_YET + if (binlog_file && binlog_pos) { + char file_buf[FN_REFLEN + 1] = {0}; + my_off_t pos; + char gtid_buf[FN_REFLEN + 1] = {0}; + if (binlog_manager.read(file_buf, &pos, gtid_buf)) { + if (is_binlog_advanced(binlog_file, *binlog_pos, file_buf, pos)) { + memcpy(binlog_file, file_buf, FN_REFLEN + 1); + *binlog_pos = pos; + fprintf(stderr, "RocksDB: Last binlog file position %llu," + " file name %s\n", + pos, file_buf); + if (*gtid_buf) { + global_sid_lock->rdlock(); + binlog_max_gtid->parse(global_sid_map, gtid_buf); + global_sid_lock->unlock(); + fprintf(stderr, "RocksDB: Last MySQL Gtid %s\n", gtid_buf); + } + } + } + } +#endif + + if (len == 0 || xid_list == nullptr) { + return HA_EXIT_SUCCESS; + } + + std::vector trans_list; + rdb->GetAllPreparedTransactions(&trans_list); + + uint count = 0; + for (auto &trans : trans_list) { + if (count >= len) { + break; + } + auto name = trans->GetName(); + rdb_xid_from_string(name, 
&xid_list[count]); + count++; + } + return count; +} + +static int rocksdb_commit(handlerton* hton, THD* thd, bool commit_tx) +{ + DBUG_ENTER_FUNC(); + + DBUG_ASSERT(hton != nullptr); + DBUG_ASSERT(thd != nullptr); + + /* this will trigger saving of perf_context information */ + Rdb_perf_context_guard guard(thd); + + /* note: h->external_lock(F_UNLCK) is called after this function is called) */ + Rdb_transaction *&tx = get_tx_from_thd(thd); + + if (tx != nullptr) { + if (commit_tx || (!my_core::thd_test_options(thd, OPTION_NOT_AUTOCOMMIT | + OPTION_BEGIN))) { + /* + We get here + - For a COMMIT statement that finishes a multi-statement transaction + - For a statement that has its own transaction + */ + if (tx->commit()) + DBUG_RETURN(HA_ERR_INTERNAL_ERROR); + } else { + /* + We get here when committing a statement within a transaction. + + We don't need to do anything here. tx->start_stmt() will notify + Rdb_transaction_impl that another statement has started. + */ + tx->set_tx_failed(false); + } + + if (my_core::thd_tx_isolation(thd) <= ISO_READ_COMMITTED) { + // For READ_COMMITTED, we release any existing snapshot so that we will + // see any changes that occurred since the last statement. + tx->release_snapshot(); + } + } + + DBUG_RETURN(HA_EXIT_SUCCESS); +} + +static int rocksdb_rollback(handlerton *const hton, THD *const thd, + bool rollback_tx) { + Rdb_perf_context_guard guard(thd); + Rdb_transaction *&tx = get_tx_from_thd(thd); + + if (tx != nullptr) { + if (rollback_tx) { + /* + We get here, when + - ROLLBACK statement is issued. 
+ + Discard the changes made by the transaction + */ + tx->rollback(); + } else { + /* + We get here when + - a statement with AUTOCOMMIT=1 is being rolled back (because of some + error) + - a statement inside a transaction is rolled back + */ + + tx->rollback_stmt(); + tx->set_tx_failed(true); + } + + if (my_core::thd_tx_isolation(thd) <= ISO_READ_COMMITTED) { + // For READ_COMMITTED, we release any existing snapshot so that we will + // see any changes that occurred since the last statement. + tx->release_snapshot(); + } + } + return HA_EXIT_SUCCESS; +} + +static bool print_stats(THD *const thd, std::string const &type, + std::string const &name, std::string const &status, + stat_print_fn *stat_print) { + return stat_print(thd, type.c_str(), type.size(), name.c_str(), name.size(), + status.c_str(), status.size()); +} + +static std::string format_string(const char *const format, ...) { + std::string res; + va_list args; + va_list args_copy; + char static_buff[256]; + + DBUG_ASSERT(format != nullptr); + + va_start(args, format); + va_copy(args_copy, args); + + // Calculate how much space we will need + int len = vsnprintf(nullptr, 0, format, args); + va_end(args); + + if (len < 0) { + res = std::string(""); + } else if (len == 0) { + // Shortcut for an empty string + res = std::string(""); + } else { + // For short enough output use a static buffer + char *buff = static_buff; + std::unique_ptr dynamic_buff = nullptr; + + len++; // Add one for null terminator + + // for longer output use an allocated buffer + if (static_cast(len) > sizeof(static_buff)) { + dynamic_buff.reset(new char[len]); + buff = dynamic_buff.get(); + } + + // Now re-do the vsnprintf with the buffer which is now large enough + (void)vsnprintf(buff, len, format, args_copy); + + // Convert to a std::string. Note we could have created a std::string + // large enough and then converted the buffer to a 'char*' and created + // the output in place. This would probably work but feels like a hack. 
+ // Since this isn't code that needs to be super-performant we are going + // with this 'safer' method. + res = std::string(buff); + } + + va_end(args_copy); + + return res; +} + +class Rdb_snapshot_status : public Rdb_tx_list_walker { +private: + std::string m_data; + + static std::string current_timestamp(void) { + static const char *const format = "%d-%02d-%02d %02d:%02d:%02d"; + time_t currtime; + struct tm currtm; + + time(&currtime); + + localtime_r(&currtime, &currtm); + + return format_string(format, currtm.tm_year + 1900, currtm.tm_mon + 1, + currtm.tm_mday, currtm.tm_hour, currtm.tm_min, + currtm.tm_sec); + } + + static std::string get_header(void) { + return "\n============================================================\n" + + current_timestamp() + + " ROCKSDB TRANSACTION MONITOR OUTPUT\n" + "============================================================\n" + "---------\n" + "SNAPSHOTS\n" + "---------\n" + "LIST OF SNAPSHOTS FOR EACH SESSION:\n"; + } + + static std::string get_footer(void) { + return "-----------------------------------------\n" + "END OF ROCKSDB TRANSACTION MONITOR OUTPUT\n" + "=========================================\n"; + } + +public: + Rdb_snapshot_status() : m_data(get_header()) {} + + std::string getResult() { return m_data + get_footer(); } + + /* Implement Rdb_transaction interface */ + /* Create one row in the snapshot status table */ + void process_tran(const Rdb_transaction *const tx) override { + DBUG_ASSERT(tx != nullptr); + + /* Calculate the duration the snapshot has existed */ + int64_t snapshot_timestamp = tx->m_snapshot_timestamp; + if (snapshot_timestamp != 0) { + int64_t curr_time; + rdb->GetEnv()->GetCurrentTime(&curr_time); + + char buffer[1024]; +#ifdef MARIAROCKS_NOT_YET + thd_security_context(tx->get_thd(), buffer, sizeof buffer, 0); +#endif + m_data += format_string("---SNAPSHOT, ACTIVE %lld sec\n" + "%s\n" + "lock count %llu, write count %llu\n", + (longlong)(curr_time - snapshot_timestamp), + buffer, + 
tx->get_lock_count(), tx->get_write_count()); + } + } +}; + +/** + * @brief + * walks through all non-replication transactions and copies + * out relevant information for information_schema.rocksdb_trx + */ +class Rdb_trx_info_aggregator : public Rdb_tx_list_walker { +private: + std::vector *m_trx_info; + +public: + explicit Rdb_trx_info_aggregator(std::vector *const trx_info) + : m_trx_info(trx_info) {} + + void process_tran(const Rdb_transaction *const tx) override { + static const std::map state_map = { + {rocksdb::Transaction::STARTED, "STARTED"}, + {rocksdb::Transaction::AWAITING_PREPARE, "AWAITING_PREPARE"}, + {rocksdb::Transaction::PREPARED, "PREPARED"}, + {rocksdb::Transaction::AWAITING_COMMIT, "AWAITING_COMMIT"}, + {rocksdb::Transaction::COMMITED, "COMMITED"}, + {rocksdb::Transaction::AWAITING_ROLLBACK, "AWAITING_ROLLBACK"}, + {rocksdb::Transaction::ROLLEDBACK, "ROLLEDBACK"}, + }; + + DBUG_ASSERT(tx != nullptr); + + THD *const thd = tx->get_thd(); + ulong thread_id = thd_get_thread_id(thd); + + if (tx->is_writebatch_trx()) { + const auto wb_impl = static_cast(tx); + DBUG_ASSERT(wb_impl); + m_trx_info->push_back( + {"", /* name */ + 0, /* trx_id */ + wb_impl->get_write_count(), 0, /* lock_count */ + 0, /* timeout_sec */ + "", /* state */ + "", /* waiting_key */ + 0, /* waiting_cf_id */ + 1, /*is_replication */ + 1, /* skip_trx_api */ + wb_impl->is_tx_read_only(), 0, /* deadlock detection */ + wb_impl->num_ongoing_bulk_load(), thread_id, "" /* query string */}); + } else { + const auto tx_impl = static_cast(tx); + DBUG_ASSERT(tx_impl); + const rocksdb::Transaction *rdb_trx = tx_impl->get_rdb_trx(); + + if (rdb_trx == nullptr) { + return; + } + + std::string query_str; + LEX_STRING *const lex_str = thd_query_string(thd); + if (lex_str != nullptr && lex_str->str != nullptr) { + query_str = std::string(lex_str->str); + } + + const auto state_it = state_map.find(rdb_trx->GetState()); + DBUG_ASSERT(state_it != state_map.end()); +#ifdef MARIAROCKS_NOT_YET + const 
int is_replication = (thd->rli_slave != nullptr); +#else + const int is_replication= false; +#endif + uint32_t waiting_cf_id; + std::string waiting_key; + rdb_trx->GetWaitingTxns(&waiting_cf_id, &waiting_key), + + m_trx_info->push_back( + {rdb_trx->GetName(), rdb_trx->GetID(), tx_impl->get_write_count(), + tx_impl->get_lock_count(), tx_impl->get_timeout_sec(), + state_it->second, waiting_key, waiting_cf_id, is_replication, + 0, /* skip_trx_api */ + tx_impl->is_tx_read_only(), rdb_trx->IsDeadlockDetect(), + tx_impl->num_ongoing_bulk_load(), thread_id, query_str}); + } + } +}; + +/* + returns a vector of info for all non-replication threads + for use by information_schema.rocksdb_trx +*/ +std::vector rdb_get_all_trx_info() { + std::vector trx_info; + Rdb_trx_info_aggregator trx_info_agg(&trx_info); + Rdb_transaction::walk_tx_list(&trx_info_agg); + return trx_info; +} + +#ifdef MARIAROCKS_NOT_YET +/* Generate the snapshot status table */ +static bool rocksdb_show_snapshot_status(handlerton *const hton, THD *const thd, + stat_print_fn *const stat_print) { + Rdb_snapshot_status showStatus; + + Rdb_transaction::walk_tx_list(&showStatus); + + /* Send the result data back to MySQL */ + return print_stats(thd, "SNAPSHOTS", "rocksdb", showStatus.getResult(), + stat_print); +} +#endif + +/* + This is called for SHOW ENGINE ROCKSDB STATUS|LOGS|etc. 
+ + For now, produce info about live files (which gives an imprecise idea about + what column families are there) +*/ + +static bool rocksdb_show_status(handlerton *const hton, THD *const thd, + stat_print_fn *const stat_print, + enum ha_stat_type stat_type) { + bool res = false; + if (stat_type == HA_ENGINE_STATUS) { + std::string str; + + /* Per DB stats */ + if (rdb->GetProperty("rocksdb.dbstats", &str)) { + res |= print_stats(thd, "DBSTATS", "rocksdb", str, stat_print); + } + + /* Per column family stats */ + for (const auto &cf_name : cf_manager.get_cf_names()) { + rocksdb::ColumnFamilyHandle *cfh; + bool is_automatic; + + /* + Only the cf name is important. Whether it was generated automatically + does not matter, so is_automatic is ignored. + */ + cfh = cf_manager.get_cf(cf_name.c_str(), "", nullptr, &is_automatic); + if (cfh == nullptr) + continue; + + if (!rdb->GetProperty(cfh, "rocksdb.cfstats", &str)) + continue; + + res |= print_stats(thd, "CF_COMPACTION", cf_name, str, stat_print); + } + + /* Memory Statistics */ + std::vector dbs; + std::unordered_set cache_set; + size_t internal_cache_count = 0; + size_t kDefaultInternalCacheSize = 8 * 1024 * 1024; + char buf[100]; + + dbs.push_back(rdb); + cache_set.insert(rocksdb_tbl_options.block_cache.get()); + for (const auto &cf_handle : cf_manager.get_all_cf()) { + rocksdb::ColumnFamilyDescriptor cf_desc; + cf_handle->GetDescriptor(&cf_desc); + auto *const table_factory = cf_desc.options.table_factory.get(); + if (table_factory != nullptr) { + std::string tf_name = table_factory->Name(); + if (tf_name.find("BlockBasedTable") != std::string::npos) { + const rocksdb::BlockBasedTableOptions *const bbt_opt = + reinterpret_cast( + table_factory->GetOptions()); + if (bbt_opt != nullptr) { + if (bbt_opt->block_cache.get() != nullptr) { + cache_set.insert(bbt_opt->block_cache.get()); + } else { + internal_cache_count++; + } + cache_set.insert(bbt_opt->block_cache_compressed.get()); + } + } + } + } + + std::map 
temp_usage_by_type; + str.clear(); + rocksdb::MemoryUtil::GetApproximateMemoryUsageByType(dbs, cache_set, + &temp_usage_by_type); + snprintf(buf, sizeof(buf), "\nMemTable Total: %llu", + (ulonglong)temp_usage_by_type[rocksdb::MemoryUtil::kMemTableTotal]); + str.append(buf); + snprintf(buf, sizeof(buf), "\nMemTable Unflushed: %llu", + (ulonglong)temp_usage_by_type[rocksdb::MemoryUtil::kMemTableUnFlushed]); + str.append(buf); + snprintf(buf, sizeof(buf), "\nTable Readers Total: %llu", + (ulonglong)temp_usage_by_type[rocksdb::MemoryUtil::kTableReadersTotal]); + str.append(buf); + snprintf(buf, sizeof(buf), "\nCache Total: %llu", + (ulonglong)temp_usage_by_type[rocksdb::MemoryUtil::kCacheTotal]); + str.append(buf); + snprintf(buf, sizeof(buf), "\nDefault Cache Capacity: %llu", + (ulonglong)internal_cache_count * kDefaultInternalCacheSize); + str.append(buf); + res |= print_stats(thd, "Memory_Stats", "rocksdb", str, stat_print); +#ifdef MARIAROCKS_NOT_YET + } else if (stat_type == HA_ENGINE_TRX) { + /* Handle the SHOW ENGINE ROCKSDB TRANSACTION STATUS command */ + res |= rocksdb_show_snapshot_status(hton, thd, stat_print); +#endif + } + return res; +} + +static inline void rocksdb_register_tx(handlerton *const hton, THD *const thd, + Rdb_transaction *const tx) { + DBUG_ASSERT(tx != nullptr); + + trans_register_ha(thd, FALSE, rocksdb_hton); + if (my_core::thd_test_options(thd, OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN)) { + tx->start_stmt(); + trans_register_ha(thd, TRUE, rocksdb_hton); + } +} + +static const char *ha_rocksdb_exts[] = {NullS}; + +/* + Supporting START TRANSACTION WITH CONSISTENT [ROCKSDB] SNAPSHOT + + Features: + 1. Supporting START TRANSACTION WITH CONSISTENT SNAPSHOT + 2. Getting current binlog position in addition to #1. + + The second feature is done by START TRANSACTION WITH + CONSISTENT ROCKSDB SNAPSHOT. This is Facebook's extension, and + it works like existing START TRANSACTION WITH CONSISTENT INNODB SNAPSHOT. 
+ + - When not setting engine, START TRANSACTION WITH CONSISTENT SNAPSHOT + takes both InnoDB and RocksDB snapshots, and both InnoDB and RocksDB + participate in transaction. When executing COMMIT, both InnoDB and + RocksDB modifications are committed. Remember that XA is not supported yet, + so mixing engines is not recommended anyway. + + - When setting engine, START TRANSACTION WITH CONSISTENT.. takes + snapshot for the specified engine only. But it starts both + InnoDB and RocksDB transactions. +*/ +static int rocksdb_start_tx_and_assign_read_view( + handlerton *const hton, /*!< in: RocksDB handlerton */ + THD* thd) /*!< in: MySQL thread handle of the + user for whom the transaction should + be committed */ +{ + Rdb_perf_context_guard guard(thd); + + ulong const tx_isolation = my_core::thd_tx_isolation(thd); + + if (tx_isolation != ISO_REPEATABLE_READ) { + my_printf_error(ER_UNKNOWN_ERROR, + "Only REPEATABLE READ isolation level is supported " + "for START TRANSACTION WITH CONSISTENT SNAPSHOT " + "in RocksDB Storage Engine.", + MYF(0)); + return HA_EXIT_FAILURE; + } + /* + MariaDB: there is no need to call mysql_bin_log_lock_commits and then + unlock back. + SQL layer calls start_consistent_snapshot() for all engines, including the + binlog under LOCK_commit_ordered mutex. + The mutex prevents binlog commits from happening (right?) while the storage + engine(s) allocate read snapshots. That way, each storage engine is + synchronized with current binlog position. + */ + mysql_mutex_assert_owner(&LOCK_commit_ordered); + + Rdb_transaction *const tx = get_or_create_tx(thd); + DBUG_ASSERT(!tx->has_snapshot()); + tx->set_tx_read_only(true); + rocksdb_register_tx(hton, thd, tx); + tx->acquire_snapshot(true); + + return HA_EXIT_SUCCESS; +} + +/* Dummy SAVEPOINT support. This is needed for long running transactions + * like mysqldump (https://bugs.mysql.com/bug.php?id=71017). + * Current SAVEPOINT does not correctly handle ROLLBACK and does not return + * errors. 
This needs to be addressed in future versions (Issue#96). + */ +static int rocksdb_savepoint(handlerton *const hton, THD *const thd, + void *const savepoint) { + return HA_EXIT_SUCCESS; +} + +static int rocksdb_rollback_to_savepoint(handlerton *const hton, THD *const thd, + void *const savepoint) { + Rdb_transaction *&tx = get_tx_from_thd(thd); + return tx->rollback_to_savepoint(savepoint); +} + +static bool +rocksdb_rollback_to_savepoint_can_release_mdl(handlerton *const hton, + THD *const thd) { + return true; +} + +#ifdef MARIAROCKS_NOT_YET +/* + This is called for INFORMATION_SCHEMA +*/ +static void rocksdb_update_table_stats( + /* per-table stats callback */ + void (*cb)(const char *db, const char *tbl, bool is_partition, + my_io_perf_t *r, my_io_perf_t *w, my_io_perf_t *r_blob, + my_io_perf_t *r_primary, my_io_perf_t *r_secondary, + page_stats_t *page_stats, comp_stats_t *comp_stats, + int n_lock_wait, int n_lock_wait_timeout, const char *engine)) { + my_io_perf_t io_perf_read; + my_io_perf_t io_perf; + page_stats_t page_stats; + comp_stats_t comp_stats; + std::vector tablenames; + + /* + Most of these are for innodb, so setting them to 0. + TODO: possibly separate out primary vs. secondary index reads + */ + memset(&io_perf, 0, sizeof(io_perf)); + memset(&page_stats, 0, sizeof(page_stats)); + memset(&comp_stats, 0, sizeof(comp_stats)); + + tablenames = rdb_open_tables.get_table_names(); + + for (const auto &it : tablenames) { + Rdb_table_handler *table_handler; + std::string str, dbname, tablename, partname; + char dbname_sys[NAME_LEN + 1]; + char tablename_sys[NAME_LEN + 1]; + bool is_partition; + + if (rdb_normalize_tablename(it, &str)) { + /* Function needs to return void because of the interface and we've + * detected an error which shouldn't happen. There's no way to let + * caller know that something failed. 
+ */ + SHIP_ASSERT(false); + return; + } + + if (rdb_split_normalized_tablename(str, &dbname, &tablename, &partname)) { + continue; + } + + is_partition = (partname.size() != 0); + + table_handler = rdb_open_tables.get_table_handler(it.c_str()); + if (table_handler == nullptr) { + continue; + } + + io_perf_read.bytes = table_handler->m_io_perf_read.bytes.load(); + io_perf_read.requests = table_handler->m_io_perf_read.requests.load(); + + /* + Convert from rocksdb timer to mysql timer. RocksDB values are + in nanoseconds, but table statistics expect the value to be + in my_timer format. + */ + io_perf_read.svc_time = my_core::microseconds_to_my_timer( + table_handler->m_io_perf_read.svc_time.load() / 1000); + io_perf_read.svc_time_max = my_core::microseconds_to_my_timer( + table_handler->m_io_perf_read.svc_time_max.load() / 1000); + io_perf_read.wait_time = my_core::microseconds_to_my_timer( + table_handler->m_io_perf_read.wait_time.load() / 1000); + io_perf_read.wait_time_max = my_core::microseconds_to_my_timer( + table_handler->m_io_perf_read.wait_time_max.load() / 1000); + io_perf_read.slow_ios = table_handler->m_io_perf_read.slow_ios.load(); + rdb_open_tables.release_table_handler(table_handler); + + /* + Table stats expects our database and table name to be in system encoding, + not filename format. Convert before calling callback. 
+ */ + my_core::filename_to_tablename(dbname.c_str(), dbname_sys, + sizeof(dbname_sys)); + my_core::filename_to_tablename(tablename.c_str(), tablename_sys, + sizeof(tablename_sys)); + (*cb)(dbname_sys, tablename_sys, is_partition, &io_perf_read, &io_perf, + &io_perf, &io_perf, &io_perf, &page_stats, &comp_stats, 0, 0, + rocksdb_hton_name); + } +} +#endif +static rocksdb::Status check_rocksdb_options_compatibility( + const char *const dbpath, const rocksdb::Options &main_opts, + const std::vector &cf_descr) { + DBUG_ASSERT(rocksdb_datadir != nullptr); + + rocksdb::DBOptions loaded_db_opt; + std::vector loaded_cf_descs; + rocksdb::Status status = LoadLatestOptions(dbpath, rocksdb::Env::Default(), + &loaded_db_opt, &loaded_cf_descs); + + // If we're starting from scratch and there are no options saved yet then this + // is a valid case. Therefore we can't compare the current set of options to + // anything. + if (status.IsNotFound()) { + return rocksdb::Status::OK(); + } + + if (!status.ok()) { + return status; + } + + if (loaded_cf_descs.size() != cf_descr.size()) { + return rocksdb::Status::NotSupported("Mismatched size of column family " + "descriptors."); + } + + // Please see RocksDB documentation for more context about why we need to set + // user-defined functions and pointer-typed options manually. 
+ for (size_t i = 0; i < loaded_cf_descs.size(); i++) { + loaded_cf_descs[i].options.compaction_filter = + cf_descr[i].options.compaction_filter; + loaded_cf_descs[i].options.compaction_filter_factory = + cf_descr[i].options.compaction_filter_factory; + loaded_cf_descs[i].options.comparator = cf_descr[i].options.comparator; + loaded_cf_descs[i].options.memtable_factory = + cf_descr[i].options.memtable_factory; + loaded_cf_descs[i].options.merge_operator = + cf_descr[i].options.merge_operator; + loaded_cf_descs[i].options.prefix_extractor = + cf_descr[i].options.prefix_extractor; + loaded_cf_descs[i].options.table_factory = + cf_descr[i].options.table_factory; + } + + // This is the essence of the function - determine if it's safe to open the + // database or not. + status = CheckOptionsCompatibility(dbpath, rocksdb::Env::Default(), main_opts, + loaded_cf_descs); + + return status; +} + +/* + Storage Engine initialization function, invoked when plugin is loaded. +*/ + +static int rocksdb_init_func(void *const p) { + DBUG_ENTER_FUNC(); + + // Validate the assumption about the size of ROCKSDB_SIZEOF_HIDDEN_PK_COLUMN. 
+ static_assert(sizeof(longlong) == 8, "Assuming that longlong is 8 bytes."); + + init_rocksdb_psi_keys(); + + rocksdb_hton = (handlerton *)p; + mysql_mutex_init(rdb_psi_open_tbls_mutex_key, &rdb_open_tables.m_mutex, + MY_MUTEX_INIT_FAST); +#ifdef HAVE_PSI_INTERFACE + rdb_bg_thread.init(rdb_signal_bg_psi_mutex_key, rdb_signal_bg_psi_cond_key); + rdb_drop_idx_thread.init(rdb_signal_drop_idx_psi_mutex_key, + rdb_signal_drop_idx_psi_cond_key); +#else + rdb_bg_thread.init(); + rdb_drop_idx_thread.init(); +#endif + mysql_mutex_init(rdb_collation_data_mutex_key, &rdb_collation_data_mutex, + MY_MUTEX_INIT_FAST); + mysql_mutex_init(rdb_mem_cmp_space_mutex_key, &rdb_mem_cmp_space_mutex, + MY_MUTEX_INIT_FAST); + +#if defined(HAVE_PSI_INTERFACE) + rdb_collation_exceptions = + new Regex_list_handler(key_rwlock_collation_exception_list); +#else + rdb_collation_exceptions = new Regex_list_handler(); +#endif + + mysql_mutex_init(rdb_sysvars_psi_mutex_key, &rdb_sysvars_mutex, + MY_MUTEX_INIT_FAST); + Rdb_transaction::init_mutex(); + + rocksdb_hton->state = SHOW_OPTION_YES; + rocksdb_hton->create = rocksdb_create_handler; + rocksdb_hton->close_connection = rocksdb_close_connection; + rocksdb_hton->prepare = rocksdb_prepare; + rocksdb_hton->commit_by_xid = rocksdb_commit_by_xid; + rocksdb_hton->rollback_by_xid = rocksdb_rollback_by_xid; + rocksdb_hton->recover = rocksdb_recover; + rocksdb_hton->commit = rocksdb_commit; + rocksdb_hton->rollback = rocksdb_rollback; + rocksdb_hton->show_status = rocksdb_show_status; + rocksdb_hton->start_consistent_snapshot = + rocksdb_start_tx_and_assign_read_view; + rocksdb_hton->savepoint_set = rocksdb_savepoint; + rocksdb_hton->savepoint_rollback = rocksdb_rollback_to_savepoint; + rocksdb_hton->savepoint_rollback_can_release_mdl = + rocksdb_rollback_to_savepoint_can_release_mdl; +#ifdef MARIAROCKS_NOT_YET + rocksdb_hton->update_table_stats = rocksdb_update_table_stats; +#endif // MARIAROCKS_NOT_YET + + /* + Not needed in MariaDB: + 
rocksdb_hton->flush_logs = rocksdb_flush_wal; + */ + + rocksdb_hton->flags = HTON_TEMPORARY_NOT_SUPPORTED | + HTON_SUPPORTS_EXTENDED_KEYS | HTON_CAN_RECREATE; + + rocksdb_hton->tablefile_extensions= ha_rocksdb_exts; + DBUG_ASSERT(!mysqld_embedded); + + rocksdb_stats = rocksdb::CreateDBStatistics(); + rocksdb_db_options.statistics = rocksdb_stats; + + if (rocksdb_rate_limiter_bytes_per_sec != 0) { + rocksdb_rate_limiter.reset( + rocksdb::NewGenericRateLimiter(rocksdb_rate_limiter_bytes_per_sec)); + rocksdb_db_options.rate_limiter = rocksdb_rate_limiter; + } + + rocksdb_db_options.delayed_write_rate = rocksdb_delayed_write_rate; + + std::shared_ptr myrocks_logger = std::make_shared(); + rocksdb::Status s = rocksdb::CreateLoggerFromOptions( + rocksdb_datadir, rocksdb_db_options, &rocksdb_db_options.info_log); + if (s.ok()) { + myrocks_logger->SetRocksDBLogger(rocksdb_db_options.info_log); + } + + rocksdb_db_options.info_log = myrocks_logger; + myrocks_logger->SetInfoLogLevel( + static_cast(rocksdb_info_log_level)); + rocksdb_db_options.wal_dir = rocksdb_wal_dir; + + rocksdb_db_options.wal_recovery_mode = + static_cast(rocksdb_wal_recovery_mode); + + rocksdb_db_options.access_hint_on_compaction_start = + static_cast( + rocksdb_access_hint_on_compaction_start); + + if (rocksdb_db_options.allow_mmap_reads && + rocksdb_db_options.use_direct_reads) { + // allow_mmap_reads implies !use_direct_reads and RocksDB will not open if + // mmap_reads and direct_reads are both on. (NO_LINT_DEBUG) + sql_print_error("RocksDB: Can't enable both use_direct_reads " + "and allow_mmap_reads\n"); + DBUG_RETURN(HA_EXIT_FAILURE); + } + + if (rocksdb_db_options.allow_mmap_writes && + rocksdb_db_options.use_direct_writes) { + // See above comment for allow_mmap_reads. 
(NO_LINT_DEBUG) + sql_print_error("RocksDB: Can't enable both use_direct_writes " + "and allow_mmap_writes\n"); + DBUG_RETURN(HA_EXIT_FAILURE); + } + + std::vector cf_names; + rocksdb::Status status; + status = rocksdb::DB::ListColumnFamilies(rocksdb_db_options, rocksdb_datadir, + &cf_names); + if (!status.ok()) { + /* + When we start on an empty datadir, ListColumnFamilies returns IOError, + and RocksDB doesn't provide any way to check what kind of error it was. + Checking system errno happens to work right now. + */ + if (status.IsIOError() +#ifndef _WIN32 + && errno == ENOENT +#endif + ) { + sql_print_information("RocksDB: Got ENOENT when listing column families"); + sql_print_information( + "RocksDB: assuming that we're creating a new database"); + } else { + std::string err_text = status.ToString(); + sql_print_error("RocksDB: Error listing column families: %s", + err_text.c_str()); + DBUG_RETURN(HA_EXIT_FAILURE); + } + } else + sql_print_information("RocksDB: %ld column families found", + cf_names.size()); + + std::vector cf_descr; + std::vector cf_handles; + + rocksdb_tbl_options.index_type = + (rocksdb::BlockBasedTableOptions::IndexType)rocksdb_index_type; + + if (!rocksdb_tbl_options.no_block_cache) { + rocksdb_tbl_options.block_cache = + rocksdb::NewLRUCache(rocksdb_block_cache_size); + } + // Using newer BlockBasedTable format version for better compression + // and better memory allocation. 
+ // See: + // https://github.com/facebook/rocksdb/commit/9ab5adfc59a621d12357580c94451d9f7320c2dd + rocksdb_tbl_options.format_version = 2; + + if (rocksdb_collect_sst_properties) { + properties_collector_factory = + std::make_shared(&ddl_manager); + + rocksdb_set_compaction_options(nullptr, nullptr, nullptr, nullptr); + + RDB_MUTEX_LOCK_CHECK(rdb_sysvars_mutex); + + DBUG_ASSERT(rocksdb_table_stats_sampling_pct <= + RDB_TBL_STATS_SAMPLE_PCT_MAX); + properties_collector_factory->SetTableStatsSamplingPct( + rocksdb_table_stats_sampling_pct); + + RDB_MUTEX_UNLOCK_CHECK(rdb_sysvars_mutex); + } + + if (rocksdb_persistent_cache_size_mb > 0) { + std::shared_ptr pcache; + uint64_t cache_size_bytes= rocksdb_persistent_cache_size_mb * 1024 * 1024; + rocksdb::NewPersistentCache( + rocksdb::Env::Default(), std::string(rocksdb_persistent_cache_path), + cache_size_bytes, myrocks_logger, true, &pcache); + rocksdb_tbl_options.persistent_cache = pcache; + } else if (strlen(rocksdb_persistent_cache_path)) { + sql_print_error("RocksDB: Must specify rocksdb_persistent_cache_size_mb"); + DBUG_RETURN(1); + } + + if (!rocksdb_cf_options_map.init( + rocksdb_tbl_options, properties_collector_factory, + rocksdb_default_cf_options, rocksdb_override_cf_options)) { + // NO_LINT_DEBUG + sql_print_error("RocksDB: Failed to initialize CF options map."); + DBUG_RETURN(HA_EXIT_FAILURE); + } + + /* + If there are no column families, we're creating the new database. + Create one column family named "default". 
+ */ + if (cf_names.size() == 0) + cf_names.push_back(DEFAULT_CF_NAME); + + std::vector compaction_enabled_cf_indices; + sql_print_information("RocksDB: Column Families at start:"); + for (size_t i = 0; i < cf_names.size(); ++i) { + rocksdb::ColumnFamilyOptions opts; + rocksdb_cf_options_map.get_cf_options(cf_names[i], &opts); + + sql_print_information(" cf=%s", cf_names[i].c_str()); + sql_print_information(" write_buffer_size=%ld", opts.write_buffer_size); + sql_print_information(" target_file_size_base=%" PRIu64, + opts.target_file_size_base); + + /* + Temporarily disable compactions to prevent a race condition where + compaction starts before compaction filter is ready. + */ + if (!opts.disable_auto_compactions) { + compaction_enabled_cf_indices.push_back(i); + opts.disable_auto_compactions = true; + } + cf_descr.push_back(rocksdb::ColumnFamilyDescriptor(cf_names[i], opts)); + } + + rocksdb::Options main_opts(rocksdb_db_options, + rocksdb_cf_options_map.get_defaults()); + +#ifdef MARIAROCKS_NOT_YET +#endif + main_opts.env->SetBackgroundThreads(main_opts.max_background_flushes, + rocksdb::Env::Priority::HIGH); + main_opts.env->SetBackgroundThreads(main_opts.max_background_compactions, + rocksdb::Env::Priority::LOW); + rocksdb::TransactionDBOptions tx_db_options; + tx_db_options.transaction_lock_timeout = 2; // 2 seconds + tx_db_options.custom_mutex_factory = std::make_shared(); + + status = + check_rocksdb_options_compatibility(rocksdb_datadir, main_opts, cf_descr); + + // We won't start if we'll determine that there's a chance of data corruption + // because of incompatible options. + if (!status.ok()) { + // NO_LINT_DEBUG + sql_print_error("RocksDB: compatibility check against existing database " + "options failed. 
%s", + status.ToString().c_str()); + DBUG_RETURN(HA_EXIT_FAILURE); + } + + status = rocksdb::TransactionDB::Open( + main_opts, tx_db_options, rocksdb_datadir, cf_descr, &cf_handles, &rdb); + + if (!status.ok()) { + std::string err_text = status.ToString(); + sql_print_error("RocksDB: Error opening instance: %s", err_text.c_str()); + DBUG_RETURN(HA_EXIT_FAILURE); + } + cf_manager.init(&rocksdb_cf_options_map, &cf_handles); + + if (dict_manager.init(rdb->GetBaseDB(), &cf_manager)) { + // NO_LINT_DEBUG + sql_print_error("RocksDB: Failed to initialize data dictionary."); + DBUG_RETURN(HA_EXIT_FAILURE); + } + + if (binlog_manager.init(&dict_manager)) { + // NO_LINT_DEBUG + sql_print_error("RocksDB: Failed to initialize binlog manager."); + DBUG_RETURN(HA_EXIT_FAILURE); + } + + if (ddl_manager.init(&dict_manager, &cf_manager, rocksdb_validate_tables)) { + // NO_LINT_DEBUG + sql_print_error("RocksDB: Failed to initialize DDL manager."); + DBUG_RETURN(HA_EXIT_FAILURE); + } + + Rdb_sst_info::init(rdb); + + /* + Enable auto compaction, things needed for compaction filter are finished + initializing + */ + std::vector compaction_enabled_cf_handles; + compaction_enabled_cf_handles.reserve(compaction_enabled_cf_indices.size()); + for (const auto &index : compaction_enabled_cf_indices) { + compaction_enabled_cf_handles.push_back(cf_handles[index]); + } + + status = rdb->EnableAutoCompaction(compaction_enabled_cf_handles); + + if (!status.ok()) { + const std::string err_text = status.ToString(); + // NO_LINT_DEBUG + sql_print_error("RocksDB: Error enabling compaction: %s", err_text.c_str()); + DBUG_RETURN(HA_EXIT_FAILURE); + } + + auto err = rdb_bg_thread.create_thread(BG_THREAD_NAME +#ifdef HAVE_PSI_INTERFACE + , + rdb_background_psi_thread_key +#endif + ); + if (err != 0) { + sql_print_error("RocksDB: Couldn't start the background thread: (errno=%d)", + err); + DBUG_RETURN(HA_EXIT_FAILURE); + } + + err = rdb_drop_idx_thread.create_thread(INDEX_THREAD_NAME +#ifdef 
HAVE_PSI_INTERFACE + , + rdb_drop_idx_psi_thread_key +#endif + ); + if (err != 0) { + sql_print_error("RocksDB: Couldn't start the drop index thread: (errno=%d)", + err); + DBUG_RETURN(HA_EXIT_FAILURE); + } + + rdb_set_collation_exception_list(rocksdb_strict_collation_exceptions); + + if (rocksdb_pause_background_work) { + rdb->PauseBackgroundWork(); + } + + // NO_LINT_DEBUG + sql_print_information("RocksDB: global statistics using %s indexer", + STRINGIFY_ARG(RDB_INDEXER)); +#if defined(HAVE_SCHED_GETCPU) + if (sched_getcpu() == -1) { + // NO_LINT_DEBUG + sql_print_information( + "RocksDB: sched_getcpu() failed - " + "global statistics will use thread_id_indexer_t instead"); + } +#endif + + sql_print_information("RocksDB instance opened"); + DBUG_RETURN(HA_EXIT_SUCCESS); +} + +/* + Storage Engine deinitialization function, invoked when plugin is unloaded. +*/ + +static int rocksdb_done_func(void *const p) { + DBUG_ENTER_FUNC(); + + int error = 0; + + // signal the drop index thread to stop + rdb_drop_idx_thread.signal(true); + + // Flush all memtables for not losing data, even if WAL is disabled. + rocksdb_flush_all_memtables(); + + // Stop all rocksdb background work + CancelAllBackgroundWork(rdb->GetBaseDB(), true); + + // Signal the background thread to stop and to persist all stats collected + // from background flushes and compactions. This will add more keys to a new + // memtable, but since the memtables were just flushed, it should not trigger + // a flush that can stall due to background threads being stopped. As long + // as these keys are stored in a WAL file, they can be retrieved on restart. + rdb_bg_thread.signal(true); + + // Wait for the background thread to finish. + auto err = rdb_bg_thread.join(); + if (err != 0) { + // We'll log the message and continue because we're shutting down and + // continuation is the optimal strategy. 
+ // NO_LINT_DEBUG + sql_print_error("RocksDB: Couldn't stop the background thread: (errno=%d)", + err); + } + + // Wait for the drop index thread to finish. + err = rdb_drop_idx_thread.join(); + if (err != 0) { + // NO_LINT_DEBUG + sql_print_error("RocksDB: Couldn't stop the index thread: (errno=%d)", err); + } + + if (rdb_open_tables.m_hash.size()) { + // Looks like we are getting unloaded and yet we have some open tables + // left behind. + error = 1; + } + + /* + destructors for static objects can be called at _exit(), + but we want to free the memory at dlclose() + */ + rdb_open_tables.m_hash.~Rdb_table_set(); + mysql_mutex_destroy(&rdb_open_tables.m_mutex); + mysql_mutex_destroy(&rdb_sysvars_mutex); + + + delete rdb_collation_exceptions; + + mysql_mutex_destroy(&rdb_collation_data_mutex); + mysql_mutex_destroy(&rdb_mem_cmp_space_mutex); + + Rdb_transaction::term_mutex(); + + for (auto &it : rdb_collation_data) { + delete it; + it = nullptr; + } + + ddl_manager.cleanup(); + binlog_manager.cleanup(); + dict_manager.cleanup(); + cf_manager.cleanup(); + + delete rdb; + rdb = nullptr; + +// Disown the cache data since we're shutting down. +// This results in memory leaks but it improved the shutdown time. +// Don't disown when running under valgrind +#ifndef HAVE_purify + if (rocksdb_tbl_options.block_cache) { + rocksdb_tbl_options.block_cache->DisownData(); + } +#endif /* HAVE_purify */ + + DBUG_RETURN(error); +} + +static inline void rocksdb_smart_seek(bool seek_backward, + rocksdb::Iterator *const iter, + const rocksdb::Slice &key_slice) { + if (seek_backward) { + iter->SeekForPrev(key_slice); + } else { + iter->Seek(key_slice); + } +} + +/** + @brief + Example of simple lock controls. The "table_handler" it creates is a + structure we will pass to each ha_rocksdb handler. Do you have to have + one of these? Well, you have pieces that are used for locking, and + they are needed to function. 
+*/ + +Rdb_table_handler * +Rdb_open_tables_map::get_table_handler(const char *const table_name) { + Rdb_table_handler *table_handler; + uint length; + char *tmp_name; + + DBUG_ASSERT(table_name != nullptr); + length = (uint)strlen(table_name); + + // First, look up the table in the hash map. + RDB_MUTEX_LOCK_CHECK(m_mutex); + if (!m_hash.size() || !(table_handler = m_hash.find(table_name, length))) { + // Since we did not find it in the hash map, attempt to create and add it + // to the hash map. + if (!(table_handler = reinterpret_cast(my_multi_malloc( + MYF(MY_WME | MY_ZEROFILL), &table_handler, sizeof(*table_handler), + &tmp_name, length + 1, NullS)))) { + // Allocating a new Rdb_table_handler and a new table name failed. + RDB_MUTEX_UNLOCK_CHECK(m_mutex); + return nullptr; + } + + table_handler->m_ref_count = 0; + table_handler->m_table_name_length = length; + table_handler->m_table_name = tmp_name; + strmov(table_handler->m_table_name, table_name); + + if (m_hash.insert(table_handler)) { + // Inserting into the hash map failed. 
+ RDB_MUTEX_UNLOCK_CHECK(m_mutex); + my_free(table_handler); + return nullptr; + } + + thr_lock_init(&table_handler->m_thr_lock); +#ifdef MARIAROCKS_NOT_YET + table_handler->m_io_perf_read.init(); +#endif + } + DBUG_ASSERT(table_handler->m_ref_count >= 0); + table_handler->m_ref_count++; + + RDB_MUTEX_UNLOCK_CHECK(m_mutex); + + return table_handler; +} + +std::vector rdb_get_open_table_names(void) { + return rdb_open_tables.get_table_names(); +} + +std::vector Rdb_open_tables_map::get_table_names(void) const { + ulong i; + const Rdb_table_handler *table_handler; + std::vector names; + + RDB_MUTEX_LOCK_CHECK(m_mutex); + for (i = 0; (table_handler = m_hash.at(i)); i++) { + DBUG_ASSERT(table_handler != nullptr); + names.push_back(table_handler->m_table_name); + } + DBUG_ASSERT(i == m_hash.size()); + RDB_MUTEX_UNLOCK_CHECK(m_mutex); + + return names; +} + +void ha_rocksdb::load_auto_incr_value() { + const int save_active_index = active_index; + active_index = table->s->next_number_index; + const uint8 save_table_status = table->status; + + /* + load_auto_incr_value() may be called by statements that + do not execute implicit commits (i.e. SHOW CREATE TABLE). + index_last() creates a snapshot. When a snapshot is created + here, it has to be released as well. (GitHub issue#189) + */ + Rdb_transaction *const tx = get_or_create_tx(table->in_use); + const bool is_new_snapshot = !tx->has_snapshot(); + + // Do a lookup. We only need index column, so it should be index-only. + // (another reason to make it index-only is that table->read_set is + // not set appropriately and non-index-only lookup will not read the value) + const bool save_keyread_only = m_keyread_only; + m_keyread_only = true; + + if (!index_last(table->record[0])) + update_auto_incr_val(); + + m_keyread_only = save_keyread_only; + if (is_new_snapshot) { + tx->release_snapshot(); + } + + table->status = save_table_status; + active_index = save_active_index; + + /* + Do what ha_rocksdb::index_end() does. 
+ (Why don't we use index_init/index_end? class handler defines index_init + as private, for some reason). + */ + release_scan_iterator(); +} + +/* Get PK value from table->record[0]. */ +/* + TODO(alexyang): No existing support for auto_increment on non-pk columns, see + end of ha_rocksdb::create. Also see opened issue here: + https://github.com/facebook/mysql-5.6/issues/153 +*/ +void ha_rocksdb::update_auto_incr_val() { + Field *field; + longlong new_val; + field = table->key_info[table->s->next_number_index].key_part[0].field; + + my_bitmap_map *const old_map = + dbug_tmp_use_all_columns(table, table->read_set); + new_val = field->val_int() + 1; + dbug_tmp_restore_column_map(table->read_set, old_map); + + longlong auto_incr_val = m_tbl_def->m_auto_incr_val; + while (auto_incr_val < new_val && + !m_tbl_def->m_auto_incr_val.compare_exchange_weak(auto_incr_val, + new_val)) { + // Do nothing - just loop until auto_incr_val is >= new_val or + // we successfully set it + } +} + +int ha_rocksdb::load_hidden_pk_value() { + const int save_active_index = active_index; + active_index = m_tbl_def->m_key_count - 1; + const uint8 save_table_status = table->status; + + Rdb_transaction *const tx = get_or_create_tx(table->in_use); + const bool is_new_snapshot = !tx->has_snapshot(); + + // Do a lookup. + if (!index_last(table->record[0])) { + /* + Decode PK field from the key + */ + longlong hidden_pk_id = 0; + if (read_hidden_pk_id_from_rowkey(&hidden_pk_id)) { + if (is_new_snapshot) { + tx->release_snapshot(); + } + return HA_ERR_INTERNAL_ERROR; + } + + hidden_pk_id++; + longlong old = m_tbl_def->m_hidden_pk_val; + while ( + old < hidden_pk_id && + !m_tbl_def->m_hidden_pk_val.compare_exchange_weak(old, hidden_pk_id)) { + } + } + + if (is_new_snapshot) { + tx->release_snapshot(); + } + + table->status = save_table_status; + active_index = save_active_index; + + release_scan_iterator(); + + return HA_EXIT_SUCCESS; +} + +/* Get PK value from m_tbl_def->m_hidden_pk_info. 
*/ +longlong ha_rocksdb::update_hidden_pk_val() { + DBUG_ASSERT(has_hidden_pk(table)); + const longlong new_val = m_tbl_def->m_hidden_pk_val++; + return new_val; +} + +/* Get the id of the hidden pk id from m_last_rowkey */ +int ha_rocksdb::read_hidden_pk_id_from_rowkey(longlong *const hidden_pk_id) { + DBUG_ASSERT(hidden_pk_id != nullptr); + DBUG_ASSERT(table != nullptr); + DBUG_ASSERT(has_hidden_pk(table)); + + rocksdb::Slice rowkey_slice(m_last_rowkey.ptr(), m_last_rowkey.length()); + + // Get hidden primary key from old key slice + Rdb_string_reader reader(&rowkey_slice); + if ((!reader.read(Rdb_key_def::INDEX_NUMBER_SIZE))) + return HA_EXIT_FAILURE; + + const int length= 8; /* was Field_longlong::PACK_LENGTH in FB MySQL tree */ + const uchar *from = reinterpret_cast(reader.read(length)); + if (from == nullptr) { + return HA_EXIT_FAILURE; /* Mem-comparable image doesn't have enough bytes */ + } + + *hidden_pk_id = rdb_netbuf_read_uint64(&from); + return HA_EXIT_SUCCESS; +} + +/** + @brief + Free lock controls. We call this whenever we close a table. If the table had + the last reference to the table_handler, then we free the memory associated + with it. +*/ + +void Rdb_open_tables_map::release_table_handler( + Rdb_table_handler *const table_handler) { + RDB_MUTEX_LOCK_CHECK(m_mutex); + + DBUG_ASSERT(table_handler != nullptr); + DBUG_ASSERT(table_handler->m_ref_count > 0); + if (!--table_handler->m_ref_count) { + // Last reference was released. Tear down the hash entry. 
+ const auto ret MY_ATTRIBUTE((__unused__)) = m_hash.remove(table_handler); + DBUG_ASSERT(!ret); // the hash entry must actually be found and deleted + my_core::thr_lock_delete(&table_handler->m_thr_lock); + my_free(table_handler); + } + + RDB_MUTEX_UNLOCK_CHECK(m_mutex); +} + +static handler *rocksdb_create_handler(my_core::handlerton *const hton, + my_core::TABLE_SHARE *const table_arg, + my_core::MEM_ROOT *const mem_root) { + return new (mem_root) ha_rocksdb(hton, table_arg); +} + +ha_rocksdb::ha_rocksdb(my_core::handlerton *const hton, + my_core::TABLE_SHARE *const table_arg) + : handler(hton, table_arg), m_table_handler(nullptr), m_scan_it(nullptr), + m_scan_it_skips_bloom(false), m_scan_it_snapshot(nullptr), + m_tbl_def(nullptr), m_pk_descr(nullptr), m_key_descr_arr(nullptr), + m_pk_can_be_decoded(false), m_maybe_unpack_info(false), + m_pk_tuple(nullptr), m_pk_packed_tuple(nullptr), + m_sk_packed_tuple(nullptr), m_end_key_packed_tuple(nullptr), + m_sk_match_prefix(nullptr), m_sk_match_prefix_buf(nullptr), + m_sk_packed_tuple_old(nullptr), m_dup_sk_packed_tuple(nullptr), + m_dup_sk_packed_tuple_old(nullptr), m_pack_buffer(nullptr), + m_lock_rows(RDB_LOCK_NONE), m_keyread_only(FALSE), + m_bulk_load_tx(nullptr), m_encoder_arr(nullptr), + m_row_checksums_checked(0), m_in_rpl_delete_rows(false), + m_in_rpl_update_rows(false), m_force_skip_unique_check(false) { + // TODO(alexyang): create a valid PSI_mutex_key for this mutex + mysql_mutex_init(0, &m_bulk_load_mutex, MY_MUTEX_INIT_FAST); +} + + +bool ha_rocksdb::same_table(const ha_rocksdb &other) const { + return m_tbl_def->base_tablename() == other.m_tbl_def->base_tablename(); +} + +bool ha_rocksdb::init_with_fields() { + DBUG_ENTER_FUNC(); + + const uint pk = table_share->primary_key; + if (pk != MAX_KEY) { + const uint key_parts = table_share->key_info[pk].user_defined_key_parts; + check_keyread_allowed(pk /*PK*/, key_parts - 1, true); + } else + m_pk_can_be_decoded = false; + + cached_table_flags = 
table_flags(); + + DBUG_RETURN(false); /* Ok */ +} + +/** + Convert record from table->record[0] form into a form that can be written + into rocksdb. + + @param pk_packed_slice Packed PK tuple. We need it in order to compute + and store its CRC. + @param packed_rec OUT Data slice with record data. +*/ + +void ha_rocksdb::convert_record_to_storage_format( + const rocksdb::Slice &pk_packed_slice, + Rdb_string_writer *const pk_unpack_info, rocksdb::Slice *const packed_rec) { + DBUG_ASSERT_IMP(m_maybe_unpack_info, pk_unpack_info); + m_storage_record.length(0); + + /* All NULL bits are initially 0 */ + m_storage_record.fill(m_null_bytes_in_rec, 0); + + // If a primary key may have non-empty unpack_info for certain values, + // (m_maybe_unpack_info=TRUE), we write the unpack_info block. The block + // itself was prepared in Rdb_key_def::pack_record. + if (m_maybe_unpack_info) { + m_storage_record.append(reinterpret_cast(pk_unpack_info->ptr()), + pk_unpack_info->get_current_pos()); + } + + for (uint i = 0; i < table->s->fields; i++) { + /* Don't pack decodable PK key parts */ + if (m_encoder_arr[i].m_storage_type != Rdb_field_encoder::STORE_ALL) { + continue; + } + + Field *const field = table->field[i]; + if (m_encoder_arr[i].maybe_null()) { + char *const data = (char *)m_storage_record.ptr(); + if (field->is_null()) { + data[m_encoder_arr[i].m_null_offset] |= m_encoder_arr[i].m_null_mask; + /* Don't write anything for NULL values */ + continue; + } + } + + if (m_encoder_arr[i].m_field_type == MYSQL_TYPE_BLOB) { + my_core::Field_blob *blob = (my_core::Field_blob *)field; + /* Get the number of bytes needed to store length*/ + const uint length_bytes = blob->pack_length() - portable_sizeof_char_ptr; + + /* Store the length of the value */ + m_storage_record.append(reinterpret_cast(blob->ptr), + length_bytes); + + /* Store the blob value itself */ + char *data_ptr; + memcpy(&data_ptr, blob->ptr + length_bytes, sizeof(uchar **)); + m_storage_record.append(data_ptr, 
blob->get_length()); + } else if (m_encoder_arr[i].m_field_type == MYSQL_TYPE_VARCHAR) { + Field_varstring *const field_var = (Field_varstring *)field; + uint data_len; + /* field_var->length_bytes is 1 or 2 */ + if (field_var->length_bytes == 1) { + data_len = field_var->ptr[0]; + } else { + DBUG_ASSERT(field_var->length_bytes == 2); + data_len = uint2korr(field_var->ptr); + } + m_storage_record.append(reinterpret_cast(field_var->ptr), + field_var->length_bytes + data_len); + } else { + /* Copy the field data */ + const uint len = field->pack_length_in_rec(); + m_storage_record.append(reinterpret_cast(field->ptr), len); + } + } + + if (should_store_row_debug_checksums()) { + const uint32_t key_crc32 = my_core::crc32( + 0, rdb_slice_to_uchar_ptr(&pk_packed_slice), pk_packed_slice.size()); + const uint32_t val_crc32 = + my_core::crc32(0, rdb_mysql_str_to_uchar_str(&m_storage_record), + m_storage_record.length()); + uchar key_crc_buf[RDB_CHECKSUM_SIZE]; + uchar val_crc_buf[RDB_CHECKSUM_SIZE]; + rdb_netbuf_store_uint32(key_crc_buf, key_crc32); + rdb_netbuf_store_uint32(val_crc_buf, val_crc32); + m_storage_record.append((const char *)&RDB_CHECKSUM_DATA_TAG, 1); + m_storage_record.append((const char *)key_crc_buf, RDB_CHECKSUM_SIZE); + m_storage_record.append((const char *)val_crc_buf, RDB_CHECKSUM_SIZE); + } + + *packed_rec = + rocksdb::Slice(m_storage_record.ptr(), m_storage_record.length()); +} + +/* + @brief + Setup which fields will be unpacked when reading rows + + @detail + Two special cases when we still unpack all fields: + - When this table is being updated (m_lock_rows==RDB_LOCK_WRITE). + - When @@rocksdb_verify_row_debug_checksums is ON (In this mode, we need to + read all + fields to find whether there is a row checksum at the end. We could skip + the fields instead of decoding them, but currently we do decoding.) 
+ + @seealso + ha_rocksdb::setup_field_converters() + ha_rocksdb::convert_record_from_storage_format() +*/ +void ha_rocksdb::setup_read_decoders() { + m_decoders_vect.clear(); + + int last_useful = 0; + int skip_size = 0; + + for (uint i = 0; i < table->s->fields; i++) { + // We only need the decoder if the whole record is stored. + if (m_encoder_arr[i].m_storage_type != Rdb_field_encoder::STORE_ALL) { + continue; + } + + if (m_lock_rows == RDB_LOCK_WRITE || m_verify_row_debug_checksums || + bitmap_is_set(table->read_set, table->field[i]->field_index)) { + // We will need to decode this field + m_decoders_vect.push_back({&m_encoder_arr[i], true, skip_size}); + last_useful = m_decoders_vect.size(); + skip_size = 0; + } else { + if (m_encoder_arr[i].uses_variable_len_encoding() || + m_encoder_arr[i].maybe_null()) { + // For variable-length field, we need to read the data and skip it + m_decoders_vect.push_back({&m_encoder_arr[i], false, skip_size}); + skip_size = 0; + } else { + // Fixed-width field can be skipped without looking at it. + // Add appropriate skip_size to the next field. + skip_size += m_encoder_arr[i].m_pack_length_in_rec; + } + } + } + + // It could be that the last few elements are varchars that just do + // skipping. Remove them. + m_decoders_vect.erase(m_decoders_vect.begin() + last_useful, + m_decoders_vect.end()); +} + +#ifndef NDEBUG +void dbug_append_garbage_at_end(std::string &on_disk_rec) { + on_disk_rec.append("abc"); +} + +void dbug_truncate_record(std::string &on_disk_rec) { on_disk_rec.resize(0); } + +void dbug_modify_rec_varchar12(std::string &on_disk_rec) { + std::string res; + // The record is NULL-byte followed by VARCHAR(10). + // Put the NULL-byte + res.append("\0", 1); + // Then, add a valid VARCHAR(12) value. 
+ res.append("\xC", 1); + res.append("123456789ab", 12); + + on_disk_rec.assign(res); +} + +void dbug_modify_key_varchar8(String &on_disk_rec) { + std::string res; + // The key starts with index number + res.append(on_disk_rec.ptr(), Rdb_key_def::INDEX_NUMBER_SIZE); + + // Then, a mem-comparable form of a varchar(8) value. + res.append("ABCDE\0\0\0\xFC", 9); + on_disk_rec.length(0); + on_disk_rec.append(res.data(), res.size()); +} + +void dbug_create_err_inplace_alter() { + my_printf_error(ER_UNKNOWN_ERROR, + "Intentional failure in inplace alter occurred.", MYF(0)); +} +#endif + +int ha_rocksdb::convert_record_from_storage_format( + const rocksdb::Slice *const key, uchar *const buf) { + DBUG_EXECUTE_IF("myrocks_simulate_bad_row_read1", + dbug_append_garbage_at_end(m_retrieved_record);); + DBUG_EXECUTE_IF("myrocks_simulate_bad_row_read2", + dbug_truncate_record(m_retrieved_record);); + DBUG_EXECUTE_IF("myrocks_simulate_bad_row_read3", + dbug_modify_rec_varchar12(m_retrieved_record);); + + const rocksdb::Slice retrieved_rec_slice(&m_retrieved_record.front(), + m_retrieved_record.size()); + return convert_record_from_storage_format(key, &retrieved_rec_slice, buf); +} + +int ha_rocksdb::convert_blob_from_storage_format( + my_core::Field_blob *const blob, + Rdb_string_reader *const reader, + bool decode) +{ + /* Get the number of bytes needed to store length*/ + const uint length_bytes = blob->pack_length() - portable_sizeof_char_ptr; + + const char *data_len_str; + if (!(data_len_str = reader->read(length_bytes))) { + return HA_ERR_INTERNAL_ERROR; + } + + memcpy(blob->ptr, data_len_str, length_bytes); + + const uint32 data_len = blob->get_length( + reinterpret_cast(data_len_str), length_bytes); + const char *blob_ptr; + if (!(blob_ptr = reader->read(data_len))) { + return HA_ERR_INTERNAL_ERROR; + } + + if (decode) { + // set 8-byte pointer to 0, like innodb does (relevant for 32-bit + // platforms) + memset(blob->ptr + length_bytes, 0, 8); + memcpy(blob->ptr + 
length_bytes, &blob_ptr, sizeof(uchar **)); + } + + return HA_EXIT_SUCCESS; +} + +int ha_rocksdb::convert_varchar_from_storage_format( + my_core::Field_varstring *const field_var, + Rdb_string_reader *const reader, + bool decode) +{ + const char *data_len_str; + if (!(data_len_str = reader->read(field_var->length_bytes))) + return HA_ERR_INTERNAL_ERROR; + + uint data_len; + /* field_var->length_bytes is 1 or 2 */ + if (field_var->length_bytes == 1) { + data_len = (uchar)data_len_str[0]; + } else { + DBUG_ASSERT(field_var->length_bytes == 2); + data_len = uint2korr(data_len_str); + } + + if (data_len > field_var->field_length) { + /* The data on disk is longer than table DDL allows? */ + return HA_ERR_INTERNAL_ERROR; + } + + if (!reader->read(data_len)) { + return HA_ERR_INTERNAL_ERROR; + } + + if (decode) { + memcpy(field_var->ptr, data_len_str, field_var->length_bytes + data_len); + } + + return HA_EXIT_SUCCESS; +} + +int ha_rocksdb::convert_field_from_storage_format( + my_core::Field *const field, + Rdb_string_reader *const reader, + bool decode, + uint len) +{ + const char *data_bytes; + if (len > 0) { + if ((data_bytes = reader->read(len)) == nullptr) { + return HA_ERR_INTERNAL_ERROR; + } + + if (decode) + memcpy(field->ptr, data_bytes, len); + } + + return HA_EXIT_SUCCESS; +} + +/* + @brief + Unpack the record in this->m_retrieved_record and this->m_last_rowkey from + storage format into buf (which can be table->record[0] or table->record[1]). + + @param key Table record's key in mem-comparable form. + @param buf Store record in table->record[0] format here + + @detail + If the table has blobs, the unpacked data in buf may keep pointers to the + data in this->m_retrieved_record. + + The key is only needed to check its checksum value (the checksum is in + m_retrieved_record). + + @seealso + ha_rocksdb::setup_read_decoders() Sets up data structures which tell which + columns to decode. 
+ + @return + 0 OK + other Error unpacking the data +*/ + +int ha_rocksdb::convert_record_from_storage_format( + const rocksdb::Slice *const key, const rocksdb::Slice *const value, + uchar *const buf) { + DBUG_ASSERT(key != nullptr); + DBUG_ASSERT(buf != nullptr); + + Rdb_string_reader reader(value); + + /* + Decode PK fields from the key + */ + DBUG_EXECUTE_IF("myrocks_simulate_bad_pk_read1", + dbug_modify_key_varchar8(m_last_rowkey);); + + const rocksdb::Slice rowkey_slice(m_last_rowkey.ptr(), + m_last_rowkey.length()); + const char *unpack_info = nullptr; + uint16 unpack_info_len = 0; + rocksdb::Slice unpack_slice; + + /* Other fields are decoded from the value */ + const char *null_bytes = nullptr; + if (m_null_bytes_in_rec && !(null_bytes = reader.read(m_null_bytes_in_rec))) { + return HA_ERR_INTERNAL_ERROR; + } + + if (m_maybe_unpack_info) { + unpack_info = reader.read(RDB_UNPACK_HEADER_SIZE); + + if (!unpack_info || unpack_info[0] != RDB_UNPACK_DATA_TAG) { + return HA_ERR_INTERNAL_ERROR; + } + + unpack_info_len = + rdb_netbuf_to_uint16(reinterpret_cast(unpack_info + 1)); + unpack_slice = rocksdb::Slice(unpack_info, unpack_info_len); + + reader.read(unpack_info_len - RDB_UNPACK_HEADER_SIZE); + } + + if (m_pk_descr->unpack_record(table, buf, &rowkey_slice, + unpack_info ?
&unpack_slice : nullptr, + false /* verify_checksum */)) { + return HA_ERR_INTERNAL_ERROR; + } + + int err = HA_EXIT_SUCCESS; + for (auto it = m_decoders_vect.begin(); it != m_decoders_vect.end(); it++) { + const Rdb_field_encoder *const field_dec = it->m_field_enc; + const bool decode = it->m_decode; + const bool isNull = + field_dec->maybe_null() && + ((null_bytes[field_dec->m_null_offset] & field_dec->m_null_mask) != 0); + + Field *const field = table->field[field_dec->m_field_index]; + + /* Skip the bytes we need to skip */ + if (it->m_skip && !reader.read(it->m_skip)) + return HA_ERR_INTERNAL_ERROR; + + uint field_offset = field->ptr - table->record[0]; + uint null_offset = field->null_offset(); + bool maybe_null = field->real_maybe_null(); + field->move_field(buf + field_offset, + maybe_null ? buf + null_offset : nullptr, + field->null_bit); + // WARNING! - Don't return before restoring field->ptr and field->null_ptr! + + if (isNull) { + if (decode) { + /* This sets the NULL-bit of this record */ + field->set_null(); + /* + Besides that, set the field value to default value. CHECKSUM TABLE + depends on this. + */ + memcpy(field->ptr, table->s->default_values + field_offset, + field->pack_length()); + } + } else { + if (decode) { + field->set_notnull(); + } + + if (field_dec->m_field_type == MYSQL_TYPE_BLOB) { + err = convert_blob_from_storage_format( + (my_core::Field_blob *) field, &reader, decode); + } else if (field_dec->m_field_type == MYSQL_TYPE_VARCHAR) { + err = convert_varchar_from_storage_format( + (my_core::Field_varstring *) field, &reader, decode); + } else { + err = convert_field_from_storage_format( + field, &reader, decode, field_dec->m_pack_length_in_rec); + } + } + + // Restore field->ptr and field->null_ptr + field->move_field(table->record[0] + field_offset, + maybe_null ? 
table->record[0] + null_offset : nullptr, + field->null_bit); + + if (err != HA_EXIT_SUCCESS) { + return err; + } + } + + if (m_verify_row_debug_checksums) { + if (reader.remaining_bytes() == RDB_CHECKSUM_CHUNK_SIZE && + reader.read(1)[0] == RDB_CHECKSUM_DATA_TAG) { + uint32_t stored_key_chksum = + rdb_netbuf_to_uint32((const uchar *)reader.read(RDB_CHECKSUM_SIZE)); + uint32_t stored_val_chksum = + rdb_netbuf_to_uint32((const uchar *)reader.read(RDB_CHECKSUM_SIZE)); + + const uint32_t computed_key_chksum = + my_core::crc32(0, rdb_slice_to_uchar_ptr(key), key->size()); + const uint32_t computed_val_chksum = + my_core::crc32(0, rdb_slice_to_uchar_ptr(value), + value->size() - RDB_CHECKSUM_CHUNK_SIZE); + + DBUG_EXECUTE_IF("myrocks_simulate_bad_pk_checksum1", + stored_key_chksum++;); + + if (stored_key_chksum != computed_key_chksum) { + m_pk_descr->report_checksum_mismatch(true, key->data(), key->size()); + return HA_ERR_INTERNAL_ERROR; + } + + DBUG_EXECUTE_IF("myrocks_simulate_bad_pk_checksum2", + stored_val_chksum++;); + if (stored_val_chksum != computed_val_chksum) { + m_pk_descr->report_checksum_mismatch(false, value->data(), + value->size()); + return HA_ERR_INTERNAL_ERROR; + } + + m_row_checksums_checked++; + } + if (reader.remaining_bytes()) + return HA_ERR_INTERNAL_ERROR; + } + + return HA_EXIT_SUCCESS; +} + +void ha_rocksdb::get_storage_type(Rdb_field_encoder *const encoder, + const uint &kp) { + // STORE_SOME uses unpack_info. + if (m_pk_descr->has_unpack_info(kp)) { + DBUG_ASSERT(m_pk_descr->can_unpack(kp)); + encoder->m_storage_type = Rdb_field_encoder::STORE_SOME; + m_maybe_unpack_info = true; + } else if (m_pk_descr->can_unpack(kp)) { + encoder->m_storage_type = Rdb_field_encoder::STORE_NONE; + } +} + +/* + Setup data needed to convert table->record[] to and from record storage + format. 
+ + @seealso + ha_rocksdb::convert_record_to_storage_format, + ha_rocksdb::convert_record_from_storage_format +*/ + +void ha_rocksdb::setup_field_converters() { + uint i; + uint null_bytes = 0; + uchar cur_null_mask = 0x1; + + DBUG_ASSERT(m_encoder_arr == nullptr); + m_encoder_arr = static_cast( + my_malloc(table->s->fields * sizeof(Rdb_field_encoder), MYF(0))); + if (m_encoder_arr == nullptr) { + return; + } + + for (i = 0; i < table->s->fields; i++) { + Field *const field = table->field[i]; + m_encoder_arr[i].m_storage_type = Rdb_field_encoder::STORE_ALL; + + /* + Check if this field is + - a part of primary key, and + - it can be decoded back from its key image. + If both hold, we don't need to store this field in the value part of + RocksDB's key-value pair. + + If hidden pk exists, we skip this check since the field will never be + part of the hidden pk. + */ + if (!has_hidden_pk(table) && + field->part_of_key.is_set(table->s->primary_key)) { + KEY *const pk_info = &table->key_info[table->s->primary_key]; + for (uint kp = 0; kp < pk_info->user_defined_key_parts; kp++) { + /* key_part->fieldnr is counted from 1 */ + if (field->field_index + 1 == pk_info->key_part[kp].fieldnr) { + get_storage_type(&m_encoder_arr[i], kp); + break; + } + } + } + + m_encoder_arr[i].m_field_type = field->real_type(); + m_encoder_arr[i].m_field_index = i; + m_encoder_arr[i].m_pack_length_in_rec = field->pack_length_in_rec(); + + if (field->real_maybe_null()) { + m_encoder_arr[i].m_null_mask = cur_null_mask; + m_encoder_arr[i].m_null_offset = null_bytes; + if (cur_null_mask == 0x80) { + cur_null_mask = 0x1; + null_bytes++; + } else + cur_null_mask = cur_null_mask << 1; + } else { + m_encoder_arr[i].m_null_mask = 0; + } + } + + /* Count the last, unfinished NULL-bits byte */ + if (cur_null_mask != 0x1) + null_bytes++; + + m_null_bytes_in_rec = null_bytes; +} + +int ha_rocksdb::alloc_key_buffers(const TABLE *const table_arg, + const Rdb_tbl_def *const tbl_def_arg, + bool 
alloc_alter_buffers) { + DBUG_ENTER_FUNC(); + + DBUG_ASSERT(m_pk_tuple == nullptr); + DBUG_ASSERT(tbl_def_arg != nullptr); + + std::shared_ptr *const kd_arr = tbl_def_arg->m_key_descr_arr; + + uint key_len = 0; + uint max_packed_sk_len = 0; + uint pack_key_len = 0; + + m_pk_descr = kd_arr[pk_index(table_arg, tbl_def_arg)]; + if (has_hidden_pk(table_arg)) { + m_pk_key_parts = 1; + } else { + m_pk_key_parts = + table->key_info[table->s->primary_key].user_defined_key_parts; + key_len = table->key_info[table->s->primary_key].key_length; + } + + // move this into get_table_handler() ?? + m_pk_descr->setup(table_arg, tbl_def_arg); + + m_pk_tuple = reinterpret_cast(my_malloc(key_len, MYF(0))); + if (m_pk_tuple == nullptr) { + goto error; + } + + pack_key_len = m_pk_descr->max_storage_fmt_length(); + m_pk_packed_tuple = + reinterpret_cast(my_malloc(pack_key_len, MYF(0))); + if (m_pk_packed_tuple == nullptr) { + goto error; + } + + /* Sometimes, we may use m_sk_packed_tuple for storing packed PK */ + max_packed_sk_len = pack_key_len; + for (uint i = 0; i < table_arg->s->keys; i++) { + if (i == table_arg->s->primary_key) /* Primary key was processed above */ + continue; + + // TODO: move this into get_table_handler() ?? + kd_arr[i]->setup(table_arg, tbl_def_arg); + + const uint packed_len = kd_arr[i]->max_storage_fmt_length(); + if (packed_len > max_packed_sk_len) { + max_packed_sk_len = packed_len; + } + } + + if (!(m_sk_packed_tuple = + reinterpret_cast(my_malloc(max_packed_sk_len, MYF(0)))) || + !(m_sk_match_prefix_buf = + reinterpret_cast(my_malloc(max_packed_sk_len, MYF(0)))) || + !(m_sk_packed_tuple_old = + reinterpret_cast(my_malloc(max_packed_sk_len, MYF(0)))) || + !(m_end_key_packed_tuple = + reinterpret_cast(my_malloc(max_packed_sk_len, MYF(0)))) || + !((m_pack_buffer = reinterpret_cast( + my_malloc(max_packed_sk_len, MYF(0)))))) { + goto error; + } + + /* + If inplace alter is happening, allocate special buffers for unique + secondary index duplicate checking. 
+ */ + if (alloc_alter_buffers && + (!(m_dup_sk_packed_tuple = + reinterpret_cast(my_malloc(max_packed_sk_len, MYF(0)))) || + !(m_dup_sk_packed_tuple_old = reinterpret_cast( + my_malloc(max_packed_sk_len, MYF(0)))))) { + goto error; + } + + DBUG_RETURN(HA_EXIT_SUCCESS); + +error: + // If we're here then this means that at some point above an allocation may + // have failed. To avoid any resource leaks and maintain a clear contract + // we'll clean up before returning the error code. + free_key_buffers(); + + DBUG_RETURN(HA_ERR_INTERNAL_ERROR); +} + +void ha_rocksdb::free_key_buffers() { + my_free(m_pk_tuple); + m_pk_tuple = nullptr; + + my_free(m_pk_packed_tuple); + m_pk_packed_tuple = nullptr; + + my_free(m_sk_packed_tuple); + m_sk_packed_tuple = nullptr; + + my_free(m_sk_match_prefix_buf); + m_sk_match_prefix_buf = nullptr; + + my_free(m_sk_packed_tuple_old); + m_sk_packed_tuple_old = nullptr; + + my_free(m_end_key_packed_tuple); + m_end_key_packed_tuple = nullptr; + + my_free(m_pack_buffer); + m_pack_buffer = nullptr; + + my_free(m_dup_sk_packed_tuple); + m_dup_sk_packed_tuple = nullptr; + + my_free(m_dup_sk_packed_tuple_old); + m_dup_sk_packed_tuple_old = nullptr; +} + +#ifdef MARIAROCKS_NOT_YET +void ha_rocksdb::set_use_read_free_rpl(const char *const whitelist) { + DBUG_ASSERT(whitelist != nullptr); + +#if defined(HAVE_PSI_INTERFACE) + Regex_list_handler regex_handler(key_rwlock_read_free_rpl_tables); +#else + Regex_list_handler regex_handler; +#endif + + if (!regex_handler.set_patterns(whitelist)) { + warn_about_bad_patterns(®ex_handler, "read_free_rpl_tables"); + } + + m_use_read_free_rpl = regex_handler.matches(m_tbl_def->base_tablename()); +} +#endif + +void ha_rocksdb::set_skip_unique_check_tables(const char *const whitelist) { + DBUG_ASSERT(whitelist != nullptr); + +#if defined(HAVE_PSI_INTERFACE) + Regex_list_handler regex_handler(key_rwlock_skip_unique_check_tables); +#else + Regex_list_handler regex_handler; +#endif + + if 
(!regex_handler.set_patterns(whitelist)) { + warn_about_bad_patterns(®ex_handler, "skip_unique_check_tables"); + } + + m_skip_unique_check = regex_handler.matches(m_tbl_def->base_tablename()); +} + +int ha_rocksdb::open(const char *const name, int mode, uint test_if_locked) { + DBUG_ENTER_FUNC(); + + if (close()) { + DBUG_RETURN(HA_ERR_INTERNAL_ERROR); + } + + m_table_handler = rdb_open_tables.get_table_handler(name); + + if (m_table_handler == nullptr) { + DBUG_RETURN(HA_ERR_INTERNAL_ERROR); + } + + my_core::thr_lock_data_init(&m_table_handler->m_thr_lock, &m_db_lock, + nullptr); + m_io_perf.init(&m_table_handler->m_table_perf_context, + &m_table_handler->m_io_perf_read, &stats); + + std::string fullname; + if (rdb_normalize_tablename(name, &fullname)) { + DBUG_RETURN(HA_ERR_INTERNAL_ERROR); + } + + m_tbl_def = ddl_manager.find(fullname); + if (m_tbl_def == nullptr) { + my_error(ER_INTERNAL_ERROR, MYF(0), + "Attempt to open a table that is not present in RocksDB-SE data " + "dictionary"); + DBUG_RETURN(HA_ERR_INTERNAL_ERROR); + } + m_lock_rows = RDB_LOCK_NONE; + + m_key_descr_arr = m_tbl_def->m_key_descr_arr; + + /* + Full table scan actually uses primary key + (UPDATE needs to know this, otherwise it will go into infinite loop on + queries like "UPDATE tbl SET pk=pk+100") + */ + key_used_on_scan = table->s->primary_key; + + // close() above has already called free_key_buffers(). No need to do it here. + int err = alloc_key_buffers(table, m_tbl_def); + + if (err) { + DBUG_RETURN(err); + } + + /* + init_with_fields() is used to initialize table flags based on the field + definitions in table->field[]. + It is called by open_binary_frm(), but that function calls the method for + a temporary ha_rocksdb object which is later destroyed. + + If we are here in ::open(), then init_with_fields() has not been called + for this object. Call it ourselves, we want all member variables to be + properly initialized. 
+ */ + init_with_fields(); + + setup_field_converters(); + + info(HA_STATUS_NO_LOCK | HA_STATUS_VARIABLE | HA_STATUS_CONST); + + /* + The following load_XXX code calls row decode functions, and they do + that without having done ::external_lock() or index_init()/rnd_init(). + (Note: this also means we're doing a read when there was no + setup_field_converters() call) + + Initialize the necessary variables for them: + */ + m_verify_row_debug_checksums = false; + + /* TODO: move the following to where TABLE_SHARE is opened: */ + if (table->found_next_number_field) + load_auto_incr_value(); + + if (has_hidden_pk(table) && load_hidden_pk_value()) { + free_key_buffers(); + DBUG_RETURN(HA_ERR_INTERNAL_ERROR); + } + + /* Index block size in MyRocks: used by MySQL in query optimization */ + stats.block_size = rocksdb_tbl_options.block_size; + +#ifdef MARIAROCKS_NOT_YET // MDEV-10976 + /* Determine at open whether we can use Read Free Replication or not */ + set_use_read_free_rpl(THDVAR(ha_thd(), read_free_rpl_tables)); +#endif + + /* Determine at open whether we should skip unique checks for this table */ + set_skip_unique_check_tables(THDVAR(ha_thd(), skip_unique_check_tables)); + + DBUG_RETURN(HA_EXIT_SUCCESS); +} + +int ha_rocksdb::close(void) { + DBUG_ENTER_FUNC(); + + m_pk_descr = nullptr; + m_key_descr_arr = nullptr; + + free_key_buffers(); + + my_free(m_encoder_arr); + m_encoder_arr = nullptr; + + if (m_table_handler != nullptr) { + rdb_open_tables.release_table_handler(m_table_handler); + m_table_handler = nullptr; + } + + DBUG_RETURN(HA_EXIT_SUCCESS); +} + +bool ha_rocksdb::get_error_message(const int error, String *const buf) { + DBUG_ENTER_FUNC(); + + static_assert(HA_ERR_ROCKSDB_LAST > HA_ERR_FIRST, + "HA_ERR_ROCKSDB_LAST > HA_ERR_FIRST"); + static_assert(HA_ERR_ROCKSDB_LAST > HA_ERR_LAST, + "HA_ERR_ROCKSDB_LAST > HA_ERR_LAST"); + + DBUG_ASSERT(error > 0); + DBUG_ASSERT(error <= HA_ERR_ROCKSDB_LAST); + DBUG_ASSERT(buf != nullptr); + + Rdb_transaction *const tx 
= get_tx_from_thd(ha_thd()); + bool temp_error = false; + + switch (error) { + case HA_ERR_ROCKSDB_PK_REQUIRED: + buf->append("Table must have a PRIMARY KEY."); + break; + case HA_ERR_ROCKSDB_UNIQUE_NOT_SUPPORTED: + buf->append("Unique indexes are not supported."); + break; + case HA_ERR_ROCKSDB_TOO_MANY_LOCKS: + buf->append("Number of locks held reached @@rocksdb_max_row_locks."); + break; + case HA_ERR_LOCK_WAIT_TIMEOUT: + DBUG_ASSERT(tx != nullptr); + buf->append(tx->m_detailed_error); + temp_error = true; + break; + case HA_ERR_ROCKSDB_TABLE_DATA_DIRECTORY_NOT_SUPPORTED: + buf->append("Specifying DATA DIRECTORY for an individual table is not " + "supported."); + break; + case HA_ERR_ROCKSDB_TABLE_INDEX_DIRECTORY_NOT_SUPPORTED: + buf->append("Specifying INDEX DIRECTORY for an individual table is not " + "supported."); + break; + default: + // We can be called with the values which are < HA_ERR_FIRST because most + // MySQL internal functions will just return HA_EXIT_FAILURE in case of + // an error. + break; + } + + DBUG_RETURN(temp_error); +} + +/* MyRocks supports only the following collations for indexed columns */ +static const std::set RDB_INDEX_COLLATIONS = { + COLLATION_BINARY, COLLATION_UTF8_BIN, COLLATION_LATIN1_BIN}; + +static bool +rdb_is_index_collation_supported(const my_core::Field *const field) { + const my_core::enum_field_types type = field->real_type(); + /* Handle [VAR](CHAR|BINARY) or TEXT|BLOB */ + if (type == MYSQL_TYPE_VARCHAR || type == MYSQL_TYPE_STRING || + type == MYSQL_TYPE_BLOB) { + return RDB_INDEX_COLLATIONS.find(field->charset()->number) != + RDB_INDEX_COLLATIONS.end(); + } + return true; +} + +/* + Create structures needed for storing data in rocksdb. This is called when the + table is created. The structures will be shared by all TABLE* objects. 
+ + @param + table_arg Table with definition + db_table "dbname.tablename" + len strlen of the above + tbl_def_arg tbl_def whose key_descr is being created/populated + old_tbl_def_arg tbl_def from which keys are being copied over from + (for use during inplace alter) + + @return + 0 - Ok + other - error, either given table ddl is not supported by rocksdb or OOM. +*/ +int ha_rocksdb::create_key_defs( + const TABLE *const table_arg, Rdb_tbl_def *const tbl_def_arg, + const TABLE *const old_table_arg /* = nullptr */, + const Rdb_tbl_def *const old_tbl_def_arg + /* = nullptr */) const { + DBUG_ENTER_FUNC(); + + DBUG_ASSERT(table_arg != nullptr); + DBUG_ASSERT(table_arg->s != nullptr); + + uint i; + + /* + These need to be one greater than MAX_INDEXES since the user can create + MAX_INDEXES secondary keys and no primary key which would cause us + to generate a hidden one. + */ + std::array cfs; + + /* + NOTE: All new column families must be created before new index numbers are + allocated to each key definition. See below for more details. + http://github.com/MySQLOnRocksDB/mysql-5.6/issues/86#issuecomment-138515501 + */ + if (create_cfs(table_arg, tbl_def_arg, &cfs)) { + DBUG_RETURN(HA_EXIT_FAILURE); + } + + if (!old_tbl_def_arg) { + /* + old_tbl_def doesn't exist. this means we are in the process of creating + a new table. + + Get the index numbers (this will update the next_index_number) + and create Rdb_key_def structures. + */ + for (i = 0; i < tbl_def_arg->m_key_count; i++) { + if (create_key_def(table_arg, i, tbl_def_arg, &m_key_descr_arr[i], + cfs[i])) { + DBUG_RETURN(HA_EXIT_FAILURE); + } + } + } else { + /* + old_tbl_def exists. This means we are creating a new tbl_def as part of + in-place alter table. Copy over existing keys from the old_tbl_def and + generate the necessary new key definitions if any. 
+ */ + if (create_inplace_key_defs(table_arg, tbl_def_arg, old_table_arg, + old_tbl_def_arg, cfs)) { + DBUG_RETURN(HA_EXIT_FAILURE); + } + } + + DBUG_RETURN(HA_EXIT_SUCCESS); +} + +/* + Checks index parameters and creates column families needed for storing data + in rocksdb if necessary. + + @param in + table_arg Table with definition + db_table Table name + tbl_def_arg Table def structure being populated + + @param out + cfs CF info for each key definition in 'key_info' order + + @return + 0 - Ok + other - error +*/ +int ha_rocksdb::create_cfs( + const TABLE *const table_arg, Rdb_tbl_def *const tbl_def_arg, + std::array *const cfs) const { + DBUG_ENTER_FUNC(); + + DBUG_ASSERT(table_arg != nullptr); + DBUG_ASSERT(table_arg->s != nullptr); + DBUG_ASSERT(tbl_def_arg != nullptr); + + char tablename_sys[NAME_LEN + 1]; + bool tsys_set= false; + + /* + The first loop checks the index parameters and creates + column families if necessary. + */ + for (uint i = 0; i < tbl_def_arg->m_key_count; i++) { + rocksdb::ColumnFamilyHandle *cf_handle; + + if (rocksdb_strict_collation_check && + !is_hidden_pk(i, table_arg, tbl_def_arg) && + tbl_def_arg->base_tablename().find(tmp_file_prefix) != 0) { + if (!tsys_set) + { + tsys_set= true; + my_core::filename_to_tablename(tbl_def_arg->base_tablename().c_str(), + tablename_sys, sizeof(tablename_sys)); + } + + for (uint part = 0; part < table_arg->key_info[i].ext_key_parts; + part++) + { + if (!rdb_is_index_collation_supported( + table_arg->key_info[i].key_part[part].field) && + !rdb_collation_exceptions->matches(tablename_sys)) { + std::string collation_err; + for (const auto &coll : RDB_INDEX_COLLATIONS) { + if (collation_err != "") { + collation_err += ", "; + } + collation_err += get_charset_name(coll); + } + my_printf_error( + ER_UNKNOWN_ERROR, "Unsupported collation on string indexed " + "column %s.%s Use binary collation (%s).", + MYF(0), tbl_def_arg->full_tablename().c_str(), + 
table_arg->key_info[i].key_part[part].field->field_name, + collation_err.c_str()); + DBUG_RETURN(HA_EXIT_FAILURE); + } + } + } + + // Internal consistency check to make sure that data in TABLE and + // Rdb_tbl_def structures matches. Either both are missing or both are + // specified. Yes, this is critical enough to make it into SHIP_ASSERT. + SHIP_ASSERT(!table_arg->part_info == tbl_def_arg->base_partition().empty()); + + // Generate the name for the column family to use. + bool per_part_match_found = false; + std::string cf_name = generate_cf_name(i, table_arg, tbl_def_arg, + &per_part_match_found); + + const char *const key_name = get_key_name(i, table_arg, tbl_def_arg); + + if (looks_like_per_index_cf_typo(cf_name.c_str())) { + my_error(ER_NOT_SUPPORTED_YET, MYF(0), + "column family name looks like a typo of $per_index_cf."); + DBUG_RETURN(HA_EXIT_FAILURE); + } + + // Prevent create from using the system column family. + if (!cf_name.empty() && strcmp(DEFAULT_SYSTEM_CF_NAME, + cf_name.c_str()) == 0) { + my_error(ER_WRONG_ARGUMENTS, MYF(0), + "column family not valid for storing index data."); + DBUG_RETURN(HA_EXIT_FAILURE); + } + + bool is_auto_cf_flag; + + // Here's how `get_or_create_cf` will use the input parameters: + // + // `cf_name` - will be used as a CF name. + // `key_name` - will be only used in case of "$per_index_cf". + cf_handle = + cf_manager.get_or_create_cf(rdb, cf_name.c_str(), + tbl_def_arg->full_tablename(), key_name, + &is_auto_cf_flag); + + if (!cf_handle) { + DBUG_RETURN(HA_EXIT_FAILURE); + } + + auto &cf = (*cfs)[i]; + + cf.cf_handle = cf_handle; + cf.is_reverse_cf = Rdb_cf_manager::is_cf_name_reverse(cf_name.c_str()); + cf.is_auto_cf = is_auto_cf_flag; + cf.is_per_partition_cf = per_part_match_found; + } + + DBUG_RETURN(HA_EXIT_SUCCESS); +} + +/* + Create key definition needed for storing data in rocksdb during ADD index + inplace operations. 
+
+  @param in
+    table_arg       Table with definition
+    tbl_def_arg     New table def structure being populated
+    old_tbl_def_arg Old(current) table def structure
+    cfs             Struct array which contains column family information
+
+  @return
+    0      - Ok
+    other  - error, either given table ddl is not supported by rocksdb or OOM.
+*/
+int ha_rocksdb::create_inplace_key_defs(
+    const TABLE *const table_arg, Rdb_tbl_def *const tbl_def_arg,
+    const TABLE *const old_table_arg, const Rdb_tbl_def *const old_tbl_def_arg,
+    const std::array<struct key_def_cf_info, MAX_INDEXES + 1> &cfs) const {
+  DBUG_ENTER_FUNC();
+
+  DBUG_ASSERT(table_arg != nullptr);
+  DBUG_ASSERT(tbl_def_arg != nullptr);
+  DBUG_ASSERT(old_tbl_def_arg != nullptr);
+
+  std::shared_ptr<Rdb_key_def> *const old_key_descr =
+      old_tbl_def_arg->m_key_descr_arr;
+  std::shared_ptr<Rdb_key_def> *const new_key_descr =
+      tbl_def_arg->m_key_descr_arr;
+  const std::unordered_map<std::string, uint> old_key_pos =
+      get_old_key_positions(table_arg, tbl_def_arg, old_table_arg,
+                            old_tbl_def_arg);
+
+  uint i;
+  for (i = 0; i < tbl_def_arg->m_key_count; i++) {
+    const auto &it = old_key_pos.find(get_key_name(i, table_arg, tbl_def_arg));
+    if (it != old_key_pos.end()) {
+      /*
+        Found matching index in old table definition, so copy it over to the
+        new one created.
+      */
+      const Rdb_key_def &okd = *old_key_descr[it->second];
+
+      uint16 index_dict_version = 0;
+      uchar index_type = 0;
+      uint16 kv_version = 0;
+      const GL_INDEX_ID gl_index_id = okd.get_gl_index_id();
+      if (!dict_manager.get_index_info(gl_index_id, &index_dict_version,
+                                       &index_type, &kv_version)) {
+        // NO_LINT_DEBUG
+        sql_print_error("RocksDB: Could not get index information "
+                        "for Index Number (%u,%u), table %s",
+                        gl_index_id.cf_id, gl_index_id.index_id,
+                        old_tbl_def_arg->full_tablename().c_str());
+        DBUG_RETURN(HA_EXIT_FAILURE);
+      }
+
+      /*
+        We can't use the copy constructor because we need to update the
+        keynr within the pack_info for each field and the keyno of the keydef
+        itself.
+      */
+      new_key_descr[i] = std::make_shared<Rdb_key_def>(
+          okd.get_index_number(), i, okd.get_cf(), index_dict_version,
+          index_type, kv_version, okd.m_is_reverse_cf, okd.m_is_auto_cf,
+          okd.m_is_per_partition_cf, okd.m_name.c_str(),
+          dict_manager.get_stats(gl_index_id));
+    } else if (create_key_def(table_arg, i, tbl_def_arg, &new_key_descr[i],
+                              cfs[i])) {
+      DBUG_RETURN(HA_EXIT_FAILURE);
+    }
+
+    DBUG_ASSERT(new_key_descr[i] != nullptr);
+    new_key_descr[i]->setup(table_arg, tbl_def_arg);
+  }
+
+  DBUG_RETURN(HA_EXIT_SUCCESS);
+}
+
+std::unordered_map<std::string, uint> ha_rocksdb::get_old_key_positions(
+    const TABLE *const table_arg, const Rdb_tbl_def *const tbl_def_arg,
+    const TABLE *const old_table_arg,
+    const Rdb_tbl_def *const old_tbl_def_arg) const {
+  DBUG_ENTER_FUNC();
+
+  DBUG_ASSERT(table_arg != nullptr);
+  DBUG_ASSERT(old_table_arg != nullptr);
+  DBUG_ASSERT(tbl_def_arg != nullptr);
+  DBUG_ASSERT(old_tbl_def_arg != nullptr);
+
+  std::shared_ptr<Rdb_key_def> *const old_key_descr =
+      old_tbl_def_arg->m_key_descr_arr;
+  std::unordered_map<std::string, uint> old_key_pos;
+  std::unordered_map<std::string, uint> new_key_pos;
+  uint i;
+
+  for (i = 0; i < tbl_def_arg->m_key_count; i++) {
+    new_key_pos[get_key_name(i, table_arg, tbl_def_arg)] = i;
+  }
+
+  for (i = 0; i < old_tbl_def_arg->m_key_count; i++) {
+    if (is_hidden_pk(i, old_table_arg, old_tbl_def_arg)) {
+      old_key_pos[old_key_descr[i]->m_name] = i;
+      continue;
+    }
+
+    /*
+      In case of matching key name, need to check key parts of keys as well,
+      in case a simultaneous drop + add is performed, where the key name is the
+      same but the key parts are different.
+ + Example: + CREATE TABLE t1 (a INT, b INT, KEY ka(a)) ENGINE=RocksDB; + ALTER TABLE t1 DROP INDEX ka, ADD INDEX ka(b), ALGORITHM=INPLACE; + */ + const KEY *const old_key = &old_table_arg->key_info[i]; + const auto &it = new_key_pos.find(old_key->name); + if (it == new_key_pos.end()) { + continue; + } + + KEY *const new_key = &table_arg->key_info[it->second]; + + if (!compare_key_parts(old_key, new_key)) { + old_key_pos[old_key->name] = i; + } + } + + DBUG_RETURN(old_key_pos); +} + +/* Check two keys to ensure that key parts within keys match */ +int ha_rocksdb::compare_key_parts(const KEY *const old_key, + const KEY *const new_key) const { + DBUG_ENTER_FUNC(); + + DBUG_ASSERT(old_key != nullptr); + DBUG_ASSERT(new_key != nullptr); + + /* Skip if key parts do not match, as it is a different key */ + if (new_key->user_defined_key_parts != old_key->user_defined_key_parts) { + DBUG_RETURN(HA_EXIT_FAILURE); + } + + /* Check to see that key parts themselves match */ + for (uint i = 0; i < old_key->user_defined_key_parts; i++) { + if (strcmp(old_key->key_part[i].field->field_name, + new_key->key_part[i].field->field_name) != 0) { + DBUG_RETURN(HA_EXIT_FAILURE); + } + } + + DBUG_RETURN(HA_EXIT_SUCCESS); +} + +/* + Create key definition needed for storing data in rocksdb. + This can be called either during CREATE table or doing ADD index operations. + + @param in + table_arg Table with definition + i Position of index being created inside table_arg->key_info + tbl_def_arg Table def structure being populated + cf_info Struct which contains column family information + + @param out + new_key_def Newly created index definition. + + @return + 0 - Ok + other - error, either given table ddl is not supported by rocksdb or OOM. 
+*/
+int ha_rocksdb::create_key_def(const TABLE *const table_arg, const uint &i,
+                               const Rdb_tbl_def *const tbl_def_arg,
+                               std::shared_ptr<Rdb_key_def> *const new_key_def,
+                               const struct key_def_cf_info &cf_info) const {
+  DBUG_ENTER_FUNC();
+
+  DBUG_ASSERT(new_key_def != nullptr);
+  DBUG_ASSERT(*new_key_def == nullptr);
+
+  const uint index_id = ddl_manager.get_and_update_next_number(&dict_manager);
+  const uint16_t index_dict_version = Rdb_key_def::INDEX_INFO_VERSION_LATEST;
+  uchar index_type;
+  uint16_t kv_version;
+
+  if (is_hidden_pk(i, table_arg, tbl_def_arg)) {
+    index_type = Rdb_key_def::INDEX_TYPE_HIDDEN_PRIMARY;
+    kv_version = Rdb_key_def::PRIMARY_FORMAT_VERSION_LATEST;
+  } else if (i == table_arg->s->primary_key) {
+    index_type = Rdb_key_def::INDEX_TYPE_PRIMARY;
+    uint16 pk_latest_version = Rdb_key_def::PRIMARY_FORMAT_VERSION_LATEST;
+    kv_version = pk_latest_version;
+  } else {
+    index_type = Rdb_key_def::INDEX_TYPE_SECONDARY;
+    uint16 sk_latest_version = Rdb_key_def::SECONDARY_FORMAT_VERSION_LATEST;
+    kv_version = sk_latest_version;
+  }
+
+  const char *const key_name = get_key_name(i, table_arg, m_tbl_def);
+  *new_key_def = std::make_shared<Rdb_key_def>(
+      index_id, i, cf_info.cf_handle, index_dict_version, index_type,
+      kv_version, cf_info.is_reverse_cf, cf_info.is_auto_cf,
+      cf_info.is_per_partition_cf, key_name);
+
+  DBUG_RETURN(HA_EXIT_SUCCESS);
+}
+
+int rdb_normalize_tablename(const std::string &tablename,
+                            std::string *const strbuf) {
+  DBUG_ASSERT(strbuf != nullptr);
+
+  if (tablename.size() < 2 || tablename[0] != '.' ||
+      tablename[1] != FN_LIBCHAR) {
+    DBUG_ASSERT(0);  // We were not passed table name?
+    return HA_ERR_INTERNAL_ERROR;
+  }
+
+  size_t pos = tablename.find_first_of(FN_LIBCHAR, 2);
+  if (pos == std::string::npos) {
+    DBUG_ASSERT(0);  // We were not passed table name?
+    return HA_ERR_INTERNAL_ERROR;
+  }
+
+  *strbuf = tablename.substr(2, pos - 2) + "."
+            + tablename.substr(pos + 1);
+
+  return HA_EXIT_SUCCESS;
+}
+
+/*
+  Check to see if the user's original statement includes foreign key
+  references
+*/
+bool ha_rocksdb::contains_foreign_key(THD *const thd) {
+  bool success;
+  const char *str = thd_query_string(thd)->str;
+
+  DBUG_ASSERT(str != nullptr);
+
+  while (*str != '\0') {
+    // Scan from our current pos looking for 'FOREIGN'
+    str = rdb_find_in_string(str, "FOREIGN", &success);
+    if (!success) {
+      return false;
+    }
+
+    // Skip past the found "FOREIGN'
+    str = rdb_check_next_token(&my_charset_bin, str, "FOREIGN", &success);
+    DBUG_ASSERT(success);
+
+    if (!my_isspace(&my_charset_bin, *str)) {
+      return false;
+    }
+
+    // See if the next token is 'KEY'
+    str = rdb_check_next_token(&my_charset_bin, str, "KEY", &success);
+    if (!success) {
+      continue;
+    }
+
+    // See if the next token is '('
+    str = rdb_check_next_token(&my_charset_bin, str, "(", &success);
+    if (!success) {
+      // There is an optional index id after 'FOREIGN KEY', skip it
+      str = rdb_skip_id(&my_charset_bin, str);
+
+      // Now check for '(' again
+      str = rdb_check_next_token(&my_charset_bin, str, "(", &success);
+    }
+
+    // If we have found 'FOREIGN KEY [<identifier>] (' we can be confident we
+    // have a foreign key clause.
+    return success;
+  }
+
+  // We never found a valid foreign key clause
+  return false;
+}
+
+/**
+  @brief
+  splits the normalized table name of <dbname>.<tablename>#P#<part_no> into
+  the <dbname>, <tablename> and <part_no> components.
+
+  @param dbbuf returns database name/table_schema
+  @param tablebuf returns tablename
+  @param partitionbuf returns partition suffix if there is one
+  @return HA_EXIT_SUCCESS on success, non-zero on failure to split
+*/
+int rdb_split_normalized_tablename(const std::string &fullname,
+                                   std::string *const db,
+                                   std::string *const table,
+                                   std::string *const partition) {
+  DBUG_ASSERT(!fullname.empty());
+
+#define RDB_PARTITION_STR "#P#"
+
+  /* Normalize returns dbname.tablename. */
+  size_t dotpos = fullname.find('.');
+
+  /* Invalid table name?
*/ + if (dotpos == std::string::npos) { + return HA_ERR_INTERNAL_ERROR; + } + + // Table must have a database name associated with it. + DBUG_ASSERT(dotpos > 0); + + if (db != nullptr) { + *db = fullname.substr(0, dotpos); + } + + dotpos++; + + const size_t partpos = + fullname.find(RDB_PARTITION_STR, dotpos, strlen(RDB_PARTITION_STR)); + + if (partpos != std::string::npos) { + DBUG_ASSERT(partpos >= dotpos); + + if (table != nullptr) { + *table = fullname.substr(dotpos, partpos - dotpos); + } + + if (partition != nullptr) { + *partition = fullname.substr(partpos + strlen(RDB_PARTITION_STR)); + } + } else if (table != nullptr) { + *table = fullname.substr(dotpos); + } + + return HA_EXIT_SUCCESS; +} + +/** + @brief + create() is called to create a database. The variable name will have the name + of the table. + + @details + When create() is called you do not need to worry about + opening the table. Also, the .frm file will have already been + created so adjusting create_info is not necessary. You can overwrite + the .frm file at this point if you wish to change the table + definition, but there are no methods currently provided for doing + so. + + Called from handle.cc by ha_create_table(). + + @see + ha_create_table() in handle.cc +*/ + +int ha_rocksdb::create(const char *const name, TABLE *const table_arg, + HA_CREATE_INFO *const create_info) { + DBUG_ENTER_FUNC(); + + DBUG_ASSERT(table_arg != nullptr); + DBUG_ASSERT(create_info != nullptr); + + if (create_info->data_file_name) { + // DATA DIRECTORY is used to create tables under a specific location + // outside the MySQL data directory. We don't support this for MyRocks. + // The `rocksdb_datadir` setting should be used to configure RocksDB data + // directory. + print_error(HA_ERR_ROCKSDB_TABLE_DATA_DIRECTORY_NOT_SUPPORTED, + MYF(ME_JUST_WARNING)); + DBUG_RETURN(HA_WRONG_CREATE_OPTION); + } + + if (create_info->index_file_name) { + // Similar check for INDEX DIRECTORY as well. 
+ print_error(HA_ERR_ROCKSDB_TABLE_INDEX_DIRECTORY_NOT_SUPPORTED, + MYF(ME_JUST_WARNING)); + DBUG_RETURN(HA_WRONG_CREATE_OPTION); + } + + int res; + std::string str; + + THD *const thd = my_core::thd_get_current_thd(); + + if (get_table_if_exists(name)) { + if (thd->lex->sql_command == SQLCOM_TRUNCATE) { + if (delete_table(name)) { + DBUG_RETURN(HA_ERR_INTERNAL_ERROR); + } + } else { + if (rdb_normalize_tablename(name, &str)) { + DBUG_RETURN(HA_ERR_INTERNAL_ERROR); + } + my_printf_error( + ER_UNKNOWN_ERROR, + "Table '%s' does not exist, but metadata information exists inside " + "MyRocks. This is a sign of data inconsistency. Please check if " + "'%s.frm' exists, and try to restore it if it does not exist.", + MYF(0), str.c_str(), name); + DBUG_RETURN(HA_ERR_INTERNAL_ERROR); + } + } + + /* + Construct dbname.tablename ourselves, because parititioning + passes strings like "./test/t14#P#p0" for individual partitions, + while table_arg->s->table_name has none of that. + */ + if (rdb_normalize_tablename(name, &str)) { + DBUG_RETURN(HA_ERR_INTERNAL_ERROR); + } + + if (contains_foreign_key(thd)) { + my_printf_error(ER_NOT_SUPPORTED_YET, "MyRocks does not currently support " + "foreign key constraints", + MYF(0)); + DBUG_RETURN(HA_ERR_INTERNAL_ERROR); + } + + /* + TODO(alexyang): Temporarily disable unique indexes support when there is no + Primary Key + */ + for (uint keyno = 0; keyno < table_arg->s->keys; keyno++) { + if ((table_arg->key_info[keyno].flags & HA_NOSAME) && + has_hidden_pk(table_arg)) { + my_printf_error(ER_UNKNOWN_ERROR, "Unique index support is disabled when " + "the table has no primary key. 
", + MYF(0)); + DBUG_RETURN(HA_ERR_INTERNAL_ERROR); + } + } + + bool write_err = false; + const std::unique_ptr wb = dict_manager.begin(); + rocksdb::WriteBatch *const batch = wb.get(); + + /* Create table/key descriptions and put them into the data dictionary */ + m_tbl_def = new Rdb_tbl_def(str); + + uint n_keys = table_arg->s->keys; + + /* + If no primary key found, create a hidden PK and place it inside table + definition + */ + if (has_hidden_pk(table_arg)) { + n_keys += 1; + } + + m_key_descr_arr = new std::shared_ptr[n_keys]; + m_tbl_def->m_key_count = n_keys; + m_tbl_def->m_key_descr_arr = m_key_descr_arr; + + if ((res = create_key_defs(table_arg, m_tbl_def))) { + goto error; + } + + m_pk_descr = m_key_descr_arr[pk_index(table_arg, m_tbl_def)]; + + dict_manager.lock(); + write_err = + ddl_manager.put_and_write(m_tbl_def, batch) || dict_manager.commit(batch); + dict_manager.unlock(); + + if (write_err) { + goto error; + } + + if (create_info->auto_increment_value) + m_tbl_def->m_auto_incr_val = create_info->auto_increment_value; + + /* + We only support auto_increment at start of the PRIMARY KEY. + */ + // Field *field; + // if ((field= table_arg->next_number_field)) + /* TODO mdcallag: disable this for now to let UNIQUE indexes kind of work + if ((field= table_arg->found_next_number_field)) + { + int pk= table_arg->s->primary_key; + Field *pk_field= table_arg->key_info[pk].key_part[0].field; + if (field->field_index != pk_field->field_index) + DBUG_RETURN(HA_ERR_INTERNAL_ERROR); + } + */ + DBUG_RETURN(HA_EXIT_SUCCESS); + +error: + /* Delete what we have allocated so far */ + delete m_tbl_def; + m_tbl_def = nullptr; + m_key_descr_arr = nullptr; + + DBUG_RETURN(HA_ERR_INTERNAL_ERROR); +} + +/** + @note + This function is used only when the table has not yet been opened, and + keyread_allowed bitmap doesn't have the correct values yet. + + See comment in ha_rocksdb::index_flags() for details. 
+*/ + +bool ha_rocksdb::check_keyread_allowed(uint inx, uint part, + bool all_parts) const { + bool res = true; + KEY *const key_info = &table_share->key_info[inx]; + + Rdb_field_packing dummy1; + res = dummy1.setup(nullptr, key_info->key_part[part].field, inx, part, + key_info->key_part[part].length); + + if (res && all_parts) { + for (uint i = 0; i < part; i++) { + Field *field; + if ((field = key_info->key_part[i].field)) { + Rdb_field_packing dummy; + if (!dummy.setup(nullptr, field, inx, i, + key_info->key_part[i].length)) { + /* Cannot do index-only reads for this column */ + res = false; + break; + } + } + } + } + + const uint pk = table_share->primary_key; + if (inx == pk && all_parts && + part + 1 == table_share->key_info[pk].user_defined_key_parts) { + m_pk_can_be_decoded = res; + } + + return res; +} + +int ha_rocksdb::read_key_exact(const Rdb_key_def &kd, + rocksdb::Iterator *const iter, + const bool &full_key_match, + const rocksdb::Slice &key_slice) const { + DBUG_ASSERT(iter != nullptr); + + /* + We are looking for the first record such that + index_tuple= lookup_tuple. + lookup_tuple may be a prefix of the index. + */ + rocksdb_smart_seek(kd.m_is_reverse_cf, iter, key_slice); + if (!iter->Valid() || !kd.value_matches_prefix(iter->key(), key_slice)) { + /* + Got a record that is not equal to the lookup value, or even a record + from another table.index. + */ + return HA_ERR_KEY_NOT_FOUND; + } + return HA_EXIT_SUCCESS; +} + +int ha_rocksdb::read_before_key(const Rdb_key_def &kd, + const bool &full_key_match, + const rocksdb::Slice &key_slice) { + /* + We are looking for record with the biggest t.key such that + t.key < lookup_tuple. 
+ */ + rocksdb_smart_seek(!kd.m_is_reverse_cf, m_scan_it, key_slice); + if (m_scan_it->Valid() && full_key_match && + kd.value_matches_prefix(m_scan_it->key(), key_slice)) { + /* We are using full key and we've hit an exact match */ + if (kd.m_is_reverse_cf) { + m_scan_it->Next(); + } else { + m_scan_it->Prev(); + } + } + return m_scan_it->Valid() ? HA_EXIT_SUCCESS : HA_ERR_KEY_NOT_FOUND; +} + +int ha_rocksdb::read_after_key(const Rdb_key_def &kd, + const rocksdb::Slice &key_slice) { + /* + We are looking for the first record such that + + index_tuple $GT lookup_tuple + + with HA_READ_AFTER_KEY, $GT = '>', + with HA_READ_KEY_OR_NEXT, $GT = '>=' + */ + rocksdb_smart_seek(kd.m_is_reverse_cf, m_scan_it, key_slice); + return m_scan_it->Valid() ? HA_EXIT_SUCCESS : HA_ERR_KEY_NOT_FOUND; +} + +int ha_rocksdb::position_to_correct_key(const Rdb_key_def &kd, + const enum ha_rkey_function &find_flag, + const bool &full_key_match, + const uchar *const key, + const key_part_map &keypart_map, + const rocksdb::Slice &key_slice, + bool *const move_forward) { + int rc = 0; + + *move_forward = true; + + switch (find_flag) { + case HA_READ_KEY_EXACT: + rc = read_key_exact(kd, m_scan_it, full_key_match, key_slice); + break; + case HA_READ_BEFORE_KEY: + *move_forward = false; + rc = read_before_key(kd, full_key_match, key_slice); + if (rc == 0 && !kd.covers_key(m_scan_it->key())) { + /* The record we've got is not from this index */ + rc = HA_ERR_KEY_NOT_FOUND; + } + break; + case HA_READ_AFTER_KEY: + case HA_READ_KEY_OR_NEXT: + rc = read_after_key(kd, key_slice); + if (rc == 0 && !kd.covers_key(m_scan_it->key())) { + /* The record we've got is not from this index */ + rc = HA_ERR_KEY_NOT_FOUND; + } + break; + case HA_READ_KEY_OR_PREV: + case HA_READ_PREFIX: + /* This flag is not used by the SQL layer, so we don't support it yet. 
*/ + rc = HA_ERR_UNSUPPORTED; + break; + case HA_READ_PREFIX_LAST: + case HA_READ_PREFIX_LAST_OR_PREV: + *move_forward = false; + /* + Find the last record with the specified index prefix lookup. + - HA_READ_PREFIX_LAST requires that the record has the + prefix=lookup (if there are no such records, + HA_ERR_KEY_NOT_FOUND should be returned). + - HA_READ_PREFIX_LAST_OR_PREV has no such requirement. If there are no + records with prefix=lookup, we should return the last record + before that. + */ + rc = read_before_key(kd, full_key_match, key_slice); + if (rc == 0) { + const rocksdb::Slice &rkey = m_scan_it->key(); + if (!kd.covers_key(rkey)) { + /* The record we've got is not from this index */ + rc = HA_ERR_KEY_NOT_FOUND; + } else if (find_flag == HA_READ_PREFIX_LAST) { + uint size = kd.pack_index_tuple(table, m_pack_buffer, m_sk_packed_tuple, + key, keypart_map); + rocksdb::Slice lookup_tuple(reinterpret_cast(m_sk_packed_tuple), + size); + + // We need to compare the key we've got with the original search prefix. + if (!kd.value_matches_prefix(rkey, lookup_tuple)) { + rc = HA_ERR_KEY_NOT_FOUND; + } + } + } + break; + default: + DBUG_ASSERT(0); + break; + } + + return rc; +} + +int ha_rocksdb::calc_eq_cond_len(const Rdb_key_def &kd, + const enum ha_rkey_function &find_flag, + const rocksdb::Slice &slice, + const int &bytes_changed_by_succ, + const key_range *const end_key, + uint *const end_key_packed_size) { + if (find_flag == HA_READ_KEY_EXACT) + return slice.size(); + + if (find_flag == HA_READ_PREFIX_LAST) { + /* + We have made the kd.successor(m_sk_packed_tuple) call above. + + The slice is at least Rdb_key_def::INDEX_NUMBER_SIZE bytes long. + */ + return slice.size() - bytes_changed_by_succ; + } + + if (end_key) { + *end_key_packed_size = + kd.pack_index_tuple(table, m_pack_buffer, m_end_key_packed_tuple, + end_key->key, end_key->keypart_map); + + /* + Calculating length of the equal conditions here. 4 byte index id is + included. 
+ Example1: id1 BIGINT, id2 INT, id3 BIGINT, PRIMARY KEY (id1, id2, id3) + WHERE id1=1 AND id2=1 AND id3>=2 => eq_cond_len= 4+8+4= 16 + WHERE id1=1 AND id2>=1 AND id3>=2 => eq_cond_len= 4+8= 12 + Example2: id1 VARCHAR(30), id2 INT, PRIMARY KEY (id1, id2) + WHERE id1 = 'AAA' and id2 < 3; => eq_cond_len=13 (varchar used 9 bytes) + */ + rocksdb::Slice end_slice(reinterpret_cast(m_end_key_packed_tuple), + *end_key_packed_size); + return slice.difference_offset(end_slice); + } + + /* + On range scan without any end key condition, there is no + eq cond, and eq cond length is the same as index_id size (4 bytes). + Example1: id1 BIGINT, id2 INT, id3 BIGINT, PRIMARY KEY (id1, id2, id3) + WHERE id1>=1 AND id2 >= 2 and id2 <= 5 => eq_cond_len= 4 + */ + return Rdb_key_def::INDEX_NUMBER_SIZE; +} + +int ha_rocksdb::read_row_from_primary_key(uchar *const buf) { + DBUG_ASSERT(buf != nullptr); + + int rc; + const rocksdb::Slice &rkey = m_scan_it->key(); + const uint pk_size = rkey.size(); + const char *pk_data = rkey.data(); + + memcpy(m_pk_packed_tuple, pk_data, pk_size); + m_last_rowkey.copy(pk_data, pk_size, &my_charset_bin); + + if (m_lock_rows != RDB_LOCK_NONE) { + /* We need to put a lock and re-read */ + rc = get_row_by_rowid(buf, m_pk_packed_tuple, pk_size); + } else { + /* Unpack from the row we've read */ + const rocksdb::Slice &value = m_scan_it->value(); + rc = convert_record_from_storage_format(&rkey, &value, buf); + } + + return rc; +} + +int ha_rocksdb::read_row_from_secondary_key(uchar *const buf, + const Rdb_key_def &kd, + bool move_forward) { + DBUG_ASSERT(buf != nullptr); + + int rc = 0; + uint pk_size; + + if (m_keyread_only && m_lock_rows == RDB_LOCK_NONE && !has_hidden_pk(table)) { + /* Get the key columns and primary key value */ + const rocksdb::Slice &rkey = m_scan_it->key(); + pk_size = + kd.get_primary_key_tuple(table, *m_pk_descr, &rkey, m_pk_packed_tuple); + const rocksdb::Slice &value = m_scan_it->value(); + if (pk_size == RDB_INVALID_KEY_LEN || + 
kd.unpack_record(table, buf, &rkey, &value, + m_verify_row_debug_checksums)) { + rc = HA_ERR_INTERNAL_ERROR; + } + } else { + if (kd.m_is_reverse_cf) + move_forward = !move_forward; + + rc = find_icp_matching_index_rec(move_forward, buf); + if (!rc) { + const rocksdb::Slice &rkey = m_scan_it->key(); + pk_size = kd.get_primary_key_tuple(table, *m_pk_descr, &rkey, + m_pk_packed_tuple); + if (pk_size == RDB_INVALID_KEY_LEN) { + rc = HA_ERR_INTERNAL_ERROR; + } else { + rc = get_row_by_rowid(buf, m_pk_packed_tuple, pk_size); + } + } + } + + if (!rc) { + m_last_rowkey.copy((const char *)m_pk_packed_tuple, pk_size, + &my_charset_bin); + } + + return rc; +} + +/** + @note + The problem with this function is that SQL layer calls it, when + - the table has not been yet opened (no ::open() call done) + - this->table_share already exists, but it is in the process of being + filled, so some of fields are still NULL. + - In particular, table_share->key_info[inx].key_part[] is filled only up + to part #part. Subsequent key parts are not yet filled. + + To complicate things further, SQL layer will call index_flags() with + all_parts=TRUE. Essentially, we're asked to provide flags for reading + keyparts whose datatype is not yet known. + + We walk around this problem by using check_keyread_allowed(), which uses + table_share object and is careful not to step on unitialized data. + + When we get a call with all_parts=TRUE, we try to analyze all parts but + ignore those that have key_part->field==nullptr (these are not initialized + yet). +*/ + +ulong ha_rocksdb::index_flags(uint inx, uint part, bool all_parts) const { + DBUG_ENTER_FUNC(); + + ulong base_flags = HA_READ_NEXT | // doesn't seem to be used + HA_READ_ORDER | HA_READ_RANGE | HA_READ_PREV; + + if (check_keyread_allowed(inx, part, all_parts)) + base_flags |= HA_KEYREAD_ONLY; + + if (inx == table_share->primary_key) { + /* + Index-only reads on primary key are the same as table scan for us. 
Still, + we need to explicitly "allow" them, otherwise SQL layer will miss some + plans. + */ + base_flags |= HA_KEYREAD_ONLY | HA_CLUSTERED_INDEX; + } else { + /* + We can Index Condition Pushdown any key except the primary. With primary + key, we get (pk, record) pair immediately, there is no place to put the + ICP check. + */ + base_flags |= HA_DO_INDEX_COND_PUSHDOWN; + } + + DBUG_RETURN(base_flags); +} + +/** + @brief + Read next index tuple through the secondary index. + + @details + m_scan_it points at the index key-value pair that we should read the (pk,row) + pair for. +*/ +int ha_rocksdb::secondary_index_read(const int keyno, uchar *const buf) { + DBUG_ASSERT(buf != nullptr); + DBUG_ASSERT(table != nullptr); +#ifdef MARIAROCKS_NOT_YET + stats.rows_requested++; +#endif + /* Use STATUS_NOT_FOUND when record not found or some error occurred */ + table->status = STATUS_NOT_FOUND; + + if (m_scan_it->Valid()) { + rocksdb::Slice key = m_scan_it->key(); + + /* Check if we've ran out of records of this index */ + if (m_key_descr_arr[keyno]->covers_key(key)) { + int rc = 0; + + // TODO: We could here check if we have ran out of range we're scanning + const uint size = m_key_descr_arr[keyno]->get_primary_key_tuple( + table, *m_pk_descr, &key, m_pk_packed_tuple); + if (size == RDB_INVALID_KEY_LEN) { + return HA_ERR_INTERNAL_ERROR; + } + + m_last_rowkey.copy((const char *)m_pk_packed_tuple, size, + &my_charset_bin); + + if (m_keyread_only && m_lock_rows == RDB_LOCK_NONE && + !has_hidden_pk(table)) { + rocksdb::Slice value = m_scan_it->value(); + if (m_key_descr_arr[keyno]->unpack_record( + table, buf, &key, &value, m_verify_row_debug_checksums)) { + return HA_ERR_INTERNAL_ERROR; + } + } else { + rc = get_row_by_rowid(buf, m_pk_packed_tuple, size); + } + + if (!rc) { + table->status = 0; +#ifdef MARIAROCKS_NOT_YET + stats.rows_read++; + stats.rows_index_next++; +#endif + update_row_stats(ROWS_READ); + } + return rc; + } + } + return HA_ERR_END_OF_FILE; +} + +bool 
ha_rocksdb::is_using_full_key(key_part_map keypart_map, + uint actual_key_parts) +{ + return (keypart_map == HA_WHOLE_KEY) || + (keypart_map == ((key_part_map(1) << actual_key_parts) + - 1)); +} + +/* + ha_rocksdb::read_range_first overrides handler::read_range_first. + The only difference from handler::read_range_first is that + ha_rocksdb::read_range_first passes end_key to + ha_rocksdb::index_read_map_impl function. +*/ +int ha_rocksdb::read_range_first(const key_range *const start_key, + const key_range *const end_key, + bool eq_range_arg, bool sorted) { + DBUG_ENTER_FUNC(); + + int result; + + eq_range = eq_range_arg; +#ifdef MARIAROCKS_NOT_YET + // Range scan direction is used to get ICP to work for backwards scans + set_end_range(end_key, RANGE_SCAN_ASC); +#else + set_end_range(end_key); +#endif + + range_key_part = table->key_info[active_index].key_part; + + if (!start_key) // Read first record + result = ha_index_first(table->record[0]); + else { +#ifdef MARIAROCKS_NOT_YET + if (is_using_prohibited_gap_locks( + is_using_full_unique_key(active_index, start_key->keypart_map, + start_key->flag))) { + DBUG_RETURN(HA_ERR_LOCK_DEADLOCK); + } +#endif + increment_statistics(&SSV::ha_read_key_count); + + MYSQL_TABLE_IO_WAIT(m_psi, PSI_TABLE_FETCH_ROW, active_index, 0, { + result = + index_read_map_impl(table->record[0], start_key->key, + start_key->keypart_map, start_key->flag, end_key); + }) + } + if (result) + DBUG_RETURN((result == HA_ERR_KEY_NOT_FOUND) ? HA_ERR_END_OF_FILE : result); + + if (compare_key(end_range) <= 0) { + DBUG_RETURN(HA_EXIT_SUCCESS); + } else { + /* + The last read row does not fall in the range. So request + storage engine to release row lock if possible. 
+ */ + unlock_row(); + DBUG_RETURN(HA_ERR_END_OF_FILE); + } +} + +int ha_rocksdb::prepare_index_scan() +{ + range_key_part= table->key_info[active_index].key_part; + set_end_range(NULL); + return 0; +} + + +int ha_rocksdb::prepare_range_scan(const key_range *start_key, + const key_range *end_key) +{ + range_key_part= table->key_info[active_index].key_part; + set_end_range(end_key); + return 0; +} + + +int ha_rocksdb::index_read_map(uchar *const buf, const uchar *const key, + key_part_map keypart_map, + enum ha_rkey_function find_flag) { + DBUG_ENTER_FUNC(); + + DBUG_RETURN(index_read_map_impl(buf, key, keypart_map, find_flag, nullptr)); +} + +/* + See storage/rocksdb/rocksdb-range-access.txt for description of how MySQL + index navigation commands are converted into RocksDB lookup commands. + + This function takes end_key as an argument, and it is set on range scan. + MyRocks needs to decide whether prefix bloom filter can be used or not. + To decide to use prefix bloom filter or not, calculating equal condition + length + is needed. On equal lookups (find_flag == HA_READ_KEY_EXACT), equal + condition length is the same as rocksdb::Slice.size() of the start key. + On range scan, equal condition length is MIN(start_key, end_key) of the + rocksdb::Slice expression. 
+*/ +int ha_rocksdb::index_read_map_impl(uchar *const buf, const uchar *const key, + key_part_map keypart_map, + enum ha_rkey_function find_flag, + const key_range *end_key) { + DBUG_ENTER_FUNC(); + + int rc = 0; + + const Rdb_key_def &kd = *m_key_descr_arr[active_index]; + const uint actual_key_parts = kd.get_key_parts(); + bool using_full_key = is_using_full_key(keypart_map, actual_key_parts); + + if (!end_key) + end_key = end_range; + + /* By default, we don't need the retrieved records to match the prefix */ + m_sk_match_prefix = nullptr; +#ifdef MARIAROCKS_NOT_YET + stats.rows_requested++; +#endif + if (active_index == table->s->primary_key && find_flag == HA_READ_KEY_EXACT && + using_full_key) { + /* + Equality lookup over primary key, using full tuple. + This is a special case, use DB::Get. + */ + const uint size = kd.pack_index_tuple(table, m_pack_buffer, + m_pk_packed_tuple, key, keypart_map); + bool skip_lookup = is_blind_delete_enabled(); + rc = get_row_by_rowid(buf, m_pk_packed_tuple, size, + skip_lookup); + if (!rc && !skip_lookup) { +#ifdef MARIAROCKS_NOT_YET + stats.rows_read++; + stats.rows_index_first++; +#endif + update_row_stats(ROWS_READ); + } + DBUG_RETURN(rc); + } + + /* + Unique secondary index performs lookups without the extended key fields + */ + uint packed_size; + if (active_index != table->s->primary_key && + table->key_info[active_index].flags & HA_NOSAME && + find_flag == HA_READ_KEY_EXACT && using_full_key) { + key_part_map tmp_map = (key_part_map(1) << table->key_info[active_index] + .user_defined_key_parts) - + 1; + packed_size = kd.pack_index_tuple(table, m_pack_buffer, m_sk_packed_tuple, + key, tmp_map); + if (table->key_info[active_index].user_defined_key_parts != + kd.get_key_parts()) + using_full_key = false; + } else { + packed_size = kd.pack_index_tuple(table, m_pack_buffer, m_sk_packed_tuple, + key, keypart_map); + } + + if ((pushed_idx_cond && pushed_idx_cond_keyno == active_index) && + (find_flag == HA_READ_KEY_EXACT || 
find_flag == HA_READ_PREFIX_LAST)) { + /* + We are doing a point index lookup, and ICP is enabled. It is possible + that this call will be followed by ha_rocksdb->index_next_same() call. + + Do what InnoDB does: save the lookup tuple now. We will need it in + index_next_same/find_icp_matching_index_rec in order to stop scanning + as soon as index record doesn't match the lookup tuple. + + When not using ICP, handler::index_next_same() will make sure that rows + that don't match the lookup prefix are not returned. + row matches the lookup prefix. + */ + m_sk_match_prefix = m_sk_match_prefix_buf; + m_sk_match_length = packed_size; + memcpy(m_sk_match_prefix, m_sk_packed_tuple, packed_size); + } + + int bytes_changed_by_succ = 0; + if (find_flag == HA_READ_PREFIX_LAST_OR_PREV || + find_flag == HA_READ_PREFIX_LAST || find_flag == HA_READ_AFTER_KEY) { + /* See below */ + bytes_changed_by_succ = kd.successor(m_sk_packed_tuple, packed_size); + } + + rocksdb::Slice slice(reinterpret_cast(m_sk_packed_tuple), + packed_size); + + uint end_key_packed_size = 0; + const uint eq_cond_len = + calc_eq_cond_len(kd, find_flag, slice, bytes_changed_by_succ, end_key, + &end_key_packed_size); + + bool use_all_keys = false; + if (find_flag == HA_READ_KEY_EXACT && + my_count_bits(keypart_map) == kd.get_key_parts()) + use_all_keys = true; + + Rdb_transaction *const tx = get_or_create_tx(table->in_use); + const bool is_new_snapshot = !tx->has_snapshot(); + // Loop as long as we get a deadlock error AND we end up creating the + // snapshot here (i.e. it did not exist prior to this) + for (;;) { + /* + This will open the iterator and position it at a record that's equal or + greater than the lookup tuple. 
+ */ + setup_scan_iterator(kd, &slice, use_all_keys, is_ascending(kd, find_flag), + eq_cond_len); + + /* + Once we are positioned on from above, move to the position we really + want: See storage/rocksdb/rocksdb-range-access.txt + */ + bool move_forward; + rc = position_to_correct_key(kd, find_flag, using_full_key, key, + keypart_map, slice, &move_forward); + + if (rc) { + /* This status is returned on any error */ + table->status = STATUS_NOT_FOUND; + DBUG_RETURN(rc); + } + + m_skip_scan_it_next_call = false; + + /* + Now get the data for the row into 'buf'. If we were using a primary key + then we have all the rows we need. For a secondary key we now need to + lookup the primary key. + */ + if (active_index == table->s->primary_key) + rc = read_row_from_primary_key(buf); + else + rc = read_row_from_secondary_key(buf, kd, move_forward); + + if (rc != HA_ERR_LOCK_DEADLOCK || !is_new_snapshot) + break; /* Exit the loop */ + + // release the snapshot and iterator so they will be regenerated + tx->release_snapshot(); + release_scan_iterator(); + } + + if (rc) { + /* the only possible error condition is record-not-found */ + table->status = STATUS_NOT_FOUND; + } else { + table->status = 0; +#ifdef MARIAROCKS_NOT_YET + stats.rows_read++; + stats.rows_index_first++; +#endif + update_row_stats(ROWS_READ); + } + + DBUG_RETURN(rc); +} + +/* + @brief + Scan the secondary index until we find an index record that satisfies ICP + + @param move_forward TRUE <=> move m_scan_it forward + FALSE <=> move m_scan_it backward + @param buf Record buffer (must be the same buffer that + pushed index condition points to, in practice + it is table->record[0]) + + @detail + Move the current iterator m_scan_it until we get an index tuple that + satisfies the pushed Index Condition. + (if there is no pushed index condition, return right away) + + @return + 0 - Index tuple satisfies ICP, can do index read. 
+ other - error code +*/ + +int ha_rocksdb::find_icp_matching_index_rec(const bool &move_forward, + uchar *const buf) { + DBUG_ASSERT(buf != nullptr); + + if (pushed_idx_cond && pushed_idx_cond_keyno == active_index) { + const Rdb_key_def &kd = *m_key_descr_arr[active_index]; + + while (1) { + if (!m_scan_it->Valid()) { + table->status = STATUS_NOT_FOUND; + return HA_ERR_END_OF_FILE; + } + const rocksdb::Slice rkey = m_scan_it->key(); + + if (!kd.covers_key(rkey)) { + table->status = STATUS_NOT_FOUND; + return HA_ERR_END_OF_FILE; + } + + if (m_sk_match_prefix) { + const rocksdb::Slice prefix((const char *)m_sk_match_prefix, + m_sk_match_length); + if (!kd.value_matches_prefix(rkey, prefix)) { + table->status = STATUS_NOT_FOUND; + return HA_ERR_END_OF_FILE; + } + } + + const rocksdb::Slice value = m_scan_it->value(); + if (kd.unpack_record(table, buf, &rkey, &value, + m_verify_row_debug_checksums)) { + return HA_ERR_INTERNAL_ERROR; + } + + const enum icp_result icp_status= handler_index_cond_check(this); + if (icp_status == ICP_NO_MATCH) { + if (move_forward) + m_scan_it->Next(); + else + m_scan_it->Prev(); + continue; /* Get the next (or prev) index tuple */ + } + else if (icp_status == ICP_OUT_OF_RANGE || icp_status == ICP_ABORTED_BY_USER) { + /* We have walked out of range we are scanning */ + table->status = STATUS_NOT_FOUND; + return HA_ERR_END_OF_FILE; + } + else /* icp_status == ICP_MATCH */ + { + /* Index Condition is satisfied. We have rc==0, proceed to fetch the + * row. */ + break; + } + /* + TODO: should we have this here, or RockDB handles this internally? 
+ if (my_core::thd_killed(current_thd)) + { + rc= HA_ERR_INTERNAL_ERROR; // doesn't matter + break; + } + */ + } + } + return HA_EXIT_SUCCESS; +} + + +int ha_rocksdb::check(THD *const thd, HA_CHECK_OPT *const check_opt) { + DBUG_ENTER_FUNC(); + + DBUG_ASSERT(thd != nullptr); + DBUG_ASSERT(check_opt != nullptr); + + const uint pk = pk_index(table, m_tbl_def); + String rowkey_copy; + String sec_key_copy; + const char *const table_name = table->s->table_name.str; + + // Only when debugging: don't use snapshot when reading + // Rdb_transaction *tx= get_or_create_tx(table->in_use); + // tx->snapshot= nullptr; + + const bool save_verify_row_debug_checksums = m_verify_row_debug_checksums; + m_verify_row_debug_checksums = true; + + /* For each secondary index, check that we can get a PK value from it */ + // NO_LINT_DEBUG + sql_print_information("CHECKTABLE %s: Checking table %s", table_name, + table_name); + ha_rows UNINIT_VAR(row_checksums_at_start); // set/used iff first_index==true + ha_rows row_checksums = ha_rows(-1); + bool first_index = true; + + for (uint keyno = 0; keyno < table->s->keys; keyno++) { + if (keyno != pk) { + extra(HA_EXTRA_KEYREAD); + ha_index_init(keyno, true); + ha_rows rows = 0; + ha_rows checksums = 0; + if (first_index) + row_checksums_at_start = m_row_checksums_checked; + int res; + // NO_LINT_DEBUG + sql_print_information("CHECKTABLE %s: Checking index %s", table_name, + table->key_info[keyno].name); + while (1) { + if (!rows) + res = index_first(table->record[0]); + else + res = index_next(table->record[0]); + + if (res == HA_ERR_END_OF_FILE) + break; + if (res) { + // error + // NO_LINT_DEBUG + sql_print_error("CHECKTABLE %s: .. 
row %lld: index scan error %d", + table_name, rows, res); + goto error; + } + rocksdb::Slice key = m_scan_it->key(); + sec_key_copy.copy(key.data(), key.size(), &my_charset_bin); + rowkey_copy.copy(m_last_rowkey.ptr(), m_last_rowkey.length(), + &my_charset_bin); + + if (m_key_descr_arr[keyno]->unpack_info_has_checksum( + m_scan_it->value())) { + checksums++; + } + + if ((res = get_row_by_rowid(table->record[0], rowkey_copy.ptr(), + rowkey_copy.length()))) { + // NO_LINT_DEBUG + sql_print_error("CHECKTABLE %s: .. row %lld: " + "failed to fetch row by rowid", + table_name, rows); + goto error; + } + + longlong hidden_pk_id = 0; + if (has_hidden_pk(table) && + read_hidden_pk_id_from_rowkey(&hidden_pk_id)) + goto error; + + /* Check if we get the same PK value */ + uint packed_size = m_pk_descr->pack_record( + table, m_pack_buffer, table->record[0], m_pk_packed_tuple, nullptr, + false, hidden_pk_id); + if (packed_size != rowkey_copy.length() || + memcmp(m_pk_packed_tuple, rowkey_copy.ptr(), packed_size)) { + // NO_LINT_DEBUG + sql_print_error("CHECKTABLE %s: .. row %lld: PK value mismatch", + table_name, rows); + goto print_and_error; + } + + /* Check if we get the same secondary key value */ + packed_size = m_key_descr_arr[keyno]->pack_record( + table, m_pack_buffer, table->record[0], m_sk_packed_tuple, + &m_sk_tails, false, hidden_pk_id); + if (packed_size != sec_key_copy.length() || + memcmp(m_sk_packed_tuple, sec_key_copy.ptr(), packed_size)) { + // NO_LINT_DEBUG + sql_print_error("CHECKTABLE %s: .. 
row %lld: " + "secondary index value mismatch", + table_name, rows); + goto print_and_error; + } + rows++; + continue; + + print_and_error : { + std::string buf; + buf = rdb_hexdump(rowkey_copy.ptr(), rowkey_copy.length(), + RDB_MAX_HEXDUMP_LEN); + // NO_LINT_DEBUG + sql_print_error("CHECKTABLE %s: rowkey: %s", table_name, buf.c_str()); + + buf = rdb_hexdump(m_retrieved_record.data(), m_retrieved_record.size(), + RDB_MAX_HEXDUMP_LEN); + // NO_LINT_DEBUG + sql_print_error("CHECKTABLE %s: record: %s", table_name, buf.c_str()); + + buf = rdb_hexdump(sec_key_copy.ptr(), sec_key_copy.length(), + RDB_MAX_HEXDUMP_LEN); + // NO_LINT_DEBUG + sql_print_error("CHECKTABLE %s: index: %s", table_name, buf.c_str()); + + goto error; + } + } + // NO_LINT_DEBUG + sql_print_information("CHECKTABLE %s: ... %lld index entries checked " + "(%lld had checksums)", + table_name, rows, checksums); + + if (first_index) { + row_checksums = m_row_checksums_checked - row_checksums_at_start; + first_index = false; + } + ha_index_end(); + } + } + if (row_checksums != ha_rows(-1)) { + // NO_LINT_DEBUG + sql_print_information("CHECKTABLE %s: %lld table records had checksums", + table_name, row_checksums); + } + extra(HA_EXTRA_NO_KEYREAD); + + m_verify_row_debug_checksums = save_verify_row_debug_checksums; + /* + TODO: we should check also for PK records that are missing in the secondary + indexes. + For that, need to walk through the PK and check that every PK record has a + proper counterpart in each secondary index. 
+ */
+  DBUG_RETURN(HA_ADMIN_OK);
+error:
+  m_verify_row_debug_checksums = save_verify_row_debug_checksums;
+  ha_index_or_rnd_end();
+  extra(HA_EXTRA_NO_KEYREAD);
+
+  DBUG_RETURN(HA_ADMIN_CORRUPT);
+}
+
+static void dbug_dump_str(FILE *const out, const char *const str, int len) {
+  fprintf(out, "\"");
+  for (int i = 0; i < len; i++) {
+    if (str[i] > 32)
+      fprintf(out, "%c", str[i]);
+    else
+      fprintf(out, "\\%d", str[i]);
+  }
+  fprintf(out, "\"");
+}
+
+/*
+  Debugging help: dump the whole database into a human-readable file.
+  Usage:
+    dbug_dump_database(rdb);
+*/
+
+void dbug_dump_database(rocksdb::DB *const db) {
+  FILE *const out = fopen("/tmp/rocksdb.dump", "wt");
+  if (!out)
+    return;
+
+  rocksdb::Iterator *it = db->NewIterator(rocksdb::ReadOptions());
+  for (it->SeekToFirst(); it->Valid(); it->Next()) {
+    rocksdb::Slice key = it->key();
+    rocksdb::Slice val = it->value();
+    dbug_dump_str(out, key.data(), key.size());
+    fprintf(out, " -> ");
+    dbug_dump_str(out, val.data(), val.size());
+    fprintf(out, "\n");
+  }
+
+  delete it;
+  fclose(out);
+}
+
+rocksdb::Status ha_rocksdb::get_for_update(
+    Rdb_transaction *const tx, rocksdb::ColumnFamilyHandle *const column_family,
+    const rocksdb::Slice &key, std::string *const value) const {
+  DBUG_ASSERT(m_lock_rows != RDB_LOCK_NONE);
+  const bool exclusive = m_lock_rows != RDB_LOCK_READ;
+
+  rocksdb::Status s = tx->get_for_update(column_family, key, value, exclusive);
+
+  // If we have a lock conflict and we are running in READ COMMITTED mode
+  // release and reacquire the snapshot and then retry the get_for_update().
+ if (s.IsBusy() && !s.IsDeadlock() && + my_core::thd_tx_isolation(ha_thd()) == ISO_READ_COMMITTED) { + tx->release_snapshot(); + tx->acquire_snapshot(false); + + s = tx->get_for_update(column_family, key, value, exclusive); + } + + return s; +} + +bool ha_rocksdb::is_blind_delete_enabled() +{ + THD *thd = ha_thd(); + return (THDVAR(thd, blind_delete_primary_key) && + thd->lex->sql_command == SQLCOM_DELETE && + thd->lex->table_count == 1 && + table->s->keys == 1 && + !has_hidden_pk(table) && + !thd->rgi_slave); +} + +/* + Given a rowid (i.e. packed PK) as a parameter, get the record. + + @return + 0 - Ok + other - HA_ERR error code. +*/ + +int ha_rocksdb::get_row_by_rowid(uchar *const buf, const char *const rowid, + const uint rowid_size, + const bool skip_lookup) { + DBUG_ENTER_FUNC(); + + DBUG_ASSERT(buf != nullptr); + DBUG_ASSERT(rowid != nullptr); + DBUG_ASSERT(table != nullptr); + + int rc; + + rocksdb::Slice key_slice(rowid, rowid_size); + + Rdb_transaction *const tx = get_or_create_tx(table->in_use); + DBUG_ASSERT(tx != nullptr); + + DEBUG_SYNC(ha_thd(), "rocksdb.get_row_by_rowid"); + DBUG_EXECUTE_IF("dbug.rocksdb.get_row_by_rowid", { + THD *thd = ha_thd(); + const char act[] = "now signal Reached " + "wait_for signal.rocksdb.get_row_by_rowid_let_running"; + DBUG_ASSERT(opt_debug_sync_timeout > 0); + DBUG_ASSERT(!debug_sync_set_action(thd, STRING_WITH_LEN(act))); + };); + + bool found; + rocksdb::Status s; + + /* Pretend row found without looking up */ + if (skip_lookup) + { +#ifdef MARIAROCKS_NOT_YET + stats.rows_deleted_blind++; +#endif + update_row_stats(ROWS_DELETED_BLIND); + m_last_rowkey.copy((const char *)rowid, rowid_size, &my_charset_bin); + table->status = 0; + DBUG_RETURN(0); + } + + if (m_lock_rows == RDB_LOCK_NONE) { + tx->acquire_snapshot(true); + s = tx->get(m_pk_descr->get_cf(), key_slice, &m_retrieved_record); + } else { + s = get_for_update(tx, m_pk_descr->get_cf(), key_slice, + &m_retrieved_record); + } + + if (!s.IsNotFound() && !s.ok()) 
{ + DBUG_RETURN(tx->set_status_error(table->in_use, s, *m_pk_descr, m_tbl_def)); + } + found = !s.IsNotFound(); + + table->status = STATUS_NOT_FOUND; + if (found) { + m_last_rowkey.copy((const char *)rowid, rowid_size, &my_charset_bin); + rc = convert_record_from_storage_format(&key_slice, buf); + if (!rc) + table->status = 0; + } else { + /* + Note: we don't need to unlock the row. It is intentional that we keep + locks on rows that don't exist. + */ + rc = HA_ERR_KEY_NOT_FOUND; + } + + DBUG_RETURN(rc); +} + +int ha_rocksdb::index_next(uchar *const buf) { + DBUG_ENTER_FUNC(); + + bool moves_forward = true; + if (m_key_descr_arr[active_index]->m_is_reverse_cf) { + moves_forward = false; + } + + int rc = index_next_with_direction(buf, moves_forward); + if (rc == HA_ERR_KEY_NOT_FOUND) + rc = HA_ERR_END_OF_FILE; + + DBUG_RETURN(rc); +} + +int ha_rocksdb::index_prev(uchar *const buf) { + DBUG_ENTER_FUNC(); + + bool moves_forward = false; + if (m_key_descr_arr[active_index]->m_is_reverse_cf) { + moves_forward = true; + } + + int rc = index_next_with_direction(buf, moves_forward); + if (rc == HA_ERR_KEY_NOT_FOUND) + rc = HA_ERR_END_OF_FILE; + + DBUG_RETURN(rc); +} + +int ha_rocksdb::index_next_with_direction(uchar *const buf, bool move_forward) { + DBUG_ENTER_FUNC(); + + int rc; + + if (active_index == pk_index(table, m_tbl_def)) { + rc = rnd_next_with_direction(buf, move_forward); + } else { + if (m_skip_scan_it_next_call) { + m_skip_scan_it_next_call = false; + } else { + if (move_forward) + m_scan_it->Next(); /* this call cannot fail */ + else + m_scan_it->Prev(); + } + rc = find_icp_matching_index_rec(move_forward, buf); + if (!rc) + rc = secondary_index_read(active_index, buf); + } + + DBUG_RETURN(rc); +} + +int ha_rocksdb::index_first(uchar *const buf) { + DBUG_ENTER_FUNC(); + + m_sk_match_prefix = nullptr; + int rc = m_key_descr_arr[active_index]->m_is_reverse_cf + ? 
index_last_intern(buf) + : index_first_intern(buf); + if (rc == HA_ERR_KEY_NOT_FOUND) + rc = HA_ERR_END_OF_FILE; + + DBUG_RETURN(rc); +} + +int ha_rocksdb::index_last(uchar *const buf) { + DBUG_ENTER_FUNC(); + + m_sk_match_prefix = nullptr; + int rc = m_key_descr_arr[active_index]->m_is_reverse_cf + ? index_first_intern(buf) + : index_last_intern(buf); + if (rc == HA_ERR_KEY_NOT_FOUND) + rc = HA_ERR_END_OF_FILE; + + DBUG_RETURN(rc); +} + +/* + Start scanning from the "first" value. + + The 'first' here means "the first from start of the key space". + For reverse-ordered key spaces, the first value will be the biggest, 'zzzz'. + + An picture of a forward-ordered keyspace (remember, the keys have form + 'indexnr-keyval'. Suppose the index we are at has number n) + + (n-1) - ... + ( n ) <--- 1. (n) doesn't exist in the db but it would be here. + ( n ) - aaa <--- 2. Seek("n") will put us here on the first index + ( n ) - bbb record. + ( n ) - cc + + So, need to do: Seek(n); + + A backward-ordered keyspace: + + (n+1) - bbb + (n+1) - aaa + (n+1) <--- (n+1) doesn't exist in the db but would be here. + ( n ) - ccc <--- 1. We need to be here. 
+ ( n ) - bbb + ( n ) - aaa + ( n ) + + So, need to: Seek(n+1); + +*/ + +int ha_rocksdb::index_first_intern(uchar *const buf) { + DBUG_ENTER_FUNC(); + + DBUG_ASSERT(buf != nullptr); + + uchar *key; + uint key_size; + int rc; + + if (is_pk(active_index, table, m_tbl_def)) { + key = m_pk_packed_tuple; + } else { + key = m_sk_packed_tuple; + } + + DBUG_ASSERT(key != nullptr); + + const Rdb_key_def &kd = *m_key_descr_arr[active_index]; + if (kd.m_is_reverse_cf) { + kd.get_supremum_key(key, &key_size); + } else { + kd.get_infimum_key(key, &key_size); + } + + rocksdb::Slice index_key((const char *)key, key_size); + + Rdb_transaction *const tx = get_or_create_tx(table->in_use); + DBUG_ASSERT(tx != nullptr); + + const bool is_new_snapshot = !tx->has_snapshot(); + // Loop as long as we get a deadlock error AND we end up creating the + // snapshot here (i.e. it did not exist prior to this) + for (;;) { + setup_scan_iterator(kd, &index_key, false, !kd.m_is_reverse_cf, + Rdb_key_def::INDEX_NUMBER_SIZE); + m_scan_it->Seek(index_key); + m_skip_scan_it_next_call = true; + + rc = index_next_with_direction(buf, true); + if (rc != HA_ERR_LOCK_DEADLOCK || !is_new_snapshot) + break; // exit the loop + + // release the snapshot and iterator so they will be regenerated + tx->release_snapshot(); + release_scan_iterator(); + } + + if (!rc) { + /* + index_next is always incremented on success, so decrement if it is + index_first instead + */ +#ifdef MARIAROCKS_NOT_YET + stats.rows_index_first++; + stats.rows_index_next--; +#endif + } + + DBUG_RETURN(rc); +} + +/** + @details + Start scanning from the "last" value + + The 'last' here means "the last from start of the key space". + For reverse-ordered key spaces, we will actually read the smallest value. + + An picture of a forward-ordered keyspace (remember, the keys have form + 'indexnr-keyval'. Suppose the we are at a key that has number n) + + (n-1)-something + ( n )-aaa + ( n )-bbb + ( n )-ccc <----------- Need to seek to here. 
+ (n+1) <---- Doesn't exist, but would be here. + (n+1)-smth, or no value at all + + RocksDB's Iterator::Seek($val) seeks to "at $val or first value that's + greater". We can't see to "(n)-ccc" directly, because we don't know what + is the value of 'ccc' (the biggest record with prefix (n)). Instead, we seek + to "(n+1)", which is the least possible value that's greater than any value + in index #n. Then we step one record back. + + So, need to: it->Seek(n+1) || it->SeekToLast(); it->Prev(); + + A backward-ordered keyspace: + + (n+1)-something + ( n ) - ccc + ( n ) - bbb + ( n ) - aaa <---------------- (*) Need to seek here. + ( n ) <--- Doesn't exist, but would be here. + (n-1)-smth, or no value at all + + So, need to: it->Seek(n) || it->SeekToLast(); it->Prev(); +*/ + +int ha_rocksdb::index_last_intern(uchar *const buf) { + DBUG_ENTER_FUNC(); + + DBUG_ASSERT(buf != nullptr); + + uchar *key; + uint key_size; + int rc; + + if (is_pk(active_index, table, m_tbl_def)) { + key = m_pk_packed_tuple; + } else { + key = m_sk_packed_tuple; + } + + DBUG_ASSERT(key != nullptr); + + const Rdb_key_def &kd = *m_key_descr_arr[active_index]; + if (kd.m_is_reverse_cf) { + kd.get_infimum_key(key, &key_size); + } else { + kd.get_supremum_key(key, &key_size); + } + + rocksdb::Slice index_key((const char *)key, key_size); + + Rdb_transaction *const tx = get_or_create_tx(table->in_use); + DBUG_ASSERT(tx != nullptr); + + bool is_new_snapshot = !tx->has_snapshot(); + // Loop as long as we get a deadlock error AND we end up creating the + // snapshot here (i.e. 
it did not exist prior to this) + for (;;) { + setup_scan_iterator(kd, &index_key, false, kd.m_is_reverse_cf, + Rdb_key_def::INDEX_NUMBER_SIZE); + m_scan_it->SeekForPrev(index_key); + m_skip_scan_it_next_call = false; + + if (is_pk(active_index, table, m_tbl_def)) { + m_skip_scan_it_next_call = true; + rc = rnd_next_with_direction(buf, false); + } else { + rc = find_icp_matching_index_rec(false /*move_forward*/, buf); + if (!rc) + rc = secondary_index_read(active_index, buf); + } + + if (rc != HA_ERR_LOCK_DEADLOCK || !is_new_snapshot) + break; /* exit the loop */ + + // release the snapshot and iterator so they will be regenerated + tx->release_snapshot(); + release_scan_iterator(); + } + + if (!rc) { + /* + index_next is always incremented on success, so decrement if it is + index_first instead + */ +#ifdef MARIAROCKS_NOT_YET + stats.rows_index_first++; + stats.rows_index_next--; +#endif + } + + DBUG_RETURN(rc); +} + +void ha_rocksdb::unlock_row() { + DBUG_ENTER_FUNC(); + + if (m_lock_rows != RDB_LOCK_NONE) { + Rdb_transaction *const tx = get_or_create_tx(table->in_use); + tx->release_lock(m_pk_descr->get_cf(), + std::string(m_last_rowkey.ptr(), m_last_rowkey.length())); + } + + DBUG_VOID_RETURN; +} + +/* + Returning true if SingleDelete can be used. + - Secondary Indexes can always use SingleDelete. + - If the index is PRIMARY KEY, and if all of the columns of the table + are covered by the PRIMARY KEY, SingleDelete can be used. 
+*/
+bool ha_rocksdb::can_use_single_delete(const uint &index) const {
+  return (index != pk_index(table, m_tbl_def) ||
+          (!has_hidden_pk(table) &&
+           table->key_info[index].ext_key_parts == table->s->fields));
+}
+
+bool ha_rocksdb::skip_unique_check() const {
+  /*
+    We want to skip unique checks if:
+      1) bulk_load is on
+      2) this table is in the whitelist of tables to skip and the replication
+         lag has reached a large enough value (see unique_check_lag_threshold
+         and unique_check_lage_reset_threshold)
+      3) the user set unique_checks option to 0, and the table does not have
+         any indexes. If the table has secondary keys, then those might become
+         inconsistent/corrupted
+  */
+  return THDVAR(table->in_use, bulk_load) ||
+         (m_force_skip_unique_check && m_skip_unique_check) ||
+         (my_core::thd_test_options(table->in_use,
+                                    OPTION_RELAXED_UNIQUE_CHECKS) &&
+          m_tbl_def->m_key_count == 1);
+}
+
+#ifdef MARIAROCKS_NOT_YET // MDEV-10975
+void ha_rocksdb::set_force_skip_unique_check(bool skip) {
+  DBUG_ENTER_FUNC();
+
+  m_force_skip_unique_check = skip;
+
+  DBUG_VOID_RETURN;
+}
+#endif
+
+bool ha_rocksdb::commit_in_the_middle() {
+  return THDVAR(table->in_use, bulk_load) ||
+         THDVAR(table->in_use, commit_in_the_middle);
+}
+
+/*
+  Executing bulk commit if it should.
+  @retval true if bulk commit failed
+  @retval false if bulk commit was skipped or succeeded
+*/
+bool ha_rocksdb::do_bulk_commit(Rdb_transaction *const tx) {
+  DBUG_ASSERT(tx != nullptr);
+  return commit_in_the_middle() &&
+         tx->get_write_count() >= THDVAR(table->in_use, bulk_load_size) &&
+         tx->flush_batch();
+}
+
+/*
+  If table was created without primary key, SQL layer represents the primary
+  key number as MAX_INDEXES. Hence, this function returns true if the table
+  does not contain a primary key. (In which case we generate a hidden
+  'auto-incremented' pk.)
+*/ +bool ha_rocksdb::has_hidden_pk(const TABLE *const table) const { + DBUG_ASSERT(table != nullptr); + return Rdb_key_def::table_has_hidden_pk(table); +} + +/* + Returns true if given index number is a hidden_pk. + - This is used when a table is created with no primary key. +*/ +bool ha_rocksdb::is_hidden_pk(const uint index, const TABLE *const table_arg, + const Rdb_tbl_def *const tbl_def_arg) { + DBUG_ASSERT(table_arg != nullptr); + DBUG_ASSERT(table_arg->s != nullptr); + DBUG_ASSERT(tbl_def_arg != nullptr); + + return (table_arg->s->primary_key == MAX_INDEXES && + index == tbl_def_arg->m_key_count - 1); +} + +/* Returns index of primary key */ +uint ha_rocksdb::pk_index(const TABLE *const table_arg, + const Rdb_tbl_def *const tbl_def_arg) { + DBUG_ASSERT(table_arg != nullptr); + DBUG_ASSERT(table_arg->s != nullptr); + DBUG_ASSERT(tbl_def_arg != nullptr); + + return table_arg->s->primary_key == MAX_INDEXES ? tbl_def_arg->m_key_count - 1 + : table_arg->s->primary_key; +} + +/* Returns true if given index number is a primary key */ +bool ha_rocksdb::is_pk(const uint index, const TABLE *const table_arg, + const Rdb_tbl_def *const tbl_def_arg) { + DBUG_ASSERT(table_arg != nullptr); + DBUG_ASSERT(table_arg->s != nullptr); + DBUG_ASSERT(tbl_def_arg != nullptr); + + return index == table_arg->s->primary_key || + is_hidden_pk(index, table_arg, tbl_def_arg); +} + +/* + Formats the string and returns the column family name assignment part for a + specific partition. 
+*/
+const std::string ha_rocksdb::gen_cf_name_qualifier_for_partition(
+    const std::string& prefix) {
+  DBUG_ASSERT(!prefix.empty());
+
+  return prefix + RDB_PER_PARTITION_QUALIFIER_NAME_SEP + RDB_CF_NAME_QUALIFIER +
+         RDB_PER_PARTITION_QUALIFIER_VALUE_SEP;
+}
+
+const char *ha_rocksdb::get_key_name(const uint index,
+                                     const TABLE *const table_arg,
+                                     const Rdb_tbl_def *const tbl_def_arg) {
+  DBUG_ASSERT(table_arg != nullptr);
+  DBUG_ASSERT(tbl_def_arg != nullptr);
+
+  if (is_hidden_pk(index, table_arg, tbl_def_arg)) {
+    return HIDDEN_PK_NAME;
+  }
+
+  DBUG_ASSERT(table_arg->key_info != nullptr);
+  DBUG_ASSERT(table_arg->key_info[index].name != nullptr);
+
+  return table_arg->key_info[index].name;
+}
+
+const char *ha_rocksdb::get_key_comment(const uint index,
+                                        const TABLE *const table_arg,
+                                        const Rdb_tbl_def *const tbl_def_arg) {
+  DBUG_ASSERT(table_arg != nullptr);
+  DBUG_ASSERT(tbl_def_arg != nullptr);
+
+  if (is_hidden_pk(index, table_arg, tbl_def_arg)) {
+    return nullptr;
+  }
+
+  DBUG_ASSERT(table_arg->key_info != nullptr);
+
+  return table_arg->key_info[index].comment.str;
+}
+
+const std::string ha_rocksdb::generate_cf_name(const uint index,
+                                               const TABLE *const table_arg,
+                                               const Rdb_tbl_def *const tbl_def_arg,
+                                               bool *per_part_match_found) {
+  DBUG_ASSERT(table_arg != nullptr);
+  DBUG_ASSERT(tbl_def_arg != nullptr);
+  DBUG_ASSERT(per_part_match_found != nullptr);
+
+  // When creating CF-s the caller needs to know if there was a custom CF name
+  // specified for a given partition.
+  *per_part_match_found = false;
+
+  // Index comment is used to define the column family name specification(s).
+  // If there was no comment, we get an empty string, and it means "use the
+  // default column family".
+  const char *const comment = get_key_comment(index, table_arg, tbl_def_arg);
+
+  // `get_key_comment` can return `nullptr`, that's why this.
+  std::string key_comment = comment ?
comment : ""; + + // If table has partitions then we need to check if user has requested to + // create a column family with a specific name on a per partition basis. + if (table_arg->part_info != nullptr) { + std::string partition_name = tbl_def_arg->base_partition(); + DBUG_ASSERT(!partition_name.empty()); + + // Let's fetch the comment for a index and check if there's a custom key + // name specified for a partition we are handling. + std::vector v = myrocks::parse_into_tokens(key_comment, + RDB_QUALIFIER_SEP); + std::string part_to_search = gen_cf_name_qualifier_for_partition( + partition_name); + DBUG_ASSERT(!part_to_search.empty()); + + // Basic O(N) search for a matching assignment. At most we expect maybe + // ten or so elements here. + for (const auto &it : v) { + if (it.substr(0, part_to_search.length()) == part_to_search) { + // We found a prefix match. Try to parse it as an assignment. + std::vector tokens = myrocks::parse_into_tokens(it, + RDB_PER_PARTITION_QUALIFIER_VALUE_SEP); + + // We found a custom name, it was in the form we expected it to be. + // Return that instead of whatever we initially wanted to return. In + // a case below the `foo` part will be returned to the caller. + // + // p3_cfname=foo + // + // If no value was specified then we'll return an empty string which + // later gets translated into using a default CF. + if (tokens.size() == 2) { + *per_part_match_found = true; + return tokens[1]; + } else { + return ""; + } + } + } + + // At this point we tried to search for a custom CF name for a partition, + // but none was specified. Therefore default one will be used. + return ""; + } + + return key_comment; +} + +int ha_rocksdb::write_row(uchar *const buf) { + DBUG_ENTER_FUNC(); + + DBUG_ASSERT(buf != nullptr); + DBUG_ASSERT(buf == table->record[0]); + DBUG_ASSERT(m_lock_rows == RDB_LOCK_WRITE); + + /* + Note: "buf == table->record[0]" is copied from innodb. I am not aware of + any use cases where this condition is not true. 
+ */ + if (table->next_number_field && buf == table->record[0]) { + int err; + if ((err = update_auto_increment())) { + DBUG_RETURN(err); + } + } + + const int rv = update_write_row(nullptr, buf, skip_unique_check()); + + if (rv == 0) { +#ifdef MARIAROCKS_NOT_YET + stats.rows_inserted++; +#endif + update_row_stats(ROWS_INSERTED); + } + + DBUG_RETURN(rv); +} + +/** + Constructing m_last_rowkey (MyRocks key expression) from + before_update|delete image (MySQL row expression). + m_last_rowkey is normally set during lookup phase, such as + rnd_next_with_direction() and rnd_pos(). With Read Free Replication, + these read functions are skipped and update_rows(), delete_rows() are + called without setting m_last_rowkey. This function sets m_last_rowkey + for Read Free Replication. +*/ +void ha_rocksdb::set_last_rowkey(const uchar *const old_data) { +#ifdef MARIAROCKS_NOT_YET + if (old_data && use_read_free_rpl()) { + const int old_pk_size = m_pk_descr->pack_record( + table, m_pack_buffer, old_data, m_pk_packed_tuple, nullptr, false); + m_last_rowkey.copy((const char *)m_pk_packed_tuple, old_pk_size, + &my_charset_bin); + } +#endif +} + +int ha_rocksdb::get_pk_for_update(struct update_row_info *const row_info) { + int size; + + /* + Get new row key for any insert, and any update where the pk is not hidden. + Row key for updates with hidden pk is handled below. 
+ */ + if (!has_hidden_pk(table)) { + row_info->hidden_pk_id = 0; + + row_info->new_pk_unpack_info = &m_pk_unpack_info; + + size = m_pk_descr->pack_record(table, m_pack_buffer, row_info->new_data, + m_pk_packed_tuple, + row_info->new_pk_unpack_info, false); + } else if (row_info->old_data == nullptr) { + row_info->hidden_pk_id = update_hidden_pk_val(); + size = + m_pk_descr->pack_hidden_pk(row_info->hidden_pk_id, m_pk_packed_tuple); + } else { + /* + If hidden primary key, rowkey for new record will always be the same as + before + */ + size = row_info->old_pk_slice.size(); + memcpy(m_pk_packed_tuple, row_info->old_pk_slice.data(), size); + if (read_hidden_pk_id_from_rowkey(&row_info->hidden_pk_id)) { + return HA_ERR_INTERNAL_ERROR; + } + } + + row_info->new_pk_slice = + rocksdb::Slice((const char *)m_pk_packed_tuple, size); + + return HA_EXIT_SUCCESS; +} + +int ha_rocksdb::check_and_lock_unique_pk(const uint &key_id, + const struct update_row_info &row_info, + bool *const found, + bool *const pk_changed) { + DBUG_ASSERT(found != nullptr); + DBUG_ASSERT(pk_changed != nullptr); + + *pk_changed = false; + + /* + For UPDATEs, if the key has changed, we need to obtain a lock. INSERTs + always require locking. + */ + if (row_info.old_pk_slice.size() > 0) { + /* + If the keys are the same, then no lock is needed + */ + if (!Rdb_pk_comparator::bytewise_compare(row_info.new_pk_slice, + row_info.old_pk_slice)) { + *found = false; + return HA_EXIT_SUCCESS; + } + + *pk_changed = true; + } + + /* + Perform a read to determine if a duplicate entry exists. For primary + keys, a point lookup will be sufficient. + + note: we intentionally don't set options.snapshot here. We want to read + the latest committed data. + */ + + /* + To prevent race conditions like below, it is necessary to + take a lock for a target row. get_for_update() holds a gap lock if + target key does not exist, so below conditions should never + happen. 
+ + 1) T1 Get(empty) -> T2 Get(empty) -> T1 Put(insert) -> T1 commit + -> T2 Put(overwrite) -> T2 commit + 2) T1 Get(empty) -> T1 Put(insert, not committed yet) -> T2 Get(empty) + -> T2 Put(insert, blocked) -> T1 commit -> T2 commit(overwrite) + */ + const rocksdb::Status s = + get_for_update(row_info.tx, m_pk_descr->get_cf(), row_info.new_pk_slice, + &m_retrieved_record); + if (!s.ok() && !s.IsNotFound()) { + return row_info.tx->set_status_error(table->in_use, s, + *m_key_descr_arr[key_id], m_tbl_def); + } + + *found = !s.IsNotFound(); + return HA_EXIT_SUCCESS; +} + +int ha_rocksdb::check_and_lock_sk(const uint &key_id, + const struct update_row_info &row_info, + bool *const found) const { + DBUG_ASSERT(found != nullptr); + *found = false; + + /* + Can skip checking this key if none of the key fields have changed. + */ + if (row_info.old_data != nullptr && !m_update_scope.is_set(key_id)) { + return HA_EXIT_SUCCESS; + } + + KEY *key_info = nullptr; + uint n_null_fields = 0; + uint user_defined_key_parts = 1; + + key_info = &table->key_info[key_id]; + user_defined_key_parts = key_info->user_defined_key_parts; + /* + If there are no uniqueness requirements, there's no need to obtain a + lock for this key. + */ + if (!(key_info->flags & HA_NOSAME)) { + return HA_EXIT_SUCCESS; + } + + const Rdb_key_def &kd = *m_key_descr_arr[key_id]; + + /* + Calculate the new key for obtaining the lock + + For unique secondary indexes, the key used for locking does not + include the extended fields. + */ + int size = + kd.pack_record(table, m_pack_buffer, row_info.new_data, m_sk_packed_tuple, + nullptr, false, 0, user_defined_key_parts, &n_null_fields); + if (n_null_fields > 0) { + /* + If any fields are marked as NULL this will never match another row as + to NULL never matches anything else including another NULL. 
+ */ + return HA_EXIT_SUCCESS; + } + + const rocksdb::Slice new_slice = + rocksdb::Slice((const char *)m_sk_packed_tuple, size); + + /* + For UPDATEs, if the key has changed, we need to obtain a lock. INSERTs + always require locking. + */ + if (row_info.old_data != nullptr) { + size = kd.pack_record(table, m_pack_buffer, row_info.old_data, + m_sk_packed_tuple_old, nullptr, false, + row_info.hidden_pk_id, user_defined_key_parts); + const rocksdb::Slice old_slice = + rocksdb::Slice((const char *)m_sk_packed_tuple_old, size); + + /* + For updates, if the keys are the same, then no lock is needed + + Also check to see if the key has any fields set to NULL. If it does, then + this key is unique since NULL is not equal to each other, so no lock is + needed. + */ + if (!Rdb_pk_comparator::bytewise_compare(new_slice, old_slice)) { + return HA_EXIT_SUCCESS; + } + } + + /* + Perform a read to determine if a duplicate entry exists - since this is + a secondary indexes a range scan is needed. + + note: we intentionally don't set options.snapshot here. We want to read + the latest committed data. + */ + + const bool all_parts_used = (user_defined_key_parts == kd.get_key_parts()); + + /* + This iterator seems expensive since we need to allocate and free + memory for each unique index. + + If this needs to be optimized, for keys without NULL fields, the + extended primary key fields can be migrated to the value portion of the + key. This enables using Get() instead of Seek() as in the primary key + case. + + The bloom filter may need to be disabled for this lookup. 
+ */ + const bool total_order_seek = !can_use_bloom_filter( + ha_thd(), kd, new_slice, all_parts_used, + is_ascending(*m_key_descr_arr[key_id], HA_READ_KEY_EXACT)); + const bool fill_cache = !THDVAR(ha_thd(), skip_fill_cache); + + /* + psergey-todo: we just need to take lock, lookups not needed: + */ + std::string dummy_value; + const rocksdb::Status s = + get_for_update(row_info.tx, kd.get_cf(), new_slice, &dummy_value); + if (!s.ok() && !s.IsNotFound()) { + return row_info.tx->set_status_error(table->in_use, s, kd, m_tbl_def); + } + + rocksdb::Iterator *const iter = row_info.tx->get_iterator( + kd.get_cf(), total_order_seek, fill_cache, true /* read current data */, + false /* acquire snapshot */); + /* + Need to scan the transaction to see if there is a duplicate key. + Also need to scan RocksDB and verify the key has not been deleted + in the transaction. + */ + iter->Seek(new_slice); + *found = !read_key_exact(kd, iter, all_parts_used, new_slice); + delete iter; + + return HA_EXIT_SUCCESS; +} + +int ha_rocksdb::check_uniqueness_and_lock( + const struct update_row_info &row_info, bool *const pk_changed) { + /* + Go through each index and determine if the index has uniqueness + requirements. If it does, then try to obtain a row lock on the new values. + Once all locks have been obtained, then perform the changes needed to + update/insert the row. + */ + for (uint key_id = 0; key_id < m_tbl_def->m_key_count; key_id++) { + bool found; + int rc; + + if (is_pk(key_id, table, m_tbl_def)) { + rc = check_and_lock_unique_pk(key_id, row_info, &found, pk_changed); + } else { + rc = check_and_lock_sk(key_id, row_info, &found); + } + + if (rc != 0) { + return rc; + } + + if (found) { + /* There is a row with this key already, so error out. 
*/ + errkey = key_id; + m_dupp_errkey = errkey; + return HA_ERR_FOUND_DUPP_KEY; + } + } + + return HA_EXIT_SUCCESS; +} + +int ha_rocksdb::check_duplicate_sk(const TABLE *table_arg, + const Rdb_key_def &index, + const rocksdb::Slice *key, + struct unique_sk_buf_info *sk_info) { + uint n_null_fields = 0; + const rocksdb::Comparator *index_comp = index.get_cf()->GetComparator(); + + /* Get proper SK buffer. */ + uchar *sk_buf = sk_info->swap_and_get_sk_buf(); + + /* Get memcmp form of sk without extended pk tail */ + uint sk_memcmp_size = + index.get_memcmp_sk_parts(table_arg, *key, sk_buf, &n_null_fields); + + sk_info->sk_memcmp_key = + rocksdb::Slice(reinterpret_cast(sk_buf), sk_memcmp_size); + + if (sk_info->sk_memcmp_key_old.size() > 0 && n_null_fields == 0 && + index_comp->Compare(sk_info->sk_memcmp_key, sk_info->sk_memcmp_key_old) == + 0) { + return 1; + } + + sk_info->sk_memcmp_key_old = sk_info->sk_memcmp_key; + return 0; +} + +int ha_rocksdb::bulk_load_key(Rdb_transaction *const tx, const Rdb_key_def &kd, + const rocksdb::Slice &key, + const rocksdb::Slice &value) { + rocksdb::ColumnFamilyHandle *const cf = kd.get_cf(); + DBUG_ASSERT(cf != nullptr); + + if (m_sst_info == nullptr) { + m_sst_info = std::make_shared( + rdb, m_table_handler->m_table_name, kd.get_name(), cf, + rocksdb_db_options, THDVAR(ha_thd(), trace_sst_api)); + tx->start_bulk_load(this); + m_bulk_load_tx = tx; + } + + DBUG_ASSERT(m_sst_info != nullptr); + + int rc = m_sst_info->put(key, value); + if (rc != 0) { + my_printf_error(ER_UNKNOWN_ERROR, + "Failed to add a key to sst file writer(%s)", MYF(0), + m_sst_info->error_message().c_str()); + rc = HA_ERR_INTERNAL_ERROR; + } + + return rc; +} + +int ha_rocksdb::finalize_bulk_load() { + int rc = 0; + + /* Skip if there are no possible ongoing bulk loads */ + if (m_sst_info == nullptr && m_bulk_load_tx == nullptr) { + return rc; + } + + RDB_MUTEX_LOCK_CHECK(m_bulk_load_mutex); + + /* + We need this check because it's possible that m_sst_info has 
been + flushed and cleared by another thread by the time the mutex has been + acquired. + */ + if (m_sst_info != nullptr) { + rc = m_sst_info->commit(); + if (rc != 0) { + /* + Log the error immediately here in case the server crashes before + mysql prints via my_printf_error. + */ + sql_print_error("Failed to commit bulk loaded sst file to the " + "data store (%s)", + m_sst_info->error_message().c_str()); + + my_printf_error(ER_UNKNOWN_ERROR, + "Failed to commit bulk loaded sst file to the " + "data store (%s)", + MYF(0), m_sst_info->error_message().c_str()); + rc = HA_ERR_INTERNAL_ERROR; + } + + m_sst_info = nullptr; + m_bulk_load_tx->end_bulk_load(this); + m_bulk_load_tx = nullptr; + } + + RDB_MUTEX_UNLOCK_CHECK(m_bulk_load_mutex); + + return rc; +} + +int ha_rocksdb::update_pk(const Rdb_key_def &kd, + const struct update_row_info &row_info, + const bool &pk_changed) { + const uint key_id = kd.get_keyno(); + const bool hidden_pk = is_hidden_pk(key_id, table, m_tbl_def); + if (!hidden_pk && pk_changed) { + /* + The old key needs to be deleted. + */ + const rocksdb::Status s = delete_or_singledelete( + key_id, row_info.tx, kd.get_cf(), row_info.old_pk_slice); + if (!s.ok()) { + return row_info.tx->set_status_error(table->in_use, s, kd, m_tbl_def); + } + } + + if (table->next_number_field) { + update_auto_incr_val(); + } + + rocksdb::Slice value_slice; + convert_record_to_storage_format(row_info.new_pk_slice, + row_info.new_pk_unpack_info, &value_slice); + + int rc = 0; + const auto cf = m_pk_descr->get_cf(); + if (rocksdb_enable_bulk_load_api && THDVAR(table->in_use, bulk_load) && + !hidden_pk) { + /* + Write the primary key directly to an SST file using an SstFileWriter + */ + rc = bulk_load_key(row_info.tx, kd, row_info.new_pk_slice, value_slice); + } else if (row_info.skip_unique_check) { + /* + It is responsibility of the user to make sure that the data being + inserted doesn't violate any unique keys. 
+ */ + row_info.tx->get_blind_write_batch()->Put(cf, row_info.new_pk_slice, + value_slice); + } else if (row_info.tx->m_ddl_transaction) { + /* + DDL statement must check for unique key conflicts. For example: + ALTER TABLE tbl DROP PRIMARY KEY, ADD PRIMARY KEY(non_unique_column) + */ + row_info.tx->get_indexed_write_batch()->Put(cf, row_info.new_pk_slice, + value_slice); + } else { + const auto s = row_info.tx->put(cf, row_info.new_pk_slice, value_slice); + if (!s.ok()) { + if (s.IsBusy()) { + errkey = table->s->primary_key; + m_dupp_errkey = errkey; + rc = HA_ERR_FOUND_DUPP_KEY; + } else { + rc = row_info.tx->set_status_error(table->in_use, s, *m_pk_descr, + m_tbl_def); + } + } + } + + return rc; +} + +int ha_rocksdb::update_sk(const TABLE *const table_arg, const Rdb_key_def &kd, + const struct update_row_info &row_info) { + int new_packed_size; + int old_packed_size; + + rocksdb::Slice new_key_slice; + rocksdb::Slice new_value_slice; + rocksdb::Slice old_key_slice; + + const uint key_id = kd.get_keyno(); + /* + Can skip updating this key if none of the key fields have changed. + */ + if (row_info.old_data != nullptr && !m_update_scope.is_set(key_id)) { + return HA_EXIT_SUCCESS; + } + + const bool store_row_debug_checksums = should_store_row_debug_checksums(); + + new_packed_size = kd.pack_record( + table_arg, m_pack_buffer, row_info.new_data, m_sk_packed_tuple, + &m_sk_tails, store_row_debug_checksums, row_info.hidden_pk_id); + + if (row_info.old_data != nullptr) { + // The old value + old_packed_size = kd.pack_record( + table_arg, m_pack_buffer, row_info.old_data, m_sk_packed_tuple_old, + &m_sk_tails_old, store_row_debug_checksums, row_info.hidden_pk_id); + + /* + Check if we are going to write the same value. This can happen when + one does + UPDATE tbl SET col='foo' + and we are looking at the row that already has col='foo'. + + We also need to compare the unpack info. 
Suppose, the collation is + case-insensitive, and unpack info contains information about whether + the letters were uppercase and lowercase. Then, both 'foo' and 'FOO' + will have the same key value, but different data in unpack_info. + + (note: anyone changing bytewise_compare should take this code into + account) + */ + if (old_packed_size == new_packed_size && + m_sk_tails_old.get_current_pos() == m_sk_tails.get_current_pos() && + memcmp(m_sk_packed_tuple_old, m_sk_packed_tuple, old_packed_size) == + 0 && + memcmp(m_sk_tails_old.ptr(), m_sk_tails.ptr(), + m_sk_tails.get_current_pos()) == 0) { + return HA_EXIT_SUCCESS; + } + + /* + Deleting entries from secondary index should skip locking, but + be visible to the transaction. + (also note that DDL statements do not delete rows, so this is not a DDL + statement) + */ + old_key_slice = rocksdb::Slice( + reinterpret_cast(m_sk_packed_tuple_old), old_packed_size); + + row_info.tx->get_indexed_write_batch()->SingleDelete(kd.get_cf(), + old_key_slice); + } + + /* + We're writing a new entry for secondary key. 
We can skip locking; we + should write to + - WriteBatchWithIndex normally (so that transaction sees the new row) + - non-indexed WriteBatch, when we don't need to see the new row: + = when doing a DDL operation and writing to a non-unique index, or + = when doing a bulk load + */ + rocksdb::WriteBatchBase *write_batch; + if ((row_info.tx->m_ddl_transaction && + !(table_arg->key_info[key_id].flags & HA_NOSAME)) || + row_info.skip_unique_check) { + write_batch = row_info.tx->get_blind_write_batch(); + } else { + write_batch = row_info.tx->get_indexed_write_batch(); + } + + new_key_slice = rocksdb::Slice( + reinterpret_cast(m_sk_packed_tuple), new_packed_size); + new_value_slice = + rocksdb::Slice(reinterpret_cast(m_sk_tails.ptr()), + m_sk_tails.get_current_pos()); + + write_batch->Put(kd.get_cf(), new_key_slice, new_value_slice); + + return HA_EXIT_SUCCESS; +} + +int ha_rocksdb::update_indexes(const struct update_row_info &row_info, + const bool &pk_changed) { + int rc; + + for (uint key_id = 0; key_id < m_tbl_def->m_key_count; key_id++) { + const Rdb_key_def &kd = *m_key_descr_arr[key_id]; + if (is_pk(key_id, table, m_tbl_def)) { + rc = update_pk(kd, row_info, pk_changed); + } else { + rc = update_sk(table, kd, row_info); + } + + if (rc != 0) { + return rc; + } + } + + return HA_EXIT_SUCCESS; +} + +int ha_rocksdb::update_write_row(const uchar *const old_data, + const uchar *const new_data, + const bool skip_unique_check) { + DBUG_ENTER_FUNC(); + + bool pk_changed = false; + struct update_row_info row_info; + + row_info.old_data = old_data; + row_info.new_data = new_data; + row_info.skip_unique_check = skip_unique_check; + row_info.new_pk_unpack_info = nullptr; + + set_last_rowkey(old_data); + + row_info.tx = get_or_create_tx(table->in_use); + + if (old_data != nullptr) { + row_info.old_pk_slice = + rocksdb::Slice(m_last_rowkey.ptr(), m_last_rowkey.length()); + + /* Determine which indexes need updating. 
*/ + calc_updated_indexes(); + } + + /* + Get the new row key into row_info.new_pk_slice + */ + int rc = get_pk_for_update(&row_info); + if (rc != 0) { + DBUG_RETURN(rc); + } + + if (!skip_unique_check) { + /* + Check to see if we are going to have failures because of unique + keys. Also lock the appropriate key values. + */ + rc = check_uniqueness_and_lock(row_info, &pk_changed); + if (rc != 0) { + DBUG_RETURN(rc); + } + } + + DEBUG_SYNC(ha_thd(), "rocksdb.update_write_row_after_unique_check"); + + /* + At this point, all locks have been obtained, and all checks for duplicate + keys have been performed. No further errors can be allowed to occur from + here because updates to the transaction will be made and those updates + cannot be easily removed without rolling back the entire transaction. + */ + rc = update_indexes(row_info, pk_changed); + if (rc != 0) { + DBUG_RETURN(rc); + } + + if (do_bulk_commit(row_info.tx)) { + DBUG_RETURN(HA_ERR_INTERNAL_ERROR); + } + + DBUG_RETURN(HA_EXIT_SUCCESS); +} + +/* + Open a cursor +*/ + +void ha_rocksdb::setup_scan_iterator(const Rdb_key_def &kd, + rocksdb::Slice *const slice, + const bool use_all_keys, + const bool is_ascending, + const uint eq_cond_len) { + DBUG_ASSERT(slice != nullptr); + DBUG_ASSERT(slice->size() >= eq_cond_len); + + Rdb_transaction *const tx = get_or_create_tx(table->in_use); + + bool skip_bloom = true; + + const rocksdb::Slice eq_cond(slice->data(), eq_cond_len); + if (can_use_bloom_filter(ha_thd(), kd, eq_cond, use_all_keys, is_ascending)) { + skip_bloom = false; + } + + /* + In some cases, setup_scan_iterator() is called multiple times from + the same query but bloom filter can not always be used. + Suppose the following query example. id2 is VARCHAR(30) and PRIMARY KEY + (id1, id2). 
+ select count(*) from t2 WHERE id1=100 and id2 IN ('00000000000000000000', + '100'); + In this case, setup_scan_iterator() is called twice, the first time is for + (id1, id2)=(100, '00000000000000000000') and the second time is for (100, + '100'). + If prefix bloom filter length is 24 bytes, prefix bloom filter can be used + for the + first condition but not for the second condition. + If bloom filter condition is changed, currently it is necessary to destroy + and + re-create Iterator. + */ + if (m_scan_it_skips_bloom != skip_bloom) { + release_scan_iterator(); + } + + /* + SQL layer can call rnd_init() multiple times in a row. + In that case, re-use the iterator, but re-position it at the table start. + */ + if (!m_scan_it) { + const bool fill_cache = !THDVAR(ha_thd(), skip_fill_cache); + if (commit_in_the_middle()) { + DBUG_ASSERT(m_scan_it_snapshot == nullptr); + m_scan_it_snapshot = rdb->GetSnapshot(); + + auto read_opts = rocksdb::ReadOptions(); + read_opts.total_order_seek = true; // TODO: set based on WHERE conditions + read_opts.snapshot = m_scan_it_snapshot; + m_scan_it = rdb->NewIterator(read_opts, kd.get_cf()); + } else { + m_scan_it = tx->get_iterator(kd.get_cf(), skip_bloom, fill_cache); + } + m_scan_it_skips_bloom = skip_bloom; + } +} + +void ha_rocksdb::release_scan_iterator() { + delete m_scan_it; + m_scan_it = nullptr; + + if (m_scan_it_snapshot) { + rdb->ReleaseSnapshot(m_scan_it_snapshot); + m_scan_it_snapshot = nullptr; + } +} + +void ha_rocksdb::setup_iterator_for_rnd_scan() { + uint key_size; + + if (m_pk_descr->m_is_reverse_cf) + m_pk_descr->get_supremum_key(m_pk_packed_tuple, &key_size); + else + m_pk_descr->get_infimum_key(m_pk_packed_tuple, &key_size); + + rocksdb::Slice table_key((const char *)m_pk_packed_tuple, key_size); + + setup_scan_iterator(*m_pk_descr, &table_key); + m_scan_it->Seek(table_key); + m_skip_scan_it_next_call = true; +} + +int ha_rocksdb::rnd_init(bool scan) { + DBUG_ENTER_FUNC(); + + Rdb_transaction *const tx = 
get_or_create_tx(table->in_use); + setup_read_decoders(); + + if (scan) { + m_rnd_scan_is_new_snapshot = !tx->has_snapshot(); + setup_iterator_for_rnd_scan(); + } else { + /* We don't need any preparations for rnd_pos() calls. */ + } + + // If m_lock_rows is on then we will be doing a get_for_update when accessing + // the index, so don't acquire the snapshot right away. Otherwise acquire + // the snapshot immediately. + tx->acquire_snapshot(m_lock_rows == RDB_LOCK_NONE); + + DBUG_RETURN(HA_EXIT_SUCCESS); +} + +int ha_rocksdb::rnd_next(uchar *const buf) { + DBUG_ENTER_FUNC(); + + int rc; + for (;;) { + rc = rnd_next_with_direction(buf, true); + if (rc != HA_ERR_LOCK_DEADLOCK || !m_rnd_scan_is_new_snapshot) + break; /* exit the loop */ + + // release the snapshot and iterator and then regenerate them + Rdb_transaction *tx = get_or_create_tx(table->in_use); + tx->release_snapshot(); + release_scan_iterator(); + setup_iterator_for_rnd_scan(); + } + + m_rnd_scan_is_new_snapshot = false; + + if (rc == HA_ERR_KEY_NOT_FOUND) + rc = HA_ERR_END_OF_FILE; + + DBUG_RETURN(rc); +} + +/* + See also secondary_index_read(). +*/ +int ha_rocksdb::rnd_next_with_direction(uchar *const buf, bool move_forward) { + DBUG_ENTER_FUNC(); + + int rc; + + table->status = STATUS_NOT_FOUND; +#ifdef MARIAROCKS_NOT_YET + stats.rows_requested++; +#endif + if (!m_scan_it || !m_scan_it->Valid()) { + /* + We can get here when SQL layer has called + + h->index_init(PRIMARY); + h->index_read_map(full index tuple, HA_READ_KEY_EXACT); + + In this case, we should return EOF. 
+ */ + DBUG_RETURN(HA_ERR_END_OF_FILE); + } + + for (;;) { + if (m_skip_scan_it_next_call) { + m_skip_scan_it_next_call = false; + } else { + if (move_forward) + m_scan_it->Next(); /* this call cannot fail */ + else + m_scan_it->Prev(); /* this call cannot fail */ + } + + if (!m_scan_it->Valid()) { + rc = HA_ERR_END_OF_FILE; + break; + } + + /* check if we're out of this table */ + const rocksdb::Slice key = m_scan_it->key(); + if (!m_pk_descr->covers_key(key)) { + rc = HA_ERR_END_OF_FILE; + break; + } + + if (m_lock_rows != RDB_LOCK_NONE) { + /* + Lock the row we've just read. + + Now we call get_for_update which will 1) Take a lock and 2) Will fail + if the row was deleted since the snapshot was taken. + */ + Rdb_transaction *const tx = get_or_create_tx(table->in_use); + DEBUG_SYNC(ha_thd(), "rocksdb_concurrent_delete"); + const rocksdb::Status s = + get_for_update(tx, m_pk_descr->get_cf(), key, &m_retrieved_record); + if (s.IsNotFound() && + my_core::thd_tx_isolation(ha_thd()) == ISO_READ_COMMITTED) { + // This occurs if we accessed a row, tried to lock it, failed, + // released and reacquired the snapshot (because of READ COMMITTED + // mode) and the row was deleted by someone else in the meantime. + // If so, we just want to move on to the next row. + continue; + } + + if (!s.ok()) { + DBUG_RETURN( + tx->set_status_error(table->in_use, s, *m_pk_descr, m_tbl_def)); + } + + // If we called get_for_update() use the value from that call not from + // the iterator as it may be stale since we don't have a snapshot + // when m_lock_rows is not RDB_LOCK_NONE. 
+ m_last_rowkey.copy(key.data(), key.size(), &my_charset_bin); + rc = convert_record_from_storage_format(&key, buf); + } else { + // Use the value from the iterator + rocksdb::Slice value = m_scan_it->value(); + m_last_rowkey.copy(key.data(), key.size(), &my_charset_bin); + rc = convert_record_from_storage_format(&key, &value, buf); + } + + table->status = 0; + break; + } + + if (!rc) { +#ifdef MARIAROCKS_NOT_YET + stats.rows_read++; + stats.rows_index_next++; +#endif + update_row_stats(ROWS_READ); + } + + DBUG_RETURN(rc); +} + +int ha_rocksdb::rnd_end() { + DBUG_ENTER_FUNC(); + + release_scan_iterator(); + + DBUG_RETURN(HA_EXIT_SUCCESS); +} + +int ha_rocksdb::index_init(uint idx, bool sorted) { + DBUG_ENTER_FUNC(); + + Rdb_transaction *const tx = get_or_create_tx(table->in_use); + DBUG_ASSERT(tx != nullptr); + + setup_read_decoders(); + + // If m_lock_rows is not RDB_LOCK_NONE then we will be doing a get_for_update + // when accessing the index, so don't acquire the snapshot right away. + // Otherwise acquire the snapshot immediately. + tx->acquire_snapshot(m_lock_rows == RDB_LOCK_NONE); + + active_index = idx; + + DBUG_RETURN(HA_EXIT_SUCCESS); +} + +int ha_rocksdb::index_end() { + DBUG_ENTER_FUNC(); + + release_scan_iterator(); + + active_index = MAX_KEY; + in_range_check_pushed_down = FALSE; + + DBUG_RETURN(HA_EXIT_SUCCESS); +} + + +int ha_rocksdb::truncate() { + DBUG_ENTER_FUNC(); + + DBUG_ASSERT(m_tbl_def != nullptr); + + m_tbl_def->m_auto_incr_val = 1; + m_tbl_def->m_hidden_pk_val = 1; + + remove_rows(m_tbl_def); + + DBUG_RETURN(HA_EXIT_SUCCESS); +} + +/* + Delete the row we've last read. The row is also passed as parameter. + + @detail + The caller guarantees table buf points to the row that was just read. + The row is either table->record[0] or table->record[1]. 
+ (Check out InnoDB: row_update_for_mysql() has "UT_NOT_USED(mysql_rec)" +*/ +int ha_rocksdb::delete_row(const uchar *const buf) { + DBUG_ENTER_FUNC(); + + DBUG_ASSERT(buf != nullptr); + + set_last_rowkey(buf); + + rocksdb::Slice key_slice(m_last_rowkey.ptr(), m_last_rowkey.length()); + Rdb_transaction *const tx = get_or_create_tx(table->in_use); + + const uint index = pk_index(table, m_tbl_def); + rocksdb::Status s = + delete_or_singledelete(index, tx, m_pk_descr->get_cf(), key_slice); + if (!s.ok()) { + DBUG_RETURN(tx->set_status_error(table->in_use, s, *m_pk_descr, m_tbl_def)); + } + + longlong hidden_pk_id = 0; + if (m_tbl_def->m_key_count > 1 && has_hidden_pk(table) && + read_hidden_pk_id_from_rowkey(&hidden_pk_id)) + DBUG_RETURN(HA_ERR_INTERNAL_ERROR); + + // Delete the record for every secondary index + for (uint i = 0; i < m_tbl_def->m_key_count; i++) { + if (!is_pk(i, table, m_tbl_def)) { + int packed_size; + const Rdb_key_def &kd = *m_key_descr_arr[i]; + packed_size = kd.pack_record(table, m_pack_buffer, buf, m_sk_packed_tuple, + nullptr, false, hidden_pk_id); + rocksdb::Slice secondary_key_slice( + reinterpret_cast(m_sk_packed_tuple), packed_size); + /* Deleting on secondary key doesn't need any locks: */ + tx->get_indexed_write_batch()->SingleDelete(kd.get_cf(), + secondary_key_slice); + } + } + + if (do_bulk_commit(tx)) { + DBUG_RETURN(HA_ERR_INTERNAL_ERROR); + } +#ifdef MARIAROCKS_NOT_YET + stats.rows_deleted++; +#endif + update_row_stats(ROWS_DELETED); + + DBUG_RETURN(HA_EXIT_SUCCESS); +} + +rocksdb::Status ha_rocksdb::delete_or_singledelete( + uint index, Rdb_transaction *const tx, + rocksdb::ColumnFamilyHandle *const column_family, + const rocksdb::Slice &key) { + if (can_use_single_delete(index)) + return tx->single_delete(column_family, key); + return tx->delete_key(column_family, key); +} + +void ha_rocksdb::update_stats(void) { + DBUG_ENTER_FUNC(); + + stats.records = 0; + stats.index_file_length = 0ul; + stats.data_file_length = 0ul; + 
stats.mean_rec_length = 0; + + for (uint i = 0; i < m_tbl_def->m_key_count; i++) { + if (is_pk(i, table, m_tbl_def)) { + stats.data_file_length = m_pk_descr->m_stats.m_actual_disk_size; + stats.records = m_pk_descr->m_stats.m_rows; + } else { + stats.index_file_length += m_key_descr_arr[i]->m_stats.m_actual_disk_size; + } + } + + DBUG_VOID_RETURN; +} + +int ha_rocksdb::info(uint flag) { + DBUG_ENTER_FUNC(); + + if (!table) + return HA_EXIT_FAILURE; + + if (flag & HA_STATUS_VARIABLE) { + /* + Test only to simulate corrupted stats + */ + DBUG_EXECUTE_IF("myrocks_simulate_negative_stats", + m_pk_descr->m_stats.m_actual_disk_size = + -m_pk_descr->m_stats.m_actual_disk_size;); + + update_stats(); + + /* + If any stats are negative due to bad cached stats, re-run analyze table + and re-retrieve the stats. + */ + if (static_cast(stats.data_file_length) < 0 || + static_cast(stats.index_file_length) < 0 || + static_cast(stats.records) < 0) { + if (analyze(nullptr, nullptr)) { + DBUG_RETURN(HA_EXIT_FAILURE); + } + + update_stats(); + } + + // if number of records is hardcoded, we do not want to force computation + // of memtable cardinalities + if (stats.records == 0 || + (rocksdb_force_compute_memtable_stats && + rocksdb_debug_optimizer_n_rows == 0)) + { + // First, compute SST files stats + uchar buf[Rdb_key_def::INDEX_NUMBER_SIZE * 2]; + auto r = get_range(pk_index(table, m_tbl_def), buf); + uint64_t sz = 0; + uint8_t include_flags = rocksdb::DB::INCLUDE_FILES; + // recompute SST files stats only if records count is 0 + if (stats.records == 0) { + rdb->GetApproximateSizes(m_pk_descr->get_cf(), &r, 1, &sz, + include_flags); + stats.records+= sz/ROCKSDB_ASSUMED_KEY_VALUE_DISK_SIZE; + stats.data_file_length+= sz; + } + // Second, compute memtable stats + uint64_t memtableCount; + uint64_t memtableSize; + rdb->GetApproximateMemTableStats(m_pk_descr->get_cf(), r, + &memtableCount, &memtableSize); + stats.records += memtableCount; + stats.data_file_length += memtableSize; + + 
if (rocksdb_debug_optimizer_n_rows > 0) + stats.records = rocksdb_debug_optimizer_n_rows; + } + + if (stats.records != 0) + stats.mean_rec_length = stats.data_file_length / stats.records; + } + if (flag & HA_STATUS_CONST) { + ref_length = m_pk_descr->max_storage_fmt_length(); + + // TODO: Needs to reimplement after having real index statistics + for (uint i = 0; i < m_tbl_def->m_key_count; i++) { + if (is_hidden_pk(i, table, m_tbl_def)) { + continue; + } + KEY *const k = &table->key_info[i]; + for (uint j = 0; j < k->ext_key_parts; j++) { + const Rdb_index_stats &k_stats = m_key_descr_arr[i]->m_stats; + uint x = k_stats.m_distinct_keys_per_prefix.size() > j && + k_stats.m_distinct_keys_per_prefix[j] > 0 + ? k_stats.m_rows / k_stats.m_distinct_keys_per_prefix[j] + : 0; + if (x > stats.records) + x = stats.records; + if ((x == 0 && rocksdb_debug_optimizer_no_zero_cardinality) || + rocksdb_debug_optimizer_n_rows > 0) { + // Fake cardinality implementation. For example, (idx1, idx2, idx3) + // index + /* + Make MariaRocks behave the same way as MyRocks does: + 1. SQL layer thinks that unique secondary indexes are not extended + with PK columns (both in MySQL and MariaDB) + 2. MariaDB also thinks that indexes with partially-covered columns + are not extended with PK columns. Use the same number of + keyparts that MyRocks would use. + */ + uint ext_key_parts2; + if (k->flags & HA_NOSAME) + ext_key_parts2= k->ext_key_parts; // This is #1 + else + ext_key_parts2= m_key_descr_arr[i]->get_key_parts(); // This is #2. + + // will have rec_per_key for (idx1)=4, (idx1,2)=2, and (idx1,2,3)=1. + // rec_per_key for the whole index is 1, and multiplied by 2^n if + // n suffix columns of the index are not used. + x = 1 << (ext_key_parts2 - j - 1); + } + k->rec_per_key[j] = x; + } + } + } + + if (flag & HA_STATUS_ERRKEY) { + /* + Currently we support only primary keys so we know which key had a + uniqueness violation. 
+ */ + errkey = m_dupp_errkey; + dup_ref = m_pk_tuple; // TODO(?): this should store packed PK. + } + + if (flag & HA_STATUS_AUTO) { + stats.auto_increment_value = m_tbl_def->m_auto_incr_val; + } + + DBUG_RETURN(HA_EXIT_SUCCESS); +} + +void ha_rocksdb::position(const uchar *const record) { + DBUG_ENTER_FUNC(); + + longlong hidden_pk_id = 0; + if (has_hidden_pk(table) && read_hidden_pk_id_from_rowkey(&hidden_pk_id)) + DBUG_ASSERT(false); // should never reach here + + /* + Get packed primary key value from the record. + + (Note: m_last_rowkey has the packed PK of last-read-row, which allows to + handle most cases, but there is an exception to this: when slave applies + RBR events, it fills the record and then calls position(); rnd_pos()) + + Also note that we *can* rely on PK column values being available. This is + because table_flags() includes HA_PRIMARY_KEY_REQUIRED_FOR_POSITION bit. + When that is true, table->prepare_for_position() adds PK columns into the + read set (this may potentially disable index-only access when PK column + cannot be restored from its mem-comparable form in the secondary indexes). + */ + const uint packed_size = m_pk_descr->pack_record( + table, m_pack_buffer, record, ref, nullptr, false, hidden_pk_id); + + /* + It could be that mem-comparable form of PK occupies less than ref_length + bytes. Fill the remainder with zeros. + */ + if (ref_length > packed_size) + memset(ref + packed_size, 0, ref_length - packed_size); + + DBUG_VOID_RETURN; +} + +int ha_rocksdb::rnd_pos(uchar *const buf, uchar *const pos) { + DBUG_ENTER_FUNC(); + + int rc; + size_t len; + +#ifdef MARIAROCKS_NOT_YET + stats.rows_requested++; +#endif + len = m_pk_descr->key_length(table, + rocksdb::Slice((const char *)pos, ref_length)); + if (len == size_t(-1)) { + DBUG_RETURN(HA_ERR_INTERNAL_ERROR); /* Data corruption? 
*/ + } + + rc = get_row_by_rowid(buf, pos, len); + + if (!rc) { +#ifdef MARIAROCKS_NOT_YET + stats.rows_read++; +#endif + update_row_stats(ROWS_READ); + } + + DBUG_RETURN(rc); +} + +/* + @brief + Calculate (if needed) the bitmap of indexes that are modified by the + current query + + @detail + The calculation done by checking index definitions against the + table->write_set + column bitmap. +*/ + +void ha_rocksdb::calc_updated_indexes() { + if (!m_update_scope_is_valid) { + m_update_scope_is_valid = true; + m_update_scope.clear_all(); + + for (uint keynr = 0; keynr < table->s->keys; keynr++) { + const Rdb_key_def &kd = *m_key_descr_arr[keynr]; + /* Walk over all key parts, including the "extended key" suffix */ + const uint key_parts = kd.get_key_parts(); + for (uint kp = 0; kp < key_parts; kp++) { + if (has_hidden_pk(table) && kp + 1 == key_parts) + break; + + Field *const field = kd.get_table_field_for_part_no(table, kp); + if (bitmap_is_set(table->write_set, field->field_index)) { + m_update_scope.set_bit(keynr); + break; + } + } + } + } +} + +int ha_rocksdb::update_row(const uchar *const old_data, uchar *const new_data) { + DBUG_ENTER_FUNC(); + + DBUG_ASSERT(old_data != nullptr); + DBUG_ASSERT(new_data != nullptr); + DBUG_ASSERT(m_lock_rows == RDB_LOCK_WRITE); + /* + old_data points to record we're updating. 
It is the same as the record + we've just read (for multi-table UPDATE, too, because SQL layer will make + an rnd_pos() call to re-read the record before calling update_row()) + */ + DBUG_ASSERT(new_data == table->record[0]); + + const int rv = update_write_row(old_data, new_data, false); + + if (rv == 0) { +#ifdef MARIAROCKS_NOT_YET + stats.rows_updated++; +#endif + update_row_stats(ROWS_UPDATED); + } + + DBUG_RETURN(rv); +} + +/* + MariaDB's temporary: MyRocks has this function in sql/handler.cc: +*/ + +bool can_hold_read_locks_on_select(THD *thd, thr_lock_type lock_type) +{ + return (lock_type == TL_READ_WITH_SHARED_LOCKS + || lock_type == TL_READ_NO_INSERT + || (lock_type != TL_IGNORE + && thd->lex->sql_command != SQLCOM_SELECT)); +} + + +/* The following function was copied from ha_blackhole::store_lock: */ +THR_LOCK_DATA **ha_rocksdb::store_lock(THD *const thd, THR_LOCK_DATA **to, + enum thr_lock_type lock_type) { + DBUG_ENTER_FUNC(); + + DBUG_ASSERT(thd != nullptr); + DBUG_ASSERT(to != nullptr); + + bool in_lock_tables = my_core::thd_in_lock_tables(thd); + + /* First, make a decision about MyRocks's internal locking */ + if (lock_type >= TL_WRITE_ALLOW_WRITE) { + m_lock_rows = RDB_LOCK_WRITE; + } else if (lock_type == TL_READ_WITH_SHARED_LOCKS) { + m_lock_rows = RDB_LOCK_READ; + } else { + m_lock_rows = RDB_LOCK_NONE; + if (THDVAR(thd, lock_scanned_rows)) { + /* + The following logic was copied directly from + ha_innobase::store_lock_with_x_type() in + storage/innobase/handler/ha_innodb.cc and causes MyRocks to leave + locks in place on rows that are in a table that is not being updated. 
+ */ + const uint sql_command = my_core::thd_sql_command(thd); + if ((lock_type == TL_READ && in_lock_tables) || + (lock_type == TL_READ_HIGH_PRIORITY && in_lock_tables) || + can_hold_read_locks_on_select(thd, lock_type)) { + ulong tx_isolation = my_core::thd_tx_isolation(thd); + if (sql_command != SQLCOM_CHECKSUM && + ((my_core::thd_test_options(thd, OPTION_BIN_LOG) && + tx_isolation > ISO_READ_COMMITTED) || + tx_isolation == ISO_SERIALIZABLE || + (lock_type != TL_READ && lock_type != TL_READ_NO_INSERT) || + (sql_command != SQLCOM_INSERT_SELECT && + sql_command != SQLCOM_REPLACE_SELECT && + sql_command != SQLCOM_UPDATE && sql_command != SQLCOM_DELETE && + sql_command != SQLCOM_CREATE_TABLE))) { + m_lock_rows = RDB_LOCK_READ; + } + } + } + } + + /* Then, tell the SQL layer what kind of locking it should use: */ + if (lock_type != TL_IGNORE && m_db_lock.type == TL_UNLOCK) { + /* + Here is where we get into the guts of a row level lock. + If TL_UNLOCK is set + If we are not doing a LOCK TABLE or DISCARD/IMPORT + TABLESPACE, then allow multiple writers + */ + + if ((lock_type >= TL_WRITE_CONCURRENT_INSERT && lock_type <= TL_WRITE) && + !in_lock_tables && !my_core::thd_tablespace_op(thd)) { + lock_type = TL_WRITE_ALLOW_WRITE; + } + + /* + In queries of type INSERT INTO t1 SELECT ... FROM t2 ... + MySQL would use the lock TL_READ_NO_INSERT on t2, and that + would conflict with TL_WRITE_ALLOW_WRITE, blocking all inserts + to t2. Convert the lock to a normal read lock to allow + concurrent inserts to t2. 
+ */ + + if (lock_type == TL_READ_NO_INSERT && !in_lock_tables) { + lock_type = TL_READ; + } + + m_db_lock.type = lock_type; + } + + *to++ = &m_db_lock; + + DBUG_RETURN(to); +} + +void ha_rocksdb::read_thd_vars(THD *const thd) { + m_store_row_debug_checksums = THDVAR(thd, store_row_debug_checksums); + m_verify_row_debug_checksums = THDVAR(thd, verify_row_debug_checksums); + m_checksums_pct = THDVAR(thd, checksums_pct); +} + +const char *ha_rocksdb::thd_rocksdb_tmpdir() { + const char *tmp_dir = THDVAR(ha_thd(), tmpdir); + + /* + We want to treat an empty string as nullptr, in these cases DDL operations + will use the default --tmpdir passed to mysql instead. + */ + if (tmp_dir != nullptr && *tmp_dir == '\0') { + tmp_dir = nullptr; + } + + return (tmp_dir); +} + +int ha_rocksdb::external_lock(THD *const thd, int lock_type) { + DBUG_ENTER_FUNC(); + + DBUG_ASSERT(thd != nullptr); + + int res = 0; + int binlog_format = my_core::thd_binlog_format(thd); + bool unsafe_for_binlog = THDVAR(ha_thd(), unsafe_for_binlog); + if (lock_type == F_WRLCK && !thd->rgi_slave && !unsafe_for_binlog && + binlog_format != BINLOG_FORMAT_ROW && + binlog_format != BINLOG_FORMAT_UNSPEC && + my_core::thd_binlog_filter_ok(thd)) { + my_printf_error( + ER_UNKNOWN_ERROR, + "Can't execute updates on master with binlog_format != ROW.", MYF(0)); + DBUG_RETURN(HA_ERR_INTERNAL_ERROR); + } + + if (lock_type == F_UNLCK) { + Rdb_transaction *const tx = get_or_create_tx(thd); + + tx->io_perf_end_and_record(&m_io_perf); + if (!my_core::thd_test_options(thd, OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN)) { + /* + Do like InnoDB: when we get here, it's time to commit a + single-statement transaction. + + If the statement involved multiple tables, this code will be executed + for each of them, but that's ok because non-first tx->commit() calls + will be no-ops. 
+ */ + res = tx->commit_or_rollback(); + } + } else { + if (my_core::thd_tx_isolation(thd) < ISO_READ_COMMITTED || + my_core::thd_tx_isolation(thd) > ISO_REPEATABLE_READ) { + my_printf_error( + ER_UNKNOWN_ERROR, "MyRocks supports only READ COMMITTED and " + "REPEATABLE READ isolation levels. " + "Please change from current isolation " + "level %s", + MYF(0), tx_isolation_names[my_core::thd_tx_isolation(thd)]); + DBUG_RETURN(HA_ERR_INTERNAL_ERROR); + } + /* + It's nice to do the following on start of every statement. The problem + is, handler->start_stmt() is not called for INSERTs. + So, we put this code here. + */ + Rdb_transaction *const tx = get_or_create_tx(thd); + read_thd_vars(thd); + + m_update_scope_is_valid = false; + + if (skip_unique_check()) { + if ((thd->lex->sql_command == SQLCOM_INSERT || + thd->lex->sql_command == SQLCOM_LOAD || + thd->lex->sql_command == SQLCOM_REPLACE) && + (thd->lex->duplicates == DUP_REPLACE || + thd->lex->duplicates == DUP_UPDATE)) { + my_printf_error(ER_UNKNOWN_ERROR, + "When unique checking is disabled in MyRocks, INSERT," + "UPDATE,LOAD statements with clauses that update or " + "replace the key (i.e. " + "INSERT ON DUPLICATE KEY UPDATE, REPLACE) are not " + "allowed. Query: %s", + MYF(0), thd->query()); + DBUG_RETURN(HA_ERR_INTERNAL_ERROR); + } + } + + if (lock_type == F_WRLCK) { + if (tx->is_tx_read_only()) { + my_printf_error(ER_UNKNOWN_ERROR, + "Can't execute updates when you started a transaction " + "with START TRANSACTION WITH CONSISTENT [ROCKSDB] " + "SNAPSHOT.", + MYF(0)); + DBUG_RETURN(HA_ERR_INTERNAL_ERROR); + } + + /* + SQL layer signals us to take a write lock. It does so when starting DML + statement. We should put locks on the rows we're reading. + + Note: sometimes, external_lock() can be called without a prior + ::store_lock call. That's why we need to set lock_* members here, too. 
+ */ + m_lock_rows = RDB_LOCK_WRITE; + + if (thd->lex->sql_command == SQLCOM_CREATE_INDEX || + thd->lex->sql_command == SQLCOM_DROP_INDEX || + thd->lex->sql_command == SQLCOM_ALTER_TABLE) { + tx->m_ddl_transaction = true; + } + } + rocksdb_register_tx(rocksdb_hton, thd, tx); + tx->io_perf_start(&m_io_perf); + } + + DBUG_RETURN(res); +} + +/** + @note + A quote from ha_innobase::start_stmt(): + + MySQL calls this function at the start of each SQL statement inside LOCK + TABLES. Inside LOCK TABLES the ::external_lock method does not work to + mark SQL statement borders. + +*/ + +int ha_rocksdb::start_stmt(THD *const thd, thr_lock_type lock_type) { + DBUG_ENTER_FUNC(); + + DBUG_ASSERT(thd != nullptr); + + Rdb_transaction *const tx = get_or_create_tx(thd); + read_thd_vars(thd); + rocksdb_register_tx(ht, thd, tx); + tx->io_perf_start(&m_io_perf); + + DBUG_RETURN(HA_EXIT_SUCCESS); +} + +rocksdb::Range get_range(uint32_t i, + uchar buf[Rdb_key_def::INDEX_NUMBER_SIZE * 2], + int offset1, int offset2) { + uchar *buf_begin = buf; + uchar *buf_end = buf + Rdb_key_def::INDEX_NUMBER_SIZE; + rdb_netbuf_store_index(buf_begin, i + offset1); + rdb_netbuf_store_index(buf_end, i + offset2); + + return rocksdb::Range( + rocksdb::Slice((const char *)buf_begin, Rdb_key_def::INDEX_NUMBER_SIZE), + rocksdb::Slice((const char *)buf_end, Rdb_key_def::INDEX_NUMBER_SIZE)); +} + +static rocksdb::Range get_range(const Rdb_key_def &kd, + uchar buf[Rdb_key_def::INDEX_NUMBER_SIZE * 2], + int offset1, int offset2) { + return get_range(kd.get_index_number(), buf, offset1, offset2); +} + +rocksdb::Range get_range(const Rdb_key_def &kd, + uchar buf[Rdb_key_def::INDEX_NUMBER_SIZE * 2]) { + if (kd.m_is_reverse_cf) { + return myrocks::get_range(kd, buf, 1, 0); + } else { + return myrocks::get_range(kd, buf, 0, 1); + } +} + +rocksdb::Range +ha_rocksdb::get_range(const int &i, + uchar buf[Rdb_key_def::INDEX_NUMBER_SIZE * 2]) const { + return myrocks::get_range(*m_key_descr_arr[i], buf); +} + +static bool 
is_myrocks_index_empty( + rocksdb::ColumnFamilyHandle *cfh, const bool is_reverse_cf, + const rocksdb::ReadOptions &read_opts, + const uint index_id) +{ + bool index_removed = false; + uchar key_buf[Rdb_key_def::INDEX_NUMBER_SIZE] = {0}; + rdb_netbuf_store_uint32(key_buf, index_id); + const rocksdb::Slice key = + rocksdb::Slice(reinterpret_cast(key_buf), sizeof(key_buf)); + std::unique_ptr it(rdb->NewIterator(read_opts, cfh)); + rocksdb_smart_seek(is_reverse_cf, it.get(), key); + if (!it->Valid()) { + index_removed = true; + } else { + if (memcmp(it->key().data(), key_buf, + Rdb_key_def::INDEX_NUMBER_SIZE)) { + // Key does not have same prefix + index_removed = true; + } + } + return index_removed; +} + +/* + Drop index thread's main logic +*/ + +void Rdb_drop_index_thread::run() { + RDB_MUTEX_LOCK_CHECK(m_signal_mutex); + + for (;;) { + // The stop flag might be set by shutdown command + // after drop_index_thread releases signal_mutex + // (i.e. while executing expensive Seek()). To prevent drop_index_thread + // from entering long cond_timedwait, checking if stop flag + // is true or not is needed, with drop_index_interrupt_mutex held. + if (m_stop) { + break; + } + + timespec ts; + int sec= dict_manager.is_drop_index_empty() + ? 
24 * 60 * 60 // no filtering + : 60; // filtering + set_timespec(ts,sec); + + const auto ret MY_ATTRIBUTE((__unused__)) = + mysql_cond_timedwait(&m_signal_cond, &m_signal_mutex, &ts); + if (m_stop) { + break; + } + // make sure, no program error is returned + DBUG_ASSERT(ret == 0 || ret == ETIMEDOUT); + RDB_MUTEX_UNLOCK_CHECK(m_signal_mutex); + + std::unordered_set indices; + dict_manager.get_ongoing_drop_indexes(&indices); + if (!indices.empty()) { + std::unordered_set finished; + rocksdb::ReadOptions read_opts; + read_opts.total_order_seek = true; // disable bloom filter + + for (const auto d : indices) { + uint32 cf_flags = 0; + if (!dict_manager.get_cf_flags(d.cf_id, &cf_flags)) { + sql_print_error("RocksDB: Failed to get column family flags " + "from cf id %u. MyRocks data dictionary may " + "get corrupted.", + d.cf_id); + abort_with_stack_traces(); + } + rocksdb::ColumnFamilyHandle *cfh = cf_manager.get_cf(d.cf_id); + DBUG_ASSERT(cfh); + const bool is_reverse_cf = cf_flags & Rdb_key_def::REVERSE_CF_FLAG; + + if (is_myrocks_index_empty(cfh, is_reverse_cf, read_opts, d.index_id)) + { + finished.insert(d); + continue; + } + uchar buf[Rdb_key_def::INDEX_NUMBER_SIZE * 2]; + rocksdb::Range range = get_range(d.index_id, buf, is_reverse_cf ? 1 : 0, + is_reverse_cf ? 
0 : 1); + rocksdb::CompactRangeOptions compact_range_options; + compact_range_options.bottommost_level_compaction = + rocksdb::BottommostLevelCompaction::kForce; + compact_range_options.exclusive_manual_compaction = false; + rocksdb::Status status = DeleteFilesInRange(rdb->GetBaseDB(), cfh, + &range.start, &range.limit); + if (!status.ok()) { + if (status.IsShutdownInProgress()) { + break; + } + rdb_handle_io_error(status, RDB_IO_ERROR_BG_THREAD); + } + status = rdb->CompactRange(compact_range_options, cfh, &range.start, + &range.limit); + if (!status.ok()) { + if (status.IsShutdownInProgress()) { + break; + } + rdb_handle_io_error(status, RDB_IO_ERROR_BG_THREAD); + } + if (is_myrocks_index_empty(cfh, is_reverse_cf, read_opts, d.index_id)) + { + finished.insert(d); + } + } + + if (!finished.empty()) { + dict_manager.finish_drop_indexes(finished); + } + } + RDB_MUTEX_LOCK_CHECK(m_signal_mutex); + } + + RDB_MUTEX_UNLOCK_CHECK(m_signal_mutex); +} + +Rdb_tbl_def *ha_rocksdb::get_table_if_exists(const char *const tablename) { + DBUG_ASSERT(tablename != nullptr); + + std::string str; + if (rdb_normalize_tablename(tablename, &str)) { + // We were not passed table name? + DBUG_ASSERT(0); + return nullptr; + } + + return ddl_manager.find(str); +} + +/* + Note: the following function is called when the table is not open. That is, + this->table==nullptr, pk_key_descr==nullptr, etc. + + tablename points to line in form "./dbname/tablename". 
+*/ + +int ha_rocksdb::delete_table(const char *const tablename) { + DBUG_ENTER_FUNC(); + + DBUG_ASSERT(tablename != nullptr); + + const std::unique_ptr wb = dict_manager.begin(); + rocksdb::WriteBatch *const batch = wb.get(); + + /* Find the table in the hash */ + Rdb_tbl_def *const tbl = get_table_if_exists(tablename); + if (!tbl) + DBUG_RETURN(HA_ERR_NO_SUCH_TABLE); + dict_manager.add_drop_table(tbl->m_key_descr_arr, tbl->m_key_count, batch); + + /* + Remove the table entry in data dictionary (this will also remove it from + the persistent data dictionary). + */ + ddl_manager.remove(tbl, batch, true); + if (dict_manager.commit(batch)) + DBUG_RETURN(HA_ERR_INTERNAL_ERROR); + + rdb_drop_idx_thread.signal(); + + DBUG_RETURN(HA_EXIT_SUCCESS); +} + +void ha_rocksdb::remove_rows(Rdb_tbl_def *const tbl) { + const rocksdb::WriteOptions wo = + rdb_get_rocksdb_write_options(handler::ha_thd()); + + rocksdb::ReadOptions opts; + opts.total_order_seek = true; + rocksdb::Iterator *const it = rdb->NewIterator(opts); + + char key_buf[MAX_KEY_LENGTH]; + uint key_len; + /* + Remove all records in each index. 
+ (This is is not crash-safe, but it doesn't matter, because bulk row + deletion will be handled on rocksdb side) + */ + for (uint i = 0; i < tbl->m_key_count; i++) { + const Rdb_key_def &kd = *tbl->m_key_descr_arr[i]; + kd.get_infimum_key(reinterpret_cast(key_buf), &key_len); + + const rocksdb::Slice table_key(key_buf, key_len); + it->Seek(table_key); + while (it->Valid()) { + const rocksdb::Slice key = it->key(); + if (!kd.covers_key(key)) { + break; + } + + if (can_use_single_delete(i)) + rdb->SingleDelete(wo, key); + else + rdb->Delete(wo, key); + it->Next(); + } + } + delete it; +} + +int ha_rocksdb::rename_table(const char *const from, const char *const to) { + DBUG_ENTER_FUNC(); + + DBUG_ASSERT(from != nullptr); + DBUG_ASSERT(to != nullptr); + + bool write_err = false; + std::string from_str; + std::string to_str; + std::string from_db; + std::string to_db; + + if (rdb_normalize_tablename(from, &from_str) || + rdb_split_normalized_tablename(from_str, &from_db)) { + DBUG_RETURN(HA_ERR_INTERNAL_ERROR); + } + + if (rdb_normalize_tablename(to, &to_str) || + rdb_split_normalized_tablename(to_str, &to_db)) { + DBUG_RETURN(HA_ERR_INTERNAL_ERROR); + } + + // If the user changed the database part of the name then validate that the + // 'to' database exists. + if (from_db != to_db && !rdb_database_exists(to_db)) { + DBUG_RETURN(HA_ERR_INTERNAL_ERROR); + } + + /* Check if any index has a per-index column family */ + { + /* + We can't check this directly, because + 1. the object that referred to the old table has been close()d. + 2. this ha_rocksdb object has no connection to any table at all, it has + been just created with (TABLE*)nullptr. + + So, we have to find the old table in the ddl_manager (it's there because + it has been opened by this statement), and check is_auto_cf there. 
+ */ + Rdb_tbl_def *tdef; + if (!(tdef = ddl_manager.find(from_str))) + DBUG_RETURN(HA_ERR_INTERNAL_ERROR); + + for (uint i = 0; i < tdef->m_key_count; i++) { + DBUG_ASSERT(tdef->m_key_descr_arr != nullptr); + + if (tdef->m_key_descr_arr[i]->m_is_auto_cf) { + my_error(ER_NOT_SUPPORTED_YET, MYF(0), + "ALTER TABLE on table with per-index CF"); + DBUG_RETURN(HA_ERR_INTERNAL_ERROR); + } + } + } + + const std::unique_ptr wb = dict_manager.begin(); + rocksdb::WriteBatch *const batch = wb.get(); + dict_manager.lock(); + write_err = + ddl_manager.rename(from_str, to_str, batch) || dict_manager.commit(batch); + dict_manager.unlock(); + + DBUG_RETURN(write_err ? HA_ERR_INTERNAL_ERROR : 0); +} + +/** + check_if_incompatible_data() called if ALTER TABLE can't detect otherwise + if new and old definition are compatible + + @details If there are no other explicit signs like changed number of + fields this function will be called by compare_tables() + (sql/sql_tables.cc) to decide should we rewrite whole table or only .frm + file. + +*/ + +bool ha_rocksdb::check_if_incompatible_data(HA_CREATE_INFO *const info, + uint table_changes) { + DBUG_ENTER_FUNC(); + + DBUG_ASSERT(info != nullptr); + + // this function is needed only for online alter-table + DBUG_RETURN(COMPATIBLE_DATA_NO); +} + +int ha_rocksdb::extra(enum ha_extra_function operation) { + DBUG_ENTER_FUNC(); + + switch (operation) { + case HA_EXTRA_KEYREAD: + m_keyread_only = true; + break; + case HA_EXTRA_NO_KEYREAD: + m_keyread_only = false; + break; + case HA_EXTRA_FLUSH: + /* + If the table has blobs, then they are part of m_retrieved_record. + This call invalidates them. + */ + m_retrieved_record.clear(); + break; + default: + break; + } + + DBUG_RETURN(HA_EXIT_SUCCESS); +} + +/* + Given a starting key and an ending key, estimate the number of rows that + will exist between the two keys. 
+*/ +ha_rows ha_rocksdb::records_in_range(uint inx, key_range *const min_key, + key_range *const max_key) { + DBUG_ENTER_FUNC(); + + ha_rows ret = THDVAR(ha_thd(), records_in_range); + if (ret) { + DBUG_RETURN(ret); + } + if (table->force_index) { + const ha_rows force_rows = THDVAR(ha_thd(), force_index_records_in_range); + if (force_rows) { + DBUG_RETURN(force_rows); + } + } + + const Rdb_key_def &kd = *m_key_descr_arr[inx]; + + uint size1 = 0; + if (min_key) { + size1 = kd.pack_index_tuple(table, m_pack_buffer, m_sk_packed_tuple, + min_key->key, min_key->keypart_map); + if (min_key->flag == HA_READ_PREFIX_LAST_OR_PREV || + min_key->flag == HA_READ_PREFIX_LAST || + min_key->flag == HA_READ_AFTER_KEY) { + kd.successor(m_sk_packed_tuple, size1); + } + } else { + kd.get_infimum_key(m_sk_packed_tuple, &size1); + } + + uint size2 = 0; + if (max_key) { + size2 = kd.pack_index_tuple(table, m_pack_buffer, m_sk_packed_tuple_old, + max_key->key, max_key->keypart_map); + if (max_key->flag == HA_READ_PREFIX_LAST_OR_PREV || + max_key->flag == HA_READ_PREFIX_LAST || + max_key->flag == HA_READ_AFTER_KEY) { + kd.successor(m_sk_packed_tuple_old, size2); + } + // pad the upper key with FFFFs to make sure it is more than the lower + if (size1 > size2) { + memset(m_sk_packed_tuple_old + size2, 0xff, size1 - size2); + size2 = size1; + } + } else { + kd.get_supremum_key(m_sk_packed_tuple_old, &size2); + } + + const rocksdb::Slice slice1((const char *)m_sk_packed_tuple, size1); + const rocksdb::Slice slice2((const char *)m_sk_packed_tuple_old, size2); + + // slice1 >= slice2 means no row will match + if (slice1.compare(slice2) >= 0) { + DBUG_RETURN(HA_EXIT_SUCCESS); + } + + rocksdb::Range r(kd.m_is_reverse_cf ? slice2 : slice1, + kd.m_is_reverse_cf ? 
slice1 : slice2); + + uint64_t sz = 0; + auto disk_size = kd.m_stats.m_actual_disk_size; + if (disk_size == 0) + disk_size = kd.m_stats.m_data_size; + auto rows = kd.m_stats.m_rows; + if (rows == 0 || disk_size == 0) { + rows = 1; + disk_size = ROCKSDB_ASSUMED_KEY_VALUE_DISK_SIZE; + } + + // Getting statistics, including from Memtables + uint8_t include_flags = rocksdb::DB::INCLUDE_FILES; + rdb->GetApproximateSizes(kd.get_cf(), &r, 1, &sz, include_flags); + ret = rows * sz / disk_size; + uint64_t memTableCount; + rdb->GetApproximateMemTableStats(kd.get_cf(), r, &memTableCount, &sz); + ret += memTableCount; + + /* + GetApproximateSizes() gives estimates so ret might exceed stats.records. + MySQL then decides to use full index scan rather than range scan, which + is not efficient for most cases. + To prevent this, changing estimated records slightly smaller than + stats.records. + */ + if (ret >= stats.records) { + ret = stats.records * 0.99; + } + + if (rocksdb_debug_optimizer_n_rows > 0) { + ret = rocksdb_debug_optimizer_n_rows; + } else if (ret == 0) { + ret = 1; + } + + DBUG_RETURN(ret); +} + +void ha_rocksdb::update_create_info(HA_CREATE_INFO *const create_info) { + DBUG_ENTER_FUNC(); + + DBUG_ASSERT(create_info != nullptr); + + if (!(create_info->used_fields & HA_CREATE_USED_AUTO)) { + create_info->auto_increment_value = m_tbl_def->m_auto_incr_val; + } + + DBUG_VOID_RETURN; +} + +/** + @brief + Doing manual compaction on OPTIMIZE TABLE in RocksDB. + Compaction itself is executed by background thread in RocksDB, but + CompactRange() waits until compaction completes so this function + may take a long time. + Since RocksDB dataset is allocated per index id, OPTIMIZE TABLE + triggers manual compaction for all indexes of the table. + @details + Compaction range is from the beginning of the index id to + the first row of the next index id. When using reverse order + column family, the first row of the next index id should be + the last row of the previous index id. 
+*/ +int ha_rocksdb::optimize(THD *const thd, HA_CHECK_OPT *const check_opt) { + DBUG_ENTER_FUNC(); + + DBUG_ASSERT(thd != nullptr); + DBUG_ASSERT(check_opt != nullptr); + + int rc = 0; + for (uint i = 0; i < table->s->keys; i++) { + uchar buf[Rdb_key_def::INDEX_NUMBER_SIZE * 2]; + auto range = get_range(i, buf); + if (!rdb->CompactRange(rocksdb::CompactRangeOptions(), + m_key_descr_arr[i]->get_cf(), &range.start, + &range.limit) + .ok()) { + rc = 1; + break; + } + } + + DBUG_RETURN(rc); +} + +int ha_rocksdb::calculate_stats(const TABLE *const table_arg, THD *const thd, + HA_CHECK_OPT *const check_opt) { + DBUG_ENTER_FUNC(); + + // find per column family key ranges which need to be queried + std::unordered_map> + ranges; + std::unordered_set ids_to_check; + std::unordered_map ids_to_keyparts; + std::vector buf(table_arg->s->keys * 2 * + Rdb_key_def::INDEX_NUMBER_SIZE); + for (uint i = 0; i < table_arg->s->keys; i++) { + const auto bufp = &buf[i * 2 * Rdb_key_def::INDEX_NUMBER_SIZE]; + const Rdb_key_def &kd = *m_key_descr_arr[i]; + ranges[kd.get_cf()].push_back(get_range(i, bufp)); + ids_to_check.insert(kd.get_gl_index_id()); + ids_to_keyparts[kd.get_gl_index_id()] = kd.get_key_parts(); + } + + // for analyze statements, force flush on memtable to get accurate cardinality + Rdb_cf_manager &cf_manager = rdb_get_cf_manager(); + if (thd != nullptr && THDVAR(thd, flush_memtable_on_analyze) && + !rocksdb_pause_background_work) { + for (auto it : ids_to_check) { + rdb->Flush(rocksdb::FlushOptions(), cf_manager.get_cf(it.cf_id)); + } + } + + // get RocksDB table properties for these ranges + rocksdb::TablePropertiesCollection props; + for (auto it : ranges) { + const auto old_size MY_ATTRIBUTE((__unused__)) = props.size(); + const auto status = rdb->GetPropertiesOfTablesInRange( + it.first, &it.second[0], it.second.size(), &props); + DBUG_ASSERT(props.size() >= old_size); + if (!status.ok()) + DBUG_RETURN(HA_ERR_INTERNAL_ERROR); + } + + int num_sst = 0; + // group stats 
per index id + std::unordered_map stats; + for (const auto &it : ids_to_check) { + // Initialize the stats to 0. If there are no files that contain + // this gl_index_id, then 0 should be stored for the cached stats. + stats[it] = Rdb_index_stats(it); + DBUG_ASSERT(ids_to_keyparts.count(it) > 0); + stats[it].m_distinct_keys_per_prefix.resize(ids_to_keyparts[it]); + } + for (const auto &it : props) { + std::vector sst_stats; + Rdb_tbl_prop_coll::read_stats_from_tbl_props(it.second, &sst_stats); + /* + sst_stats is a list of index statistics for indexes that have entries + in the current SST file. + */ + for (const auto &it1 : sst_stats) { + /* + Only update statistics for indexes that belong to this SQL table. + + The reason is: We are walking through all SST files that have + entries from this table (and so can compute good statistics). For + other SQL tables, it can be that we're only seeing a small fraction + of table's entries (and so we can't update statistics based on that). + */ + if (ids_to_check.find(it1.m_gl_index_id) == ids_to_check.end()) + continue; + + auto kd = ddl_manager.safe_find(it1.m_gl_index_id); + DBUG_ASSERT(kd != nullptr); + stats[it1.m_gl_index_id].merge(it1, true, kd->max_storage_fmt_length()); + } + num_sst++; + } + + // set and persist new stats + ddl_manager.set_stats(stats); + ddl_manager.persist_stats(true); + + DBUG_RETURN(HA_EXIT_SUCCESS); +} + +int ha_rocksdb::analyze(THD *const thd, HA_CHECK_OPT *const check_opt) { + DBUG_ENTER_FUNC(); + + if (!table) { + DBUG_RETURN(HA_EXIT_FAILURE); + } + + DBUG_RETURN(calculate_stats(table, thd, check_opt)); +} + +void ha_rocksdb::get_auto_increment(ulonglong off, ulonglong inc, + ulonglong nb_desired_values, + ulonglong *const first_value, + ulonglong *const nb_reserved_values) { + /* + MySQL has a somewhat complicated way of handling the auto-increment value. + The first time get_auto_increment is called for a statement, + nb_desired_values is the estimate for how many values will be needed. 
The + engine can then reserve some values, and those will be automatically used + by MySQL, until a hard-coded value shows up in the insert statement, after + which MySQL again calls this function to reset its starting value. + * + For simplicity we will just ignore nb_desired_values - we aren't going to + reserve any extra values for a multi-insert statement. Each row will + simply acquire the next value as needed and we will always tell MySQL that + we only reserved 1 value. Since we are using an atomic value for + m_auto_incr_val this should be safe - if we had to grab a mutex, doing + an actual reserve of some values might be a better solution. + */ + DEBUG_SYNC(ha_thd(), "rocksdb.autoinc_vars"); + + if (off > inc) { + off = 1; + } + + // Local variable reference to simplify code below + std::atomic &auto_incr = m_tbl_def->m_auto_incr_val; + + if (inc == 1 && off == 1) { + // Optimization for the standard case where we are always simply + // incrementing from the last position + *first_value = auto_incr++; + } else { + // The next value can be more complicated if either `inc` or 'off' is not 1 + longlong new_val; + longlong last_val = auto_incr; + + // Loop until we can correctly update the atomic value + do { + // Calculate the next value in the auto increment series: + // offset + N * increment + // where N is 0, 1, 2, ... + // + // For further information please visit: + // http://dev.mysql.com/doc/refman/5.7/en/replication-options-master.html + new_val = ((last_val + (inc - off) - 1) / inc) * inc + off; + + // Attempt to store the new value (plus 1 since m_auto_incr_val contains + // the next available value) into the atomic value. If the current + // value no longer matches what we have in 'last_val' this will fail and + // we will repeat the loop (`last_val` will automatically get updated + // with the current value). 
+ } while (!auto_incr.compare_exchange_weak(last_val, new_val + 1)); + + *first_value = new_val; + } + + *nb_reserved_values = 1; +} + +#ifndef DBUG_OFF + +/* Debugger help function */ +static char dbug_item_print_buf[512]; + +const char *dbug_print_item(Item *const item) { + char *const buf = dbug_item_print_buf; + String str(buf, sizeof(dbug_item_print_buf), &my_charset_bin); + str.length(0); + if (!item) + return "(Item*)nullptr"; + item->print(&str, QT_ORDINARY); + if (str.c_ptr() == buf) + return buf; + else + return "Couldn't fit into buffer"; +} + +#endif /*DBUG_OFF*/ + +/** + SQL layer calls this function to push an index condition. + + @details + The condition is for index keyno (only one condition can be pushed at a + time). + The caller guarantees that condition refers only to index fields; besides + that, fields must have + + $field->part_of_key.set_bit(keyno) + + which means that + + (handler->index_flags(keyno, $keypart, 0) & HA_KEYREAD_ONLY) == 1 + + which means that field value can be restored from the index tuple. + + @return + Part of condition we couldn't check (always nullptr). +*/ + +class Item *ha_rocksdb::idx_cond_push(uint keyno, class Item *const idx_cond) { + DBUG_ENTER_FUNC(); + + DBUG_ASSERT(keyno != MAX_KEY); + DBUG_ASSERT(idx_cond != nullptr); + + pushed_idx_cond = idx_cond; + pushed_idx_cond_keyno = keyno; + in_range_check_pushed_down = TRUE; + + /* We will check the whole condition */ + DBUG_RETURN(nullptr); +} + +/** + Checking if an index is used for ascending scan or not + + @detail + Currently RocksDB does not support bloom filter for + prefix lookup + descending scan, but supports bloom filter for + prefix lookup + ascending scan. This function returns true if + the scan pattern is absolutely ascending. 
+ @param kd + @param find_flag +*/ +bool ha_rocksdb::is_ascending(const Rdb_key_def &kd, + enum ha_rkey_function find_flag) const { + bool is_ascending; + switch (find_flag) { + case HA_READ_KEY_EXACT: { + is_ascending = !kd.m_is_reverse_cf; + break; + } + case HA_READ_PREFIX: { + is_ascending = true; + break; + } + case HA_READ_KEY_OR_NEXT: + case HA_READ_AFTER_KEY: { + is_ascending = !kd.m_is_reverse_cf; + break; + } + case HA_READ_KEY_OR_PREV: + case HA_READ_BEFORE_KEY: + case HA_READ_PREFIX_LAST: + case HA_READ_PREFIX_LAST_OR_PREV: { + is_ascending = kd.m_is_reverse_cf; + break; + } + default: + is_ascending = false; + } + return is_ascending; +} + +/* + Checks if inplace alter is supported for a given operation. +*/ + +my_core::enum_alter_inplace_result ha_rocksdb::check_if_supported_inplace_alter( + TABLE *altered_table, my_core::Alter_inplace_info *const ha_alter_info) { + DBUG_ENTER_FUNC(); + + DBUG_ASSERT(ha_alter_info != nullptr); + + if (ha_alter_info->handler_flags & + ~(my_core::Alter_inplace_info::DROP_INDEX | + my_core::Alter_inplace_info::DROP_UNIQUE_INDEX | + my_core::Alter_inplace_info::ADD_INDEX | + my_core::Alter_inplace_info::ALTER_PARTITIONED | + my_core::Alter_inplace_info::ADD_UNIQUE_INDEX)) { + DBUG_RETURN(my_core::HA_ALTER_INPLACE_NOT_SUPPORTED); + } + + /* We don't support unique keys on table w/ no primary keys */ + if ((ha_alter_info->handler_flags & + my_core::Alter_inplace_info::ADD_UNIQUE_INDEX) && + has_hidden_pk(altered_table)) { + DBUG_RETURN(my_core::HA_ALTER_INPLACE_NOT_SUPPORTED); + } + + DBUG_RETURN(my_core::HA_ALTER_INPLACE_SHARED_LOCK_AFTER_PREPARE); +} + +/** + Allows the storage engine to update internal structures with concurrent + writes blocked. If check_if_supported_inplace_alter() returns + HA_ALTER_INPLACE_NO_LOCK_AFTER_PREPARE or + HA_ALTER_INPLACE_SHARED_AFTER_PREPARE, this function is called with + exclusive lock otherwise the same level of locking as for + inplace_alter_table() will be used. 
+ + @note Storage engines are responsible for reporting any errors by + calling my_error()/print_error() + + @note If this function reports error, commit_inplace_alter_table() + will be called with commit= false. + + @note For partitioning, failing to prepare one partition, means that + commit_inplace_alter_table() will be called to roll back changes for + all partitions. This means that commit_inplace_alter_table() might be + called without prepare_inplace_alter_table() having been called first + for a given partition. + + @param altered_table TABLE object for new version of table. + @param ha_alter_info Structure describing changes to be done + by ALTER TABLE and holding data used + during in-place alter. + + @retval true Error + @retval false Success +*/ +bool ha_rocksdb::prepare_inplace_alter_table( + TABLE *const altered_table, + my_core::Alter_inplace_info *const ha_alter_info) { + DBUG_ENTER_FUNC(); + + DBUG_ASSERT(altered_table != nullptr); + DBUG_ASSERT(ha_alter_info != nullptr); + + const uint n_dropped_keys = ha_alter_info->index_drop_count; + const uint n_added_keys = ha_alter_info->index_add_count; + const uint old_n_keys = m_tbl_def->m_key_count; + uint new_n_keys = altered_table->s->keys; + + if (has_hidden_pk(altered_table)) { + new_n_keys += 1; + } + + std::shared_ptr *const old_key_descr = + m_tbl_def->m_key_descr_arr; + std::shared_ptr *const new_key_descr = + new std::shared_ptr[new_n_keys]; + + Rdb_tbl_def *const new_tdef = new Rdb_tbl_def(m_tbl_def->full_tablename()); + new_tdef->m_key_descr_arr = new_key_descr; + new_tdef->m_key_count = new_n_keys; + new_tdef->m_auto_incr_val = + m_tbl_def->m_auto_incr_val.load(std::memory_order_relaxed); + new_tdef->m_hidden_pk_val = + m_tbl_def->m_hidden_pk_val.load(std::memory_order_relaxed); + + if (ha_alter_info->handler_flags & + (my_core::Alter_inplace_info::DROP_INDEX | + my_core::Alter_inplace_info::DROP_UNIQUE_INDEX | + my_core::Alter_inplace_info::ADD_INDEX | + 
my_core::Alter_inplace_info::ADD_UNIQUE_INDEX) && + create_key_defs(altered_table, new_tdef, table, m_tbl_def)) { + /* Delete the new key descriptors */ + delete[] new_key_descr; + + /* + Explicitly mark as nullptr so we don't accidentally remove entries + from data dictionary on cleanup (or cause double delete[]). + */ + new_tdef->m_key_descr_arr = nullptr; + delete new_tdef; + + my_printf_error(ER_UNKNOWN_ERROR, + "MyRocks failed creating new key definitions during alter.", + MYF(0)); + DBUG_RETURN(HA_EXIT_FAILURE); + } + + std::unordered_set> added_indexes; + std::unordered_set dropped_index_ids; + + uint i; + uint j; + const KEY *key; + + /* Determine which(if any) key definition(s) need to be dropped */ + for (i = 0; i < ha_alter_info->index_drop_count; i++) { + const KEY *const key = ha_alter_info->index_drop_buffer[i]; + for (j = 0; j < old_n_keys; j++) { + if (!old_key_descr[j]->m_name.compare(key->name)) { + dropped_index_ids.insert(old_key_descr[j]->get_gl_index_id()); + break; + } + } + } + + DBUG_ASSERT(dropped_index_ids.size() == ha_alter_info->index_drop_count); + + /* Determine which(if any) key definitions(s) need to be added */ + for (i = 0; i < ha_alter_info->index_add_count; i++) { + key = &ha_alter_info->key_info_buffer[ha_alter_info->index_add_buffer[i]]; + for (j = 0; j < new_n_keys; j++) { + if (!new_key_descr[j]->m_name.compare(key->name)) { + added_indexes.insert(new_key_descr[j]); + break; + } + } + } + + DBUG_ASSERT(added_indexes.size() == ha_alter_info->index_add_count); + + ha_alter_info->handler_ctx = new Rdb_inplace_alter_ctx( + new_tdef, old_key_descr, new_key_descr, old_n_keys, new_n_keys, + added_indexes, dropped_index_ids, n_added_keys, n_dropped_keys); + + DBUG_RETURN(HA_EXIT_SUCCESS); +} + +/** + Alter the table structure in-place with operations specified using + HA_ALTER_FLAGS and Alter_inplace_info. The level of concurrency allowed + during this operation depends on the return value from + check_if_supported_inplace_alter(). 
+ + @note Storage engines are responsible for reporting any errors by + calling my_error()/print_error() + + @note If this function reports error, commit_inplace_alter_table() + will be called with commit= false. + + @param altered_table TABLE object for new version of table. + @param ha_alter_info Structure describing changes to be done + by ALTER TABLE and holding data used + during in-place alter. + + @retval true Error + @retval false Success +*/ +bool ha_rocksdb::inplace_alter_table( + TABLE *const altered_table, + my_core::Alter_inplace_info *const ha_alter_info) { + DBUG_ENTER_FUNC(); + + DBUG_ASSERT(altered_table != nullptr); + DBUG_ASSERT(ha_alter_info != nullptr); + DBUG_ASSERT(ha_alter_info->handler_ctx != nullptr); + + Rdb_inplace_alter_ctx *const ctx = + static_cast(ha_alter_info->handler_ctx); + + if (ha_alter_info->handler_flags & + (my_core::Alter_inplace_info::ADD_INDEX | + my_core::Alter_inplace_info::ADD_UNIQUE_INDEX)) { + /* + Buffers need to be set up again to account for new, possibly longer + secondary keys. + */ + free_key_buffers(); + + DBUG_ASSERT(ctx != nullptr); + + /* + If adding unique index, allocate special buffers for duplicate checking. + */ + int err; + if ((err = alloc_key_buffers( + altered_table, ctx->m_new_tdef, + ha_alter_info->handler_flags & + my_core::Alter_inplace_info::ADD_UNIQUE_INDEX))) { + my_printf_error(ER_UNKNOWN_ERROR, + "MyRocks failed allocating key buffers during alter.", + MYF(0)); + DBUG_RETURN(err); + } + + /* Populate all new secondary keys by scanning the primary key. 
*/ + if ((err = inplace_populate_sk(altered_table, ctx->m_added_indexes))) { + my_printf_error(ER_UNKNOWN_ERROR, + "MyRocks failed populating secondary key during alter.", + MYF(0)); + DBUG_RETURN(HA_EXIT_FAILURE); + } + } + + DBUG_EXECUTE_IF("myrocks_simulate_index_create_rollback", { + dbug_create_err_inplace_alter(); + DBUG_RETURN(HA_EXIT_FAILURE); + };); + + DBUG_RETURN(HA_EXIT_SUCCESS); +} + +/** + Scan the Primary Key index entries and populate the new secondary keys. +*/ +int ha_rocksdb::inplace_populate_sk( + TABLE *const new_table_arg, + const std::unordered_set> &indexes) { + DBUG_ENTER_FUNC(); + + const std::unique_ptr wb = dict_manager.begin(); + rocksdb::WriteBatch *const batch = wb.get(); + + /* Update the data dictionary */ + std::unordered_set create_index_ids; + for (const auto &index : indexes) { + create_index_ids.insert(index->get_gl_index_id()); + } + dict_manager.add_create_index(create_index_ids, batch); + dict_manager.commit(batch); + + /* + Add uncommitted key definitons to ddl_manager. We need to do this + so that the property collector can find this keydef when it needs to + update stats. The property collector looks for the keydef in the + data dictionary, but it won't be there yet since this key definition + is still in the creation process. + */ + ddl_manager.add_uncommitted_keydefs(indexes); + + const bool hidden_pk_exists = has_hidden_pk(table); + + int res = 0; + Rdb_transaction *tx = get_or_create_tx(table->in_use); + + /* + There is one specific scenario where m_sst_info may not be nullptr. This + happens if the handler we're using happens to be the handler where the PK + bulk load was done on. 
The sequence of events that lead to this is as + follows (T1 is PK bulk load, T2 is SK alter table): + + T1: Execute last INSERT statement + T1: Return TABLE and handler object back to Table_cache_manager + T1: Close connection + T2: Execute ALTER statement + T2: Take same TABLE/handler from Table_cache_manager + T2: Call closefrm which will call finalize_bulk_load on every other open + table/handler *except* the one it's on. + T2: Acquire stale snapshot of PK + T1: Call finalize_bulk_load + + This is rare because usually, closefrm will call the destructor (and thus + finalize_bulk_load) on the handler where PK bulk load is done. However, if + the thread ids of the bulk load thread and the alter thread differ by a + multiple of table_cache_instances (8 by default), then they hash to the + same bucket in Table_cache_manager and the alter thread will not not call + the destructor on the handler it is holding. Thus, its m_sst_info will not + be nullptr. + + At this point, it is safe to refresh the snapshot because we know all other + open handlers have been closed at this point, and the one we're on is the + only one left. + */ + if (m_sst_info != nullptr) { + if ((res = finalize_bulk_load())) { + DBUG_RETURN(res); + } + tx->commit(); + } + + const ulonglong rdb_merge_buf_size = THDVAR(ha_thd(), merge_buf_size); + const ulonglong rdb_merge_combine_read_size = + THDVAR(ha_thd(), merge_combine_read_size); + + for (const auto &index : indexes) { + const rocksdb::Comparator *index_comp = index->get_cf()->GetComparator(); + bool is_unique_index = + new_table_arg->key_info[index->get_keyno()].flags & HA_NOSAME; + + Rdb_index_merge rdb_merge(thd_rocksdb_tmpdir(), rdb_merge_buf_size, + rdb_merge_combine_read_size, index_comp); + + if ((res = rdb_merge.init())) { + DBUG_RETURN(res); + } + + /* + Note: We pass in the currently existing table + tbl_def object here, + as the pk index position may have changed in the case of hidden primary + keys. 
+ */ + const uint pk = pk_index(table, m_tbl_def); + ha_index_init(pk, true); + + /* Scan each record in the primary key in order */ + for (res = index_first(table->record[0]); res == 0; + res = index_next(table->record[0])) { + longlong hidden_pk_id = 0; + if (hidden_pk_exists && read_hidden_pk_id_from_rowkey(&hidden_pk_id)) { + // NO_LINT_DEBUG + sql_print_error("Error retrieving hidden pk id."); + ha_index_end(); + DBUG_RETURN(HA_ERR_INTERNAL_ERROR); + } + + /* Create new secondary index entry */ + const int new_packed_size = index->pack_record( + new_table_arg, m_pack_buffer, table->record[0], m_sk_packed_tuple, + &m_sk_tails, should_store_row_debug_checksums(), hidden_pk_id); + + const rocksdb::Slice key = rocksdb::Slice( + reinterpret_cast(m_sk_packed_tuple), new_packed_size); + const rocksdb::Slice val = + rocksdb::Slice(reinterpret_cast(m_sk_tails.ptr()), + m_sk_tails.get_current_pos()); + + /* + Add record to offset tree in preparation for writing out to + disk in sorted chunks. + */ + if ((res = rdb_merge.add(key, val))) { + ha_index_end(); + DBUG_RETURN(res); + } + } + + if (res != HA_ERR_END_OF_FILE) { + // NO_LINT_DEBUG + sql_print_error("Error retrieving index entry from primary key."); + ha_index_end(); + DBUG_RETURN(res); + } + + ha_index_end(); + + /* + Perform an n-way merge of n sorted buffers on disk, then writes all + results to RocksDB via SSTFileWriter API. + */ + rocksdb::Slice merge_key; + rocksdb::Slice merge_val; + + struct unique_sk_buf_info sk_info; + sk_info.dup_sk_buf = m_dup_sk_packed_tuple; + sk_info.dup_sk_buf_old = m_dup_sk_packed_tuple_old; + + while ((res = rdb_merge.next(&merge_key, &merge_val)) == 0) { + /* Perform uniqueness check if needed */ + if (is_unique_index) { + if (check_duplicate_sk(new_table_arg, *index, &merge_key, &sk_info)) { + /* + Duplicate entry found when trying to create unique secondary key. 
+ We need to unpack the record into new_table_arg->record[0] as it + is used inside print_keydup_error so that the error message shows + the duplicate record. + */ + if (index->unpack_record(new_table_arg, new_table_arg->record[0], + &merge_key, nullptr, + m_verify_row_debug_checksums)) { + /* Should never reach here */ + DBUG_ASSERT(0); + } + + print_keydup_error(new_table_arg, + &new_table_arg->key_info[index->get_keyno()], + MYF(0)); + DBUG_RETURN(ER_DUP_ENTRY); + } + } + + /* + Insert key and slice to SST via SSTFileWriter API. + */ + if ((res = bulk_load_key(tx, *index, merge_key, merge_val))) { + break; + } + } + + /* + Here, res == -1 means that we are finished, while > 0 means an error + occurred. + */ + if (res > 0) { + // NO_LINT_DEBUG + sql_print_error("Error while bulk loading keys in external merge sort."); + DBUG_RETURN(res); + } + + if ((res = tx->finish_bulk_load())) { + // NO_LINT_DEBUG + sql_print_error("Error finishing bulk load."); + DBUG_RETURN(res); + } + } + + /* + Explicitly tell jemalloc to clean up any unused dirty pages at this point. + See https://reviews.facebook.net/D63723 for more details. + */ + purge_all_jemalloc_arenas(); + + DBUG_EXECUTE_IF("crash_during_online_index_creation", DBUG_SUICIDE();); + DBUG_RETURN(res); +} + +/** + Commit or rollback the changes made during prepare_inplace_alter_table() + and inplace_alter_table() inside the storage engine. + Note that in case of rollback the allowed level of concurrency during + this operation will be the same as for inplace_alter_table() and thus + might be higher than during prepare_inplace_alter_table(). (For example, + concurrent writes were blocked during prepare, but might not be during + rollback). + + @note Storage engines are responsible for reporting any errors by + calling my_error()/print_error() + + @note If this function with commit= true reports error, it will be called + again with commit= false. 
+ + @note In case of partitioning, this function might be called for rollback + without prepare_inplace_alter_table() having been called first. + Also partitioned tables sets ha_alter_info->group_commit_ctx to a NULL + terminated array of the partitions handlers and if all of them are + committed as one, then group_commit_ctx should be set to NULL to indicate + to the partitioning handler that all partitions handlers are committed. + @see prepare_inplace_alter_table(). + + @param altered_table TABLE object for new version of table. + @param ha_alter_info Structure describing changes to be done + by ALTER TABLE and holding data used + during in-place alter. + @param commit True => Commit, False => Rollback. + + @retval true Error + @retval false Success +*/ +bool ha_rocksdb::commit_inplace_alter_table( + my_core::TABLE *const altered_table, + my_core::Alter_inplace_info *const ha_alter_info, bool commit) { + DBUG_ENTER_FUNC(); + + DBUG_ASSERT(altered_table != nullptr); + DBUG_ASSERT(ha_alter_info != nullptr); + + Rdb_inplace_alter_ctx *const ctx0 = + static_cast(ha_alter_info->handler_ctx); + + DEBUG_SYNC(ha_thd(), "rocksdb.commit_in_place_alter_table"); + + /* + IMPORTANT: When rollback is requested, mysql will abort with + an assertion failure. That means every failed commit during inplace alter + table will result in a fatal error on the server. Indexes ongoing creation + will be detected when the server restarts, and dropped. + + For partitioned tables, a rollback call to this function (commit == false) + is done for each partition. A successful commit call only executes once + for all partitions. + */ + if (!commit) { + /* If ctx has not been created yet, nothing to do here */ + if (!ctx0) { + DBUG_RETURN(HA_EXIT_SUCCESS); + } + + /* + Cannot call destructor for Rdb_tbl_def directly because we don't want to + erase the mappings inside the ddl_manager, as the old_key_descr is still + using them. 
+ */ + if (ctx0->m_new_key_descr) { + /* Delete the new key descriptors */ + for (uint i = 0; i < ctx0->m_new_tdef->m_key_count; i++) { + ctx0->m_new_key_descr[i] = nullptr; + } + + delete[] ctx0->m_new_key_descr; + ctx0->m_new_key_descr = nullptr; + ctx0->m_new_tdef->m_key_descr_arr = nullptr; + + delete ctx0->m_new_tdef; + } + + /* Remove uncommitted key definitons from ddl_manager */ + ddl_manager.remove_uncommitted_keydefs(ctx0->m_added_indexes); + + /* Rollback any partially created indexes */ + dict_manager.rollback_ongoing_index_creation(); + + DBUG_RETURN(HA_EXIT_SUCCESS); + } + + DBUG_ASSERT(ctx0); + + /* + For partitioned tables, we need to commit all changes to all tables at + once, unlike in the other inplace alter API methods. + */ + inplace_alter_handler_ctx **ctx_array; + inplace_alter_handler_ctx *ctx_single[2]; + + if (ha_alter_info->group_commit_ctx) { + DBUG_EXECUTE_IF("crash_during_index_creation_partition", DBUG_SUICIDE();); + ctx_array = ha_alter_info->group_commit_ctx; + } else { + ctx_single[0] = ctx0; + ctx_single[1] = nullptr; + ctx_array = ctx_single; + } + + DBUG_ASSERT(ctx0 == ctx_array[0]); + ha_alter_info->group_commit_ctx = nullptr; + + if (ha_alter_info->handler_flags & + (my_core::Alter_inplace_info::DROP_INDEX | + my_core::Alter_inplace_info::DROP_UNIQUE_INDEX | + my_core::Alter_inplace_info::ADD_INDEX | + my_core::Alter_inplace_info::ADD_UNIQUE_INDEX)) { + const std::unique_ptr wb = dict_manager.begin(); + rocksdb::WriteBatch *const batch = wb.get(); + std::unordered_set create_index_ids; + + m_tbl_def = ctx0->m_new_tdef; + m_key_descr_arr = m_tbl_def->m_key_descr_arr; + m_pk_descr = m_key_descr_arr[pk_index(altered_table, m_tbl_def)]; + + dict_manager.lock(); + for (inplace_alter_handler_ctx **pctx = ctx_array; *pctx; pctx++) { + Rdb_inplace_alter_ctx *const ctx = + static_cast(*pctx); + + /* Mark indexes to be dropped */ + dict_manager.add_drop_index(ctx->m_dropped_index_ids, batch); + + for (const auto &index : 
ctx->m_added_indexes) { + create_index_ids.insert(index->get_gl_index_id()); + } + + if (ddl_manager.put_and_write(ctx->m_new_tdef, batch)) { + /* + Failed to write new entry into data dictionary, this should never + happen. + */ + DBUG_ASSERT(0); + } + + /* + Remove uncommitted key definitons from ddl_manager, as they are now + committed into the data dictionary. + */ + ddl_manager.remove_uncommitted_keydefs(ctx->m_added_indexes); + } + + if (dict_manager.commit(batch)) { + /* + Should never reach here. We assume MyRocks will abort if commit fails. + */ + DBUG_ASSERT(0); + } + + dict_manager.unlock(); + + /* Mark ongoing create indexes as finished/remove from data dictionary */ + dict_manager.finish_indexes_operation( + create_index_ids, Rdb_key_def::DDL_CREATE_INDEX_ONGOING); + + /* + We need to recalculate the index stats here manually. The reason is that + the secondary index does not exist inside + m_index_num_to_keydef until it is committed to the data dictionary, which + prevents us from updating the stats normally as the ddl_manager cannot + find the proper gl_index_ids yet during adjust_stats calls. 
+ */ + if (calculate_stats(altered_table, nullptr, nullptr)) { + /* Failed to update index statistics, should never happen */ + DBUG_ASSERT(0); + } + + rdb_drop_idx_thread.signal(); + } + + DBUG_RETURN(HA_EXIT_SUCCESS); +} + +#define SHOW_FNAME(name) rocksdb_show_##name + +#define DEF_SHOW_FUNC(name, key) \ + static int SHOW_FNAME(name)(MYSQL_THD thd, SHOW_VAR * var, char *buff) { \ + rocksdb_status_counters.name = \ + rocksdb_stats->getTickerCount(rocksdb::key); \ + var->type = SHOW_LONGLONG; \ + var->value = (char *)&rocksdb_status_counters.name; \ + return HA_EXIT_SUCCESS; \ + } + +#define DEF_STATUS_VAR(name) \ + { "rocksdb_" #name, (char *)&SHOW_FNAME(name), SHOW_FUNC } + +#define DEF_STATUS_VAR_PTR(name, ptr, option) \ + { "rocksdb_" name, (char *)ptr, option } + +#define DEF_STATUS_VAR_FUNC(name, ptr, option) \ + { name, reinterpret_cast(ptr), option } + +struct rocksdb_status_counters_t { + uint64_t block_cache_miss; + uint64_t block_cache_hit; + uint64_t block_cache_add; + uint64_t block_cache_index_miss; + uint64_t block_cache_index_hit; + uint64_t block_cache_filter_miss; + uint64_t block_cache_filter_hit; + uint64_t block_cache_data_miss; + uint64_t block_cache_data_hit; + uint64_t bloom_filter_useful; + uint64_t memtable_hit; + uint64_t memtable_miss; + uint64_t compaction_key_drop_new; + uint64_t compaction_key_drop_obsolete; + uint64_t compaction_key_drop_user; + uint64_t number_keys_written; + uint64_t number_keys_read; + uint64_t number_keys_updated; + uint64_t bytes_written; + uint64_t bytes_read; + uint64_t no_file_closes; + uint64_t no_file_opens; + uint64_t no_file_errors; + uint64_t l0_slowdown_micros; + uint64_t memtable_compaction_micros; + uint64_t l0_num_files_stall_micros; + uint64_t rate_limit_delay_millis; + uint64_t num_iterators; + uint64_t number_multiget_get; + uint64_t number_multiget_keys_read; + uint64_t number_multiget_bytes_read; + uint64_t number_deletes_filtered; + uint64_t number_merge_failures; + uint64_t 
bloom_filter_prefix_checked; + uint64_t bloom_filter_prefix_useful; + uint64_t number_reseeks_iteration; + uint64_t getupdatessince_calls; + uint64_t block_cachecompressed_miss; + uint64_t block_cachecompressed_hit; + uint64_t wal_synced; + uint64_t wal_bytes; + uint64_t write_self; + uint64_t write_other; + uint64_t write_timedout; + uint64_t write_wal; + uint64_t flush_write_bytes; + uint64_t compact_read_bytes; + uint64_t compact_write_bytes; + uint64_t number_superversion_acquires; + uint64_t number_superversion_releases; + uint64_t number_superversion_cleanups; + uint64_t number_block_not_compressed; +}; + +static rocksdb_status_counters_t rocksdb_status_counters; + +DEF_SHOW_FUNC(block_cache_miss, BLOCK_CACHE_MISS) +DEF_SHOW_FUNC(block_cache_hit, BLOCK_CACHE_HIT) +DEF_SHOW_FUNC(block_cache_add, BLOCK_CACHE_ADD) +DEF_SHOW_FUNC(block_cache_index_miss, BLOCK_CACHE_INDEX_MISS) +DEF_SHOW_FUNC(block_cache_index_hit, BLOCK_CACHE_INDEX_HIT) +DEF_SHOW_FUNC(block_cache_filter_miss, BLOCK_CACHE_FILTER_MISS) +DEF_SHOW_FUNC(block_cache_filter_hit, BLOCK_CACHE_FILTER_HIT) +DEF_SHOW_FUNC(block_cache_data_miss, BLOCK_CACHE_DATA_MISS) +DEF_SHOW_FUNC(block_cache_data_hit, BLOCK_CACHE_DATA_HIT) +DEF_SHOW_FUNC(bloom_filter_useful, BLOOM_FILTER_USEFUL) +DEF_SHOW_FUNC(memtable_hit, MEMTABLE_HIT) +DEF_SHOW_FUNC(memtable_miss, MEMTABLE_MISS) +DEF_SHOW_FUNC(compaction_key_drop_new, COMPACTION_KEY_DROP_NEWER_ENTRY) +DEF_SHOW_FUNC(compaction_key_drop_obsolete, COMPACTION_KEY_DROP_OBSOLETE) +DEF_SHOW_FUNC(compaction_key_drop_user, COMPACTION_KEY_DROP_USER) +DEF_SHOW_FUNC(number_keys_written, NUMBER_KEYS_WRITTEN) +DEF_SHOW_FUNC(number_keys_read, NUMBER_KEYS_READ) +DEF_SHOW_FUNC(number_keys_updated, NUMBER_KEYS_UPDATED) +DEF_SHOW_FUNC(bytes_written, BYTES_WRITTEN) +DEF_SHOW_FUNC(bytes_read, BYTES_READ) +DEF_SHOW_FUNC(no_file_closes, NO_FILE_CLOSES) +DEF_SHOW_FUNC(no_file_opens, NO_FILE_OPENS) +DEF_SHOW_FUNC(no_file_errors, NO_FILE_ERRORS) +DEF_SHOW_FUNC(l0_slowdown_micros, 
STALL_L0_SLOWDOWN_MICROS) +DEF_SHOW_FUNC(memtable_compaction_micros, STALL_MEMTABLE_COMPACTION_MICROS) +DEF_SHOW_FUNC(l0_num_files_stall_micros, STALL_L0_NUM_FILES_MICROS) +DEF_SHOW_FUNC(rate_limit_delay_millis, RATE_LIMIT_DELAY_MILLIS) +DEF_SHOW_FUNC(num_iterators, NO_ITERATORS) +DEF_SHOW_FUNC(number_multiget_get, NUMBER_MULTIGET_CALLS) +DEF_SHOW_FUNC(number_multiget_keys_read, NUMBER_MULTIGET_KEYS_READ) +DEF_SHOW_FUNC(number_multiget_bytes_read, NUMBER_MULTIGET_BYTES_READ) +DEF_SHOW_FUNC(number_deletes_filtered, NUMBER_FILTERED_DELETES) +DEF_SHOW_FUNC(number_merge_failures, NUMBER_MERGE_FAILURES) +DEF_SHOW_FUNC(bloom_filter_prefix_checked, BLOOM_FILTER_PREFIX_CHECKED) +DEF_SHOW_FUNC(bloom_filter_prefix_useful, BLOOM_FILTER_PREFIX_USEFUL) +DEF_SHOW_FUNC(number_reseeks_iteration, NUMBER_OF_RESEEKS_IN_ITERATION) +DEF_SHOW_FUNC(getupdatessince_calls, GET_UPDATES_SINCE_CALLS) +DEF_SHOW_FUNC(block_cachecompressed_miss, BLOCK_CACHE_COMPRESSED_MISS) +DEF_SHOW_FUNC(block_cachecompressed_hit, BLOCK_CACHE_COMPRESSED_HIT) +DEF_SHOW_FUNC(wal_synced, WAL_FILE_SYNCED) +DEF_SHOW_FUNC(wal_bytes, WAL_FILE_BYTES) +DEF_SHOW_FUNC(write_self, WRITE_DONE_BY_SELF) +DEF_SHOW_FUNC(write_other, WRITE_DONE_BY_OTHER) +DEF_SHOW_FUNC(write_timedout, WRITE_TIMEDOUT) +DEF_SHOW_FUNC(write_wal, WRITE_WITH_WAL) +DEF_SHOW_FUNC(flush_write_bytes, FLUSH_WRITE_BYTES) +DEF_SHOW_FUNC(compact_read_bytes, COMPACT_READ_BYTES) +DEF_SHOW_FUNC(compact_write_bytes, COMPACT_WRITE_BYTES) +DEF_SHOW_FUNC(number_superversion_acquires, NUMBER_SUPERVERSION_ACQUIRES) +DEF_SHOW_FUNC(number_superversion_releases, NUMBER_SUPERVERSION_RELEASES) +DEF_SHOW_FUNC(number_superversion_cleanups, NUMBER_SUPERVERSION_CLEANUPS) +DEF_SHOW_FUNC(number_block_not_compressed, NUMBER_BLOCK_NOT_COMPRESSED) + +static void myrocks_update_status() { + export_stats.rows_deleted = global_stats.rows[ROWS_DELETED]; + export_stats.rows_inserted = global_stats.rows[ROWS_INSERTED]; + export_stats.rows_read = global_stats.rows[ROWS_READ]; + 
export_stats.rows_updated = global_stats.rows[ROWS_UPDATED]; + export_stats.rows_deleted_blind = global_stats.rows[ROWS_DELETED_BLIND]; + + export_stats.system_rows_deleted = global_stats.system_rows[ROWS_DELETED]; + export_stats.system_rows_inserted = global_stats.system_rows[ROWS_INSERTED]; + export_stats.system_rows_read = global_stats.system_rows[ROWS_READ]; + export_stats.system_rows_updated = global_stats.system_rows[ROWS_UPDATED]; +} + +static SHOW_VAR myrocks_status_variables[] = { + DEF_STATUS_VAR_FUNC("rows_deleted", &export_stats.rows_deleted, + SHOW_LONGLONG), + DEF_STATUS_VAR_FUNC("rows_inserted", &export_stats.rows_inserted, + SHOW_LONGLONG), + DEF_STATUS_VAR_FUNC("rows_read", &export_stats.rows_read, SHOW_LONGLONG), + DEF_STATUS_VAR_FUNC("rows_updated", &export_stats.rows_updated, + SHOW_LONGLONG), + DEF_STATUS_VAR_FUNC("rows_deleted_blind", + &export_stats.rows_deleted_blind, SHOW_LONGLONG), + DEF_STATUS_VAR_FUNC("system_rows_deleted", + &export_stats.system_rows_deleted, SHOW_LONGLONG), + DEF_STATUS_VAR_FUNC("system_rows_inserted", + &export_stats.system_rows_inserted, SHOW_LONGLONG), + DEF_STATUS_VAR_FUNC("system_rows_read", &export_stats.system_rows_read, + SHOW_LONGLONG), + DEF_STATUS_VAR_FUNC("system_rows_updated", + &export_stats.system_rows_updated, SHOW_LONGLONG), + + {NullS, NullS, SHOW_LONG}}; + +static void show_myrocks_vars(THD *thd, SHOW_VAR *var, char *buff) { + myrocks_update_status(); + var->type = SHOW_ARRAY; + var->value = reinterpret_cast(&myrocks_status_variables); +} + +static SHOW_VAR rocksdb_status_vars[] = { + DEF_STATUS_VAR(block_cache_miss), + DEF_STATUS_VAR(block_cache_hit), + DEF_STATUS_VAR(block_cache_add), + DEF_STATUS_VAR(block_cache_index_miss), + DEF_STATUS_VAR(block_cache_index_hit), + DEF_STATUS_VAR(block_cache_filter_miss), + DEF_STATUS_VAR(block_cache_filter_hit), + DEF_STATUS_VAR(block_cache_data_miss), + DEF_STATUS_VAR(block_cache_data_hit), + DEF_STATUS_VAR(bloom_filter_useful), + DEF_STATUS_VAR(memtable_hit), 
+ DEF_STATUS_VAR(memtable_miss), + DEF_STATUS_VAR(compaction_key_drop_new), + DEF_STATUS_VAR(compaction_key_drop_obsolete), + DEF_STATUS_VAR(compaction_key_drop_user), + DEF_STATUS_VAR(number_keys_written), + DEF_STATUS_VAR(number_keys_read), + DEF_STATUS_VAR(number_keys_updated), + DEF_STATUS_VAR(bytes_written), + DEF_STATUS_VAR(bytes_read), + DEF_STATUS_VAR(no_file_closes), + DEF_STATUS_VAR(no_file_opens), + DEF_STATUS_VAR(no_file_errors), + DEF_STATUS_VAR(l0_slowdown_micros), + DEF_STATUS_VAR(memtable_compaction_micros), + DEF_STATUS_VAR(l0_num_files_stall_micros), + DEF_STATUS_VAR(rate_limit_delay_millis), + DEF_STATUS_VAR(num_iterators), + DEF_STATUS_VAR(number_multiget_get), + DEF_STATUS_VAR(number_multiget_keys_read), + DEF_STATUS_VAR(number_multiget_bytes_read), + DEF_STATUS_VAR(number_deletes_filtered), + DEF_STATUS_VAR(number_merge_failures), + DEF_STATUS_VAR(bloom_filter_prefix_checked), + DEF_STATUS_VAR(bloom_filter_prefix_useful), + DEF_STATUS_VAR(number_reseeks_iteration), + DEF_STATUS_VAR(getupdatessince_calls), + DEF_STATUS_VAR(block_cachecompressed_miss), + DEF_STATUS_VAR(block_cachecompressed_hit), + DEF_STATUS_VAR(wal_synced), + DEF_STATUS_VAR(wal_bytes), + DEF_STATUS_VAR(write_self), + DEF_STATUS_VAR(write_other), + DEF_STATUS_VAR(write_timedout), + DEF_STATUS_VAR(write_wal), + DEF_STATUS_VAR(flush_write_bytes), + DEF_STATUS_VAR(compact_read_bytes), + DEF_STATUS_VAR(compact_write_bytes), + DEF_STATUS_VAR(number_superversion_acquires), + DEF_STATUS_VAR(number_superversion_releases), + DEF_STATUS_VAR(number_superversion_cleanups), + DEF_STATUS_VAR(number_block_not_compressed), + DEF_STATUS_VAR_PTR("snapshot_conflict_errors", + &rocksdb_snapshot_conflict_errors, SHOW_LONGLONG), + DEF_STATUS_VAR_PTR("wal_group_syncs", &rocksdb_wal_group_syncs, + SHOW_LONGLONG), + DEF_STATUS_VAR_PTR("number_stat_computes", &rocksdb_number_stat_computes, + SHOW_LONGLONG), + DEF_STATUS_VAR_PTR("number_sst_entry_put", &rocksdb_num_sst_entry_put, + SHOW_LONGLONG), + 
DEF_STATUS_VAR_PTR("number_sst_entry_delete", &rocksdb_num_sst_entry_delete, + SHOW_LONGLONG), + DEF_STATUS_VAR_PTR("number_sst_entry_singledelete", + &rocksdb_num_sst_entry_singledelete, SHOW_LONGLONG), + DEF_STATUS_VAR_PTR("number_sst_entry_merge", &rocksdb_num_sst_entry_merge, + SHOW_LONGLONG), + DEF_STATUS_VAR_PTR("number_sst_entry_other", &rocksdb_num_sst_entry_other, + SHOW_LONGLONG), + {"rocksdb", reinterpret_cast(&show_myrocks_vars), SHOW_FUNC}, + {NullS, NullS, SHOW_LONG}}; + +/* + Background thread's main logic +*/ + +void Rdb_background_thread::run() { + // How many seconds to wait till flushing the WAL next time. + const int WAKE_UP_INTERVAL = 1; + + timespec ts_next_sync; + set_timespec(ts_next_sync, WAKE_UP_INTERVAL); + + for (;;) { + // Wait until the next timeout or until we receive a signal to stop the + // thread. Request to stop the thread should only be triggered when the + // storage engine is being unloaded. + RDB_MUTEX_LOCK_CHECK(m_signal_mutex); + const auto ret MY_ATTRIBUTE((__unused__)) = + mysql_cond_timedwait(&m_signal_cond, &m_signal_mutex, &ts_next_sync); + + // Check that we receive only the expected error codes. + DBUG_ASSERT(ret == 0 || ret == ETIMEDOUT); + const bool local_stop = m_stop; + const bool local_save_stats = m_save_stats; + reset(); + RDB_MUTEX_UNLOCK_CHECK(m_signal_mutex); + + if (local_stop) { + // If we're here then that's because condition variable was signaled by + // another thread and we're shutting down. Break out the loop to make + // sure that shutdown thread can proceed. + break; + } + + // This path should be taken only when the timer expired. + DBUG_ASSERT(ret == ETIMEDOUT); + + if (local_save_stats) { + ddl_manager.persist_stats(); + } + + // Set the next timestamp for mysql_cond_timedwait() (which ends up calling + // pthread_cond_timedwait()) to wait on. + set_timespec(ts_next_sync, WAKE_UP_INTERVAL); + + // Flush the WAL. 
+ if (rdb && rocksdb_background_sync) { + DBUG_ASSERT(!rocksdb_db_options.allow_mmap_writes); + const rocksdb::Status s = rdb->SyncWAL(); + if (!s.ok()) { + rdb_handle_io_error(s, RDB_IO_ERROR_BG_THREAD); + } + } + } + + // save remaining stats which might've left unsaved + ddl_manager.persist_stats(); +} + +/** + Deciding if it is possible to use bloom filter or not. + + @detail + Even if bloom filter exists, it is not always possible + to use bloom filter. If using bloom filter when you shouldn't, + false negative may happen -- fewer rows than expected may be returned. + It is users' responsibility to use bloom filter correctly. + + If bloom filter does not exist, return value does not matter because + RocksDB does not use bloom filter internally. + + @param kd + @param eq_cond Equal condition part of the key. This always includes + system index id (4 bytes). + @param use_all_keys True if all key parts are set with equal conditions. + This is aware of extended keys. +*/ +bool can_use_bloom_filter(THD *thd, const Rdb_key_def &kd, + const rocksdb::Slice &eq_cond, + const bool use_all_keys, bool is_ascending) { + bool can_use = false; + + if (THDVAR(thd, skip_bloom_filter_on_read)) { + return can_use; + } + + const rocksdb::SliceTransform *prefix_extractor = kd.get_extractor(); + if (prefix_extractor) { + /* + This is an optimized use case for CappedPrefixTransform. + If eq_cond length >= prefix extractor length and if + all keys are used for equal lookup, it is + always possible to use bloom filter. + + Prefix bloom filter can't be used on descending scan with + prefix lookup (i.e. WHERE id1=1 ORDER BY id2 DESC), because of + RocksDB's limitation. On ascending (or not sorting) scan, + keys longer than the capped prefix length will be truncated down + to the capped length and the resulting key is added to the bloom filter. + + Keys shorter than the capped prefix length will be added to + the bloom filter. 
When keys are looked up, key conditionals + longer than the capped length can be used; key conditionals + shorter require all parts of the key to be available + for the short key match. + */ + if ((use_all_keys && prefix_extractor->InRange(eq_cond)) + || prefix_extractor->SameResultWhenAppended(eq_cond)) + can_use = true; + else + can_use = false; + } else { + /* + if prefix extractor is not defined, all key parts have to be + used by eq_cond. + */ + if (use_all_keys) + can_use = true; + else + can_use = false; + } + + return can_use; +} + +/* For modules that need access to the global data structures */ +rocksdb::TransactionDB *rdb_get_rocksdb_db() { return rdb; } + +Rdb_cf_manager &rdb_get_cf_manager() { return cf_manager; } + +rocksdb::BlockBasedTableOptions &rdb_get_table_options() { + return rocksdb_tbl_options; +} + +int rdb_get_table_perf_counters(const char *const tablename, + Rdb_perf_counters *const counters) { + DBUG_ASSERT(counters != nullptr); + DBUG_ASSERT(tablename != nullptr); + + Rdb_table_handler *table_handler; + table_handler = rdb_open_tables.get_table_handler(tablename); + if (table_handler == nullptr) { + return HA_ERR_INTERNAL_ERROR; + } + + counters->load(table_handler->m_table_perf_context); + + rdb_open_tables.release_table_handler(table_handler); + return HA_EXIT_SUCCESS; +} + +const char *get_rdb_io_error_string(const RDB_IO_ERROR_TYPE err_type) { + // If this assertion fails then this means that a member has been either added + // to or removed from RDB_IO_ERROR_TYPE enum and this function needs to be + // changed to return the appropriate value. 
+ static_assert(RDB_IO_ERROR_LAST == 4, "Please handle all the error types."); + + switch (err_type) { + case RDB_IO_ERROR_TYPE::RDB_IO_ERROR_TX_COMMIT: + return "RDB_IO_ERROR_TX_COMMIT"; + case RDB_IO_ERROR_TYPE::RDB_IO_ERROR_DICT_COMMIT: + return "RDB_IO_ERROR_DICT_COMMIT"; + case RDB_IO_ERROR_TYPE::RDB_IO_ERROR_BG_THREAD: + return "RDB_IO_ERROR_BG_THREAD"; + case RDB_IO_ERROR_TYPE::RDB_IO_ERROR_GENERAL: + return "RDB_IO_ERROR_GENERAL"; + default: + DBUG_ASSERT(false); + return "(unknown)"; + } +} + +// In case of core dump generation we want this function NOT to be optimized +// so that we can capture as much data as possible to debug the root cause +// more efficiently. +#ifdef __GNUC__ +#pragma GCC push_options +#pragma GCC optimize("O0") +#endif + +void rdb_handle_io_error(const rocksdb::Status status, + const RDB_IO_ERROR_TYPE err_type) { + if (status.IsIOError()) { + switch (err_type) { + case RDB_IO_ERROR_TX_COMMIT: + case RDB_IO_ERROR_DICT_COMMIT: { + /* NO_LINT_DEBUG */ + sql_print_error("MyRocks: failed to write to WAL. Error type = %s, " + "status code = %d, status = %s", + get_rdb_io_error_string(err_type), status.code(), + status.ToString().c_str()); + /* NO_LINT_DEBUG */ + sql_print_error("MyRocks: aborting on WAL write error."); + abort_with_stack_traces(); + break; + } + case RDB_IO_ERROR_BG_THREAD: { + /* NO_LINT_DEBUG */ + sql_print_warning("MyRocks: BG thread failed to write to RocksDB. " + "Error type = %s, status code = %d, status = %s", + get_rdb_io_error_string(err_type), status.code(), + status.ToString().c_str()); + break; + } + case RDB_IO_ERROR_GENERAL: { + /* NO_LINT_DEBUG */ + sql_print_error("MyRocks: failed on I/O. 
Error type = %s, " + "status code = %d, status = %s", + get_rdb_io_error_string(err_type), status.code(), + status.ToString().c_str()); + /* NO_LINT_DEBUG */ + sql_print_error("MyRocks: aborting on I/O error."); + abort_with_stack_traces(); + break; + } + default: + DBUG_ASSERT(0); + break; + } + } else if (status.IsCorruption()) { + /* NO_LINT_DEBUG */ + sql_print_error("MyRocks: data corruption detected! Error type = %s, " + "status code = %d, status = %s", + get_rdb_io_error_string(err_type), status.code(), + status.ToString().c_str()); + /* NO_LINT_DEBUG */ + sql_print_error("MyRocks: aborting because of data corruption."); + abort_with_stack_traces(); + } else if (!status.ok()) { + switch (err_type) { + case RDB_IO_ERROR_DICT_COMMIT: { + /* NO_LINT_DEBUG */ + sql_print_error("MyRocks: failed to write to WAL (dictionary). " + "Error type = %s, status code = %d, status = %s", + get_rdb_io_error_string(err_type), status.code(), + status.ToString().c_str()); + /* NO_LINT_DEBUG */ + sql_print_error("MyRocks: aborting on WAL write error."); + abort_with_stack_traces(); + break; + } + default: + /* NO_LINT_DEBUG */ + sql_print_warning("MyRocks: failed to read/write in RocksDB. 
" + "Error type = %s, status code = %d, status = %s", + get_rdb_io_error_string(err_type), status.code(), + status.ToString().c_str()); + break; + } + } +} +#ifdef __GNUC__ +#pragma GCC pop_options +#endif + +Rdb_dict_manager *rdb_get_dict_manager(void) { return &dict_manager; } + +Rdb_ddl_manager *rdb_get_ddl_manager(void) { return &ddl_manager; } + +Rdb_binlog_manager *rdb_get_binlog_manager(void) { return &binlog_manager; } + +void rocksdb_set_compaction_options( + my_core::THD *const thd MY_ATTRIBUTE((__unused__)), + my_core::st_mysql_sys_var *const var MY_ATTRIBUTE((__unused__)), + void *const var_ptr, const void *const save) { + if (var_ptr && save) { + *(uint64_t *)var_ptr = *(const uint64_t *)save; + } + const Rdb_compact_params params = { + (uint64_t)rocksdb_compaction_sequential_deletes, + (uint64_t)rocksdb_compaction_sequential_deletes_window, + (uint64_t)rocksdb_compaction_sequential_deletes_file_size}; + if (properties_collector_factory) { + properties_collector_factory->SetCompactionParams(params); + } +} + +void rocksdb_set_table_stats_sampling_pct( + my_core::THD *const thd MY_ATTRIBUTE((__unused__)), + my_core::st_mysql_sys_var *const var MY_ATTRIBUTE((__unused__)), + void *const var_ptr MY_ATTRIBUTE((__unused__)), const void *const save) { + RDB_MUTEX_LOCK_CHECK(rdb_sysvars_mutex); + + const uint32_t new_val = *static_cast(save); + + if (new_val != rocksdb_table_stats_sampling_pct) { + rocksdb_table_stats_sampling_pct = new_val; + + if (properties_collector_factory) { + properties_collector_factory->SetTableStatsSamplingPct( + rocksdb_table_stats_sampling_pct); + } + } + + RDB_MUTEX_UNLOCK_CHECK(rdb_sysvars_mutex); +} + +/* + This function allows setting the rate limiter's bytes per second value + but only if the rate limiter is turned on which has to be done at startup. + If the rate is already 0 (turned off) or we are changing it to 0 (trying + to turn it off) this function will push a warning to the client and do + nothing. 
+ This is similar to the code in innodb_doublewrite_update (found in + storage/innobase/handler/ha_innodb.cc). +*/ +void rocksdb_set_rate_limiter_bytes_per_sec( + my_core::THD *const thd, + my_core::st_mysql_sys_var *const var MY_ATTRIBUTE((__unused__)), + void *const var_ptr MY_ATTRIBUTE((__unused__)), const void *const save) { + const uint64_t new_val = *static_cast(save); + if (new_val == 0 || rocksdb_rate_limiter_bytes_per_sec == 0) { + /* + If a rate_limiter was not enabled at startup we can't change it nor + can we disable it if one was created at startup + */ + push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN, ER_WRONG_ARGUMENTS, + "RocksDB: rocksdb_rate_limiter_bytes_per_sec cannot " + "be dynamically changed to or from 0. Do a clean " + "shutdown if you want to change it from or to 0."); + } else if (new_val != rocksdb_rate_limiter_bytes_per_sec) { + /* Apply the new value to the rate limiter and store it locally */ + DBUG_ASSERT(rocksdb_rate_limiter != nullptr); + rocksdb_rate_limiter_bytes_per_sec = new_val; + rocksdb_rate_limiter->SetBytesPerSecond(new_val); + } +} + +void rocksdb_set_delayed_write_rate(THD *thd, struct st_mysql_sys_var *var, + void *var_ptr, const void *save) { + const uint64_t new_val = *static_cast(save); + if (rocksdb_delayed_write_rate != new_val) { + rocksdb_delayed_write_rate = new_val; + rocksdb_db_options.delayed_write_rate = new_val; + } +} + +void rdb_set_collation_exception_list(const char *const exception_list) { + DBUG_ASSERT(rdb_collation_exceptions != nullptr); + + if (!rdb_collation_exceptions->set_patterns(exception_list)) { + my_core::warn_about_bad_patterns(rdb_collation_exceptions, + "strict_collation_exceptions"); + } +} + +void rocksdb_set_collation_exception_list(THD *const thd, + struct st_mysql_sys_var *const var, + void *const var_ptr, + const void *const save) { + const char *const val = *static_cast(save); + + rdb_set_collation_exception_list(val == nullptr ? 
"" : val); + + //psergey-todo: what is the purpose of the below?? + const char *val_copy= val? my_strdup(val, MYF(0)): nullptr; + my_free(*static_cast(var_ptr)); + *static_cast(var_ptr) = val_copy; +} + +void rocksdb_set_bulk_load(THD *const thd, struct st_mysql_sys_var *const var + MY_ATTRIBUTE((__unused__)), + void *const var_ptr, const void *const save) { + Rdb_transaction *&tx = get_tx_from_thd(thd); + + if (tx != nullptr) { + const int rc = tx->finish_bulk_load(); + if (rc != 0) { + // NO_LINT_DEBUG + sql_print_error("RocksDB: Error %d finalizing last SST file while " + "setting bulk loading variable", + rc); + abort_with_stack_traces(); + } + } + + *static_cast(var_ptr) = *static_cast(save); +} + +static void rocksdb_set_max_background_compactions( + THD *thd, struct st_mysql_sys_var *const var, void *const var_ptr, + const void *const save) { + DBUG_ASSERT(save != nullptr); + + RDB_MUTEX_LOCK_CHECK(rdb_sysvars_mutex); + + rocksdb_db_options.max_background_compactions = + *static_cast(save); + rocksdb_db_options.env->SetBackgroundThreads( + rocksdb_db_options.max_background_compactions, + rocksdb::Env::Priority::LOW); + + RDB_MUTEX_UNLOCK_CHECK(rdb_sysvars_mutex); +} + +void rdb_queue_save_stats_request() { rdb_bg_thread.request_save_stats(); } + +#ifdef MARIAROCKS_NOT_YET // MDEV-10976 + +void ha_rocksdb::rpl_before_delete_rows() { + DBUG_ENTER_FUNC(); + + m_in_rpl_delete_rows = true; + + DBUG_VOID_RETURN; +} + +void ha_rocksdb::rpl_after_delete_rows() { + DBUG_ENTER_FUNC(); + + m_in_rpl_delete_rows = false; + + DBUG_VOID_RETURN; +} + +void ha_rocksdb::rpl_before_update_rows() { + DBUG_ENTER_FUNC(); + + m_in_rpl_update_rows = true; + + DBUG_VOID_RETURN; +} + +void ha_rocksdb::rpl_after_update_rows() { + DBUG_ENTER_FUNC(); + + m_in_rpl_update_rows = false; + + DBUG_VOID_RETURN; +} + +/** + @brief + Read Free Replication can be used or not. Returning False means + Read Free Replication can be used. 
Read Free Replication can be used + on UPDATE or DELETE row events, and table must have user defined + primary key. +*/ +bool ha_rocksdb::use_read_free_rpl() { + DBUG_ENTER_FUNC(); + + DBUG_RETURN((m_in_rpl_delete_rows || m_in_rpl_update_rows) && + !has_hidden_pk(table) && m_use_read_free_rpl); +} +#endif // MARIAROCKS_NOT_YET + +double ha_rocksdb::read_time(uint index, uint ranges, ha_rows rows) { + DBUG_ENTER_FUNC(); + + if (index != table->s->primary_key) { + /* Non covering index range scan */ + DBUG_RETURN(handler::read_time(index, ranges, rows)); + } + + DBUG_RETURN((rows / 20.0) + 1); +} + +} // namespace myrocks + +/* + Register the storage engine plugin outside of myrocks namespace + so that mysql_declare_plugin does not get confused when it does + its name generation. +*/ + +struct st_mysql_storage_engine rocksdb_storage_engine = { + MYSQL_HANDLERTON_INTERFACE_VERSION}; + +maria_declare_plugin(rocksdb_se){ + MYSQL_STORAGE_ENGINE_PLUGIN, /* Plugin Type */ + &rocksdb_storage_engine, /* Plugin Descriptor */ + "ROCKSDB", /* Plugin Name */ + "Monty Program Ab", /* Plugin Author */ + "RocksDB storage engine", /* Plugin Description */ + PLUGIN_LICENSE_GPL, /* Plugin Licence */ + myrocks::rocksdb_init_func, /* Plugin Entry Point */ + myrocks::rocksdb_done_func, /* Plugin Deinitializer */ + 0x0001, /* version number (0.1) */ + myrocks::rocksdb_status_vars, /* status variables */ + myrocks::rocksdb_system_variables, /* system variables */ + "1.0", /* string version */ + MariaDB_PLUGIN_MATURITY_ALPHA /* maturity */ +}, + myrocks::rdb_i_s_cfstats, myrocks::rdb_i_s_dbstats, + myrocks::rdb_i_s_perf_context, myrocks::rdb_i_s_perf_context_global, + myrocks::rdb_i_s_cfoptions, myrocks::rdb_i_s_compact_stats, + myrocks::rdb_i_s_global_info, myrocks::rdb_i_s_ddl, + myrocks::rdb_i_s_index_file_map, myrocks::rdb_i_s_lock_info, + myrocks::rdb_i_s_trx_info +maria_declare_plugin_end; diff --git a/storage/rocksdb/ha_rocksdb.h b/storage/rocksdb/ha_rocksdb.h new file mode 100644 
index 0000000000000..5852ad4e95446 --- /dev/null +++ b/storage/rocksdb/ha_rocksdb.h @@ -0,0 +1,1262 @@ +/* + Copyright (c) 2012,2013 Monty Program Ab + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; version 2 of the License. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ +#pragma once + +#ifdef USE_PRAGMA_INTERFACE +#pragma interface /* gcc class implementation */ +#endif + +/* C++ standard header files */ +#include +#include +#include +#include + +/* MySQL header files */ +#include "./handler.h" /* handler */ +#include "./my_global.h" /* ulonglong */ +#include "./sql_string.h" + +#ifdef _WIN32 +#undef pthread_key_create +#undef pthread_key_delete +#undef pthread_setspecific +#undef pthread_getspecific +#endif + + +/* RocksDB header files */ +#include "rocksdb/cache.h" +#include "rocksdb/perf_context.h" +#include "rocksdb/statistics.h" +#include "rocksdb/utilities/options_util.h" +#include "rocksdb/utilities/transaction_db.h" +#include "rocksdb/utilities/write_batch_with_index.h" + +/* MyRocks header files */ +#include "./rdb_comparator.h" +#include "./rdb_index_merge.h" +#include "./rdb_perf_context.h" +#include "./rdb_sst_info.h" +#include "./rdb_utils.h" +#include "./ut0counter.h" + +class Field_blob; +class Field_varstring; +/** + @note MyRocks Coding Conventions: + MyRocks code follows the baseline MySQL coding conventions, available at + http://dev.mysql.com/doc/internals/en/coding-guidelines.html, with several + refinements (@see 
/storage/rocksdb/README file). +*/ + +namespace myrocks { + +/* + * class for exporting transaction information for + * information_schema.rocksdb_trx + */ +struct Rdb_trx_info { + std::string name; + ulonglong trx_id; + ulonglong write_count; + ulonglong lock_count; + int timeout_sec; + std::string state; + std::string waiting_key; + ulonglong waiting_cf_id; + int is_replication; + int skip_trx_api; + int read_only; + int deadlock_detect; + int num_ongoing_bulk_load; + ulong thread_id; + std::string query_str; +}; + +std::vector rdb_get_all_trx_info(); + +/* + This is + - the name of the default Column Family (the CF which stores indexes which + didn't explicitly specify which CF they are in) + - the name used to set the default column family parameter for per-cf + arguments. +*/ +const char *const DEFAULT_CF_NAME = "default"; + +/* + This is the name of the Column Family used for storing the data dictionary. +*/ +const char *const DEFAULT_SYSTEM_CF_NAME = "__system__"; + +/* + This is the name of the hidden primary key for tables with no pk. +*/ +const char *const HIDDEN_PK_NAME = "HIDDEN_PK_ID"; + +/* + Column family name which means "put this index into its own column family". + See Rdb_cf_manager::get_per_index_cf_name(). +*/ +const char *const PER_INDEX_CF_NAME = "$per_index_cf"; + +/* + Name for the background thread. +*/ +const char *const BG_THREAD_NAME = "myrocks-bg"; + +/* + Name for the drop index thread. +*/ +const char *const INDEX_THREAD_NAME = "myrocks-index"; + +/* + Separator between partition name and the qualifier. Sample usage: + + - p0_cfname=foo + - p3_tts_col=bar +*/ +const char RDB_PER_PARTITION_QUALIFIER_NAME_SEP = '_'; + +/* + Separator between qualifier name and value. Sample usage: + + - p0_cfname=foo + - p3_tts_col=bar +*/ +const char RDB_PER_PARTITION_QUALIFIER_VALUE_SEP = '='; + +/* + Separator between multiple qualifier assignments. 
Sample usage: + + - p0_cfname=foo;p1_cfname=bar;p2_cfname=baz +*/ +const char RDB_QUALIFIER_SEP = ';'; + +/* + Qualifier name for a custom per partition column family. +*/ +const char *const RDB_CF_NAME_QUALIFIER = "cfname"; + +/* + Default, minimal valid, and maximum valid sampling rate values when collecting + statistics about table. +*/ +#define RDB_DEFAULT_TBL_STATS_SAMPLE_PCT 10 +#define RDB_TBL_STATS_SAMPLE_PCT_MIN 1 +#define RDB_TBL_STATS_SAMPLE_PCT_MAX 100 + +/* + Default and maximum values for rocksdb-compaction-sequential-deletes and + rocksdb-compaction-sequential-deletes-window to add basic boundary checking. +*/ +#define DEFAULT_COMPACTION_SEQUENTIAL_DELETES 0 +#define MAX_COMPACTION_SEQUENTIAL_DELETES 2000000 + +#define DEFAULT_COMPACTION_SEQUENTIAL_DELETES_WINDOW 0 +#define MAX_COMPACTION_SEQUENTIAL_DELETES_WINDOW 2000000 + +/* + Default and maximum values for various compaction and flushing related + options. Numbers are based on the hardware we currently use and our internal + benchmarks which indicate that parallelization helps with the speed of + compactions. + + Ideally of course we'll use heuristic technique to determine the number of + CPU-s and derive the values from there. This however has its own set of + problems and we'll choose simplicity for now. +*/ +#define MAX_BACKGROUND_COMPACTIONS 64 +#define MAX_BACKGROUND_FLUSHES 64 + +#define DEFAULT_SUBCOMPACTIONS 1 +#define MAX_SUBCOMPACTIONS 64 + +/* + Defines the field sizes for serializing XID object to a string representation. + string byte format: [field_size: field_value, ...] 
+ [ + 8: XID.formatID, + 1: XID.gtrid_length, + 1: XID.bqual_length, + XID.gtrid_length + XID.bqual_length: XID.data + ] +*/ +#define RDB_FORMATID_SZ 8 +#define RDB_GTRID_SZ 1 +#define RDB_BQUAL_SZ 1 +#define RDB_XIDHDR_LEN (RDB_FORMATID_SZ + RDB_GTRID_SZ + RDB_BQUAL_SZ) + +/* collations, used in MariaRocks */ +enum collations_used { + COLLATION_UTF8MB4_BIN = 46, + COLLATION_LATIN1_BIN = 47, + COLLATION_UTF16LE_BIN = 55, + COLLATION_UTF32_BIN = 61, + COLLATION_UTF16_BIN = 62, + COLLATION_BINARY = 63, + COLLATION_UTF8_BIN = 83 +}; + +/* + To fix an unhandled exception we specify the upper bound as LONGLONGMAX + instead of ULONGLONGMAX because the latter is -1 and causes an exception when + cast to jlong (signed) of JNI + + The reason behind the cast issue is the lack of unsigned int support in Java. +*/ +#define MAX_RATE_LIMITER_BYTES_PER_SEC static_cast(LONGLONG_MAX) + +/* + Hidden PK column (for tables with no primary key) is a longlong (aka 8 bytes). + static_assert() in code will validate this assumption. +*/ +#define ROCKSDB_SIZEOF_HIDDEN_PK_COLUMN sizeof(longlong) + +/* + MyRocks specific error codes. NB! Please make sure that you will update + HA_ERR_ROCKSDB_LAST when adding new ones. +*/ +#define HA_ERR_ROCKSDB_UNIQUE_NOT_SUPPORTED (HA_ERR_LAST + 1) +#define HA_ERR_ROCKSDB_PK_REQUIRED (HA_ERR_LAST + 2) +#define HA_ERR_ROCKSDB_TOO_MANY_LOCKS (HA_ERR_LAST + 3) +#define HA_ERR_ROCKSDB_TABLE_DATA_DIRECTORY_NOT_SUPPORTED (HA_ERR_LAST + 4) +#define HA_ERR_ROCKSDB_TABLE_INDEX_DIRECTORY_NOT_SUPPORTED (HA_ERR_LAST + 5) +#define HA_ERR_ROCKSDB_LAST HA_ERR_ROCKSDB_TABLE_INDEX_DIRECTORY_NOT_SUPPORTED + +inline bool looks_like_per_index_cf_typo(const char *const name) { + return (name && name[0] == '$' && strcmp(name, PER_INDEX_CF_NAME)); +} + +/** + @brief + Rdb_table_handler is a reference-counted structure storing information for + each open table. All the objects are stored in a global hash map. + + //TODO: join this with Rdb_tbl_def ? 
+*/ +struct Rdb_table_handler { + char *m_table_name; + uint m_table_name_length; + int m_ref_count; + + my_core::THR_LOCK m_thr_lock; ///< MySQL latch needed by m_db_lock + + /* Stores cumulative table statistics */ + my_io_perf_atomic_t m_io_perf_read; + Rdb_atomic_perf_counters m_table_perf_context; +}; + +class Rdb_key_def; +class Rdb_tbl_def; +class Rdb_transaction; +class Rdb_transaction_impl; +class Rdb_writebatch_impl; +class Rdb_field_encoder; + +const char *const rocksdb_hton_name = "ROCKSDB"; + +typedef struct _gl_index_id_s { + uint32_t cf_id; + uint32_t index_id; + bool operator==(const struct _gl_index_id_s &other) const { + return cf_id == other.cf_id && index_id == other.index_id; + } + bool operator!=(const struct _gl_index_id_s &other) const { + return cf_id != other.cf_id || index_id != other.index_id; + } + bool operator<(const struct _gl_index_id_s &other) const { + return cf_id < other.cf_id || + (cf_id == other.cf_id && index_id < other.index_id); + } + bool operator<=(const struct _gl_index_id_s &other) const { + return cf_id < other.cf_id || + (cf_id == other.cf_id && index_id <= other.index_id); + } + bool operator>(const struct _gl_index_id_s &other) const { + return cf_id > other.cf_id || + (cf_id == other.cf_id && index_id > other.index_id); + } + bool operator>=(const struct _gl_index_id_s &other) const { + return cf_id > other.cf_id || + (cf_id == other.cf_id && index_id >= other.index_id); + } +} GL_INDEX_ID; + +enum operation_type { + ROWS_DELETED = 0, + ROWS_INSERTED, + ROWS_READ, + ROWS_UPDATED, + ROWS_DELETED_BLIND, + ROWS_MAX +}; + +#if defined(HAVE_SCHED_GETCPU) +#define RDB_INDEXER get_sched_indexer_t +#else +#define RDB_INDEXER thread_id_indexer_t +#endif + +/* Global statistics struct used inside MyRocks */ +struct st_global_stats { + ib_counter_t rows[ROWS_MAX]; + + // system_rows_ stats are only for system + // tables. They are not counted in rows_* stats. 
+ ib_counter_t system_rows[ROWS_MAX]; +}; + +/* Struct used for exporting status to MySQL */ +struct st_export_stats { + ulonglong rows_deleted; + ulonglong rows_inserted; + ulonglong rows_read; + ulonglong rows_updated; + ulonglong rows_deleted_blind; + + ulonglong system_rows_deleted; + ulonglong system_rows_inserted; + ulonglong system_rows_read; + ulonglong system_rows_updated; +}; + +} // namespace myrocks + +#include "./rdb_buff.h" + +/* Provide hash function for GL_INDEX_ID so we can include it in sets */ +namespace std { +template <> struct hash { + std::size_t operator()(const myrocks::GL_INDEX_ID &gl_index_id) const { + const uint64_t val = + ((uint64_t)gl_index_id.cf_id << 32 | (uint64_t)gl_index_id.index_id); + return std::hash()(val); + } +}; +} // namespace std + +namespace myrocks { + +/** + @brief + Class definition for ROCKSDB storage engine plugin handler +*/ + +class ha_rocksdb : public my_core::handler { + my_core::THR_LOCK_DATA m_db_lock; ///< MySQL database lock + + Rdb_table_handler *m_table_handler; ///< Open table handler + + /* Iterator used for range scans and for full table/index scans */ + rocksdb::Iterator *m_scan_it; + + /* Whether m_scan_it was created with skip_bloom=true */ + bool m_scan_it_skips_bloom; + + const rocksdb::Snapshot *m_scan_it_snapshot; + + Rdb_tbl_def *m_tbl_def; + + /* Primary Key encoder from KeyTupleFormat to StorageFormat */ + std::shared_ptr m_pk_descr; + + /* Array of index descriptors */ + std::shared_ptr *m_key_descr_arr; + + bool check_keyread_allowed(uint inx, uint part, bool all_parts) const; + + /* + Number of key parts in PK. This is the same as + table->key_info[table->s->primary_key].keyparts + */ + uint m_pk_key_parts; + + /* + TRUE <=> Primary Key columns can be decoded from the index + */ + mutable bool m_pk_can_be_decoded; + + /* + TRUE <=> Some fields in the PK may require unpack_info. 
+ */ + bool m_maybe_unpack_info; + + uchar *m_pk_tuple; /* Buffer for storing PK in KeyTupleFormat */ + uchar *m_pk_packed_tuple; /* Buffer for storing PK in StorageFormat */ + // ^^ todo: change it to 'char*'? TODO: ^ can we join this with last_rowkey? + + /* + Temporary buffers for storing the key part of the Key/Value pair + for secondary indexes. + */ + uchar *m_sk_packed_tuple; + + /* + Temporary buffers for storing end key part of the Key/Value pair. + This is used for range scan only. + */ + uchar *m_end_key_packed_tuple; + + Rdb_string_writer m_sk_tails; + Rdb_string_writer m_pk_unpack_info; + + /* + ha_rockdb->index_read_map(.. HA_READ_KEY_EXACT or similar) will save here + mem-comparable form of the index lookup tuple. + */ + uchar *m_sk_match_prefix; + uint m_sk_match_length; + + /* Buffer space for the above */ + uchar *m_sk_match_prefix_buf; + + /* Second buffers, used by UPDATE. */ + uchar *m_sk_packed_tuple_old; + Rdb_string_writer m_sk_tails_old; + + /* Buffers used for duplicate checking during unique_index_creation */ + uchar *m_dup_sk_packed_tuple; + uchar *m_dup_sk_packed_tuple_old; + + /* + Temporary space for packing VARCHARs (we provide it to + pack_record()/pack_index_tuple() calls). + */ + uchar *m_pack_buffer; + + /* rowkey of the last record we've read, in StorageFormat. */ + String m_last_rowkey; + + /* Buffer used by convert_record_to_storage_format() */ + String m_storage_record; + + /* + Last retrieved record, in table->record[0] data format. + + This is used only when we get the record with rocksdb's Get() call (The + other option is when we get a rocksdb::Slice from an iterator) + */ + std::string m_retrieved_record; + + /* Type of locking to apply to rows */ + enum { RDB_LOCK_NONE, RDB_LOCK_READ, RDB_LOCK_WRITE } m_lock_rows; + + /* TRUE means we're doing an index-only read. FALSE means otherwise. 
*/ + bool m_keyread_only; + + bool m_skip_scan_it_next_call; + + /* TRUE means we are accessing the first row after a snapshot was created */ + bool m_rnd_scan_is_new_snapshot; + + /* TRUE means the replication slave will use Read Free Replication */ + bool m_use_read_free_rpl; + + /* + TRUE means we should skip unique key checks for this table if the + replication lag gets too large + */ + bool m_skip_unique_check; + + /** + @brief + This is a bitmap of indexes (i.e. a set) whose keys (in future, values) may + be changed by this statement. Indexes that are not in the bitmap do not need + to be updated. + @note Valid inside UPDATE statements, IIF(m_update_scope_is_valid == true). + */ + my_core::key_map m_update_scope; + bool m_update_scope_is_valid; + + /* SST information used for bulk loading the primary key */ + std::shared_ptr m_sst_info; + Rdb_transaction *m_bulk_load_tx; + /* Mutex to protect finalizing bulk load */ + mysql_mutex_t m_bulk_load_mutex; + + /* + MySQL index number for duplicate key error + */ + int m_dupp_errkey; + + int create_key_defs(const TABLE *const table_arg, + Rdb_tbl_def *const tbl_def_arg, + const TABLE *const old_table_arg = nullptr, + const Rdb_tbl_def *const old_tbl_def_arg = nullptr) const + MY_ATTRIBUTE((__nonnull__(2, 3), __warn_unused_result__)); + int secondary_index_read(const int keyno, uchar *const buf) + MY_ATTRIBUTE((__nonnull__, __warn_unused_result__)); + void setup_iterator_for_rnd_scan(); + void setup_scan_iterator(const Rdb_key_def &kd, rocksdb::Slice *const slice) + MY_ATTRIBUTE((__nonnull__)) { + setup_scan_iterator(kd, slice, false, false, 0); + } + bool is_ascending(const Rdb_key_def &keydef, + enum ha_rkey_function find_flag) const + MY_ATTRIBUTE((__nonnull__, __warn_unused_result__)); + void setup_scan_iterator(const Rdb_key_def &kd, rocksdb::Slice *slice, + const bool use_all_keys, const bool is_ascending, + const uint eq_cond_len) MY_ATTRIBUTE((__nonnull__)); + void release_scan_iterator(void); + + 
rocksdb::Status + get_for_update(Rdb_transaction *const tx, + rocksdb::ColumnFamilyHandle *const column_family, + const rocksdb::Slice &key, std::string *const value) const; + + int get_row_by_rowid(uchar *const buf, const char *const rowid, + const uint rowid_size, const bool skip_lookup = false) + MY_ATTRIBUTE((__nonnull__, __warn_unused_result__)); + int get_row_by_rowid(uchar *const buf, const uchar *const rowid, + const uint rowid_size, const bool skip_lookup = false) + MY_ATTRIBUTE((__nonnull__, __warn_unused_result__)) { + return get_row_by_rowid(buf, reinterpret_cast(rowid), + rowid_size, skip_lookup); + } + + void update_auto_incr_val(); + void load_auto_incr_value(); + longlong update_hidden_pk_val(); + int load_hidden_pk_value() MY_ATTRIBUTE((__warn_unused_result__)); + int read_hidden_pk_id_from_rowkey(longlong *const hidden_pk_id) + MY_ATTRIBUTE((__nonnull__, __warn_unused_result__)); + bool can_use_single_delete(const uint &index) const + MY_ATTRIBUTE((__warn_unused_result__)); + bool is_blind_delete_enabled(); + bool skip_unique_check() const MY_ATTRIBUTE((__warn_unused_result__)); +#ifdef MARIAROCKS_NOT_YET // MDEV-10975 + void set_force_skip_unique_check(bool skip) override; +#endif + bool commit_in_the_middle() MY_ATTRIBUTE((__warn_unused_result__)); + bool do_bulk_commit(Rdb_transaction *const tx) + MY_ATTRIBUTE((__nonnull__, __warn_unused_result__)); + bool has_hidden_pk(const TABLE *const table) const + MY_ATTRIBUTE((__nonnull__, __warn_unused_result__)); + + void update_row_stats(const operation_type &type); + + void set_last_rowkey(const uchar *const old_data); + + /* + Array of table->s->fields elements telling how to store fields in the + record. 
+ */ + Rdb_field_encoder *m_encoder_arr; + + /* Describes instructions on how to decode the field */ + class READ_FIELD { + public: + /* Points to Rdb_field_encoder describing the field */ + Rdb_field_encoder *m_field_enc; + /* if true, decode the field, otherwise skip it */ + bool m_decode; + /* Skip this many bytes before reading (or skipping) this field */ + int m_skip; + }; + + /* + This tells which table fields should be decoded (or skipped) when + decoding table row from (pk, encoded_row) pair. (Secondary keys are + just always decoded in full currently) + */ + std::vector m_decoders_vect; + + /* Setup field_decoders based on type of scan and table->read_set */ + void setup_read_decoders(); + + /* + Number of bytes in on-disk (storage) record format that are used for + storing SQL NULL flags. + */ + uint m_null_bytes_in_rec; + + void get_storage_type(Rdb_field_encoder *const encoder, const uint &kp); + void setup_field_converters(); + int alloc_key_buffers(const TABLE *const table_arg, + const Rdb_tbl_def *const tbl_def_arg, + bool alloc_alter_buffers = false) + MY_ATTRIBUTE((__nonnull__, __warn_unused_result__)); + void free_key_buffers(); + + // the buffer size should be at least 2*Rdb_key_def::INDEX_NUMBER_SIZE + rocksdb::Range get_range(const int &i, uchar buf[]) const; + + /* + Perf timers for data reads + */ + Rdb_io_perf m_io_perf; + + /* + A counter of how many row checksums were checked for this table. Note that + this does not include checksums for secondary index entries. + */ + my_core::ha_rows m_row_checksums_checked; + + /* + Update stats + */ + void update_stats(void); + +public: + /* + The following two are currently only used for getting the range bounds + from QUICK_SELECT_DESC. + We don't need to implement prepare_index_key_scan[_map] because it is + only used with HA_READ_KEY_EXACT and HA_READ_PREFIX_LAST where one + can infer the bounds of the range being scanned, anyway. 
+ */ + int prepare_index_scan() override; + int prepare_range_scan(const key_range *start_key, + const key_range *end_key) override; + + /* + Controls whether writes include checksums. This is updated from the session + variable + at the start of each query. + */ + bool m_store_row_debug_checksums; + + /* Same as above but for verifying checksums when reading */ + bool m_verify_row_debug_checksums; + int m_checksums_pct; + + ha_rocksdb(my_core::handlerton *const hton, + my_core::TABLE_SHARE *const table_arg); + ~ha_rocksdb() { + int err MY_ATTRIBUTE((__unused__)); + err = finalize_bulk_load(); + DBUG_ASSERT(err == 0); + mysql_mutex_destroy(&m_bulk_load_mutex); + } + + /** @brief + The name that will be used for display purposes. + */ + const char *table_type() const /*override*/ { + DBUG_ENTER_FUNC(); + // MariaDB: this function is not virtual, however ha_innodb + // declares it (and then never uses!) psergey-merge-todo:. + DBUG_RETURN(rocksdb_hton_name); + } + + /* The following is only used by SHOW KEYS: */ + const char *index_type(uint inx) override { + DBUG_ENTER_FUNC(); + + DBUG_RETURN("LSMTREE"); + } + + /* + Not present in MariaDB: + const char **bas_ext() const override; + */ + + /* + See if this is the same base table - this should only be true for different + partitions of the same table. + */ + bool same_table(const ha_rocksdb &other) const; + + /** @brief + This is a list of flags that indicate what functionality the storage engine + implements. The current table flags are documented in handler.h + */ + ulonglong table_flags() const override { + DBUG_ENTER_FUNC(); + + /* + HA_BINLOG_STMT_CAPABLE + We are saying that this engine is just statement capable to have + an engine that can only handle statement-based logging. This is + used in testing. 
+ HA_REC_NOT_IN_SEQ + If we don't set it, filesort crashes, because it assumes rowids are + 1..8 byte numbers + HA_PRIMARY_KEY_IN_READ_INDEX + This flag is always set, even for tables that: + - have no PK + - have some (or all) of PK that can't be decoded from the secondary + index. + */ + DBUG_RETURN(HA_BINLOG_ROW_CAPABLE | HA_BINLOG_STMT_CAPABLE | + HA_REC_NOT_IN_SEQ | HA_CAN_INDEX_BLOBS | + HA_PRIMARY_KEY_IN_READ_INDEX | + HA_PRIMARY_KEY_REQUIRED_FOR_POSITION | HA_NULL_IN_KEY | + HA_PARTIAL_COLUMN_READ | + HA_TABLE_SCAN_ON_INDEX); + } + +private: + bool init_with_fields(); /* no 'override' in MariaDB */ +public: + /** @brief + This is a bitmap of flags that indicates how the storage engine + implements indexes. The current index flags are documented in + handler.h. If you do not implement indexes, just return zero here. + + @details + part is the key part to check. First key part is 0. + If all_parts is set, MySQL wants to know the flags for the combined + index, up to and including 'part'. 
+ */ + ulong index_flags(uint inx, uint part, bool all_parts) const override; + + const key_map *keys_to_use_for_scanning() override { + DBUG_ENTER_FUNC(); + + DBUG_RETURN(&key_map_full); + } + + bool primary_key_is_clustered() override { + DBUG_ENTER_FUNC(); + + DBUG_RETURN(true); + } + + bool should_store_row_debug_checksums() const { + return m_store_row_debug_checksums && (rand() % 100 < m_checksums_pct); + } + + int rename_table(const char *const from, const char *const to) override + MY_ATTRIBUTE((__nonnull__, __warn_unused_result__)); + + int convert_blob_from_storage_format(my_core::Field_blob *const blob, + Rdb_string_reader *const reader, + bool decode) + MY_ATTRIBUTE((__nonnull__, __warn_unused_result__)); + + int convert_varchar_from_storage_format( + my_core::Field_varstring *const field_var, + Rdb_string_reader *const reader, bool decode) + MY_ATTRIBUTE((__nonnull__, __warn_unused_result__)); + + int convert_field_from_storage_format(my_core::Field *const field, + Rdb_string_reader *const reader, + bool decode, uint len) + MY_ATTRIBUTE((__nonnull__, __warn_unused_result__)); + + int convert_record_from_storage_format(const rocksdb::Slice *const key, + const rocksdb::Slice *const value, + uchar *const buf) + MY_ATTRIBUTE((__nonnull__, __warn_unused_result__)); + + int convert_record_from_storage_format(const rocksdb::Slice *const key, + uchar *const buf) + MY_ATTRIBUTE((__nonnull__, __warn_unused_result__)); + + void convert_record_to_storage_format(const rocksdb::Slice &pk_packed_slice, + Rdb_string_writer *const pk_unpack_info, + rocksdb::Slice *const packed_rec) + MY_ATTRIBUTE((__nonnull__)); + + static const std::string gen_cf_name_qualifier_for_partition( + const std::string &s); + + static const std::vector parse_into_tokens(const std::string &s, + const char delim); + + static const std::string generate_cf_name(const uint index, + const TABLE *const table_arg, + const Rdb_tbl_def *const tbl_def_arg, + bool *per_part_match_found); + + static 
const char *get_key_name(const uint index, + const TABLE *const table_arg, + const Rdb_tbl_def *const tbl_def_arg) + MY_ATTRIBUTE((__nonnull__, __warn_unused_result__)); + + static const char *get_key_comment(const uint index, + const TABLE *const table_arg, + const Rdb_tbl_def *const tbl_def_arg) + MY_ATTRIBUTE((__nonnull__, __warn_unused_result__)); + + static bool is_hidden_pk(const uint index, const TABLE *const table_arg, + const Rdb_tbl_def *const tbl_def_arg) + MY_ATTRIBUTE((__nonnull__, __warn_unused_result__)); + + static uint pk_index(const TABLE *const table_arg, + const Rdb_tbl_def *const tbl_def_arg) + MY_ATTRIBUTE((__nonnull__, __warn_unused_result__)); + + static bool is_pk(const uint index, const TABLE *table_arg, + const Rdb_tbl_def *tbl_def_arg) + MY_ATTRIBUTE((__nonnull__, __warn_unused_result__)); + /** @brief + unireg.cc will call max_supported_record_length(), max_supported_keys(), + max_supported_key_parts(), uint max_supported_key_length() + to make sure that the storage engine can handle the data it is about to + send. Return *real* limits of your storage engine here; MySQL will do + min(your_limits, MySQL_limits) automatically. + */ + uint max_supported_record_length() const override { + DBUG_ENTER_FUNC(); + + DBUG_RETURN(HA_MAX_REC_LENGTH); + } + + uint max_supported_keys() const override { + DBUG_ENTER_FUNC(); + + DBUG_RETURN(MAX_INDEXES); + } + + uint max_supported_key_parts() const override { + DBUG_ENTER_FUNC(); + + DBUG_RETURN(MAX_REF_PARTS); + } + + uint max_supported_key_part_length() const override { + DBUG_ENTER_FUNC(); + + DBUG_RETURN(2048); + } + + /** @brief + unireg.cc will call this to make sure that the storage engine can handle + the data it is about to send. Return *real* limits of your storage engine + here; MySQL will do min(your_limits, MySQL_limits) automatically. + + @details + There is no need to implement ..._key_... methods if your engine doesn't + support indexes. 
+ */ + uint max_supported_key_length() const override { + DBUG_ENTER_FUNC(); + + DBUG_RETURN(16 * 1024); /* just to return something*/ + } + + /** + TODO: return actual upper bound of number of records in the table. + (e.g. save number of records seen on full table scan and/or use file size + as upper bound) + */ + ha_rows estimate_rows_upper_bound() override { + DBUG_ENTER_FUNC(); + + DBUG_RETURN(HA_POS_ERROR); + } + + /* At the moment, we're ok with default handler::index_init() implementation. + */ + int index_read_map(uchar *const buf, const uchar *const key, + key_part_map keypart_map, + enum ha_rkey_function find_flag) override + MY_ATTRIBUTE((__warn_unused_result__)); + + int index_read_map_impl(uchar *const buf, const uchar *const key, + key_part_map keypart_map, + enum ha_rkey_function find_flag, + const key_range *end_key) + MY_ATTRIBUTE((__warn_unused_result__)); + + bool is_using_full_key(key_part_map keypart_map, uint actual_key_parts); + int read_range_first(const key_range *const start_key, + const key_range *const end_key, bool eq_range, + bool sorted) override + MY_ATTRIBUTE((__warn_unused_result__)); + + virtual double scan_time() override { + DBUG_ENTER_FUNC(); + + DBUG_RETURN( + static_cast((stats.records + stats.deleted) / 20.0 + 10)); + } + + virtual double read_time(uint, uint, ha_rows rows) override; + + int open(const char *const name, int mode, uint test_if_locked) override + MY_ATTRIBUTE((__warn_unused_result__)); + int close(void) override MY_ATTRIBUTE((__warn_unused_result__)); + + int write_row(uchar *const buf) override + MY_ATTRIBUTE((__warn_unused_result__)); + int update_row(const uchar *const old_data, uchar *const new_data) override + MY_ATTRIBUTE((__warn_unused_result__)); + int delete_row(const uchar *const buf) override + MY_ATTRIBUTE((__warn_unused_result__)); + rocksdb::Status delete_or_singledelete(uint index, Rdb_transaction *const tx, + rocksdb::ColumnFamilyHandle *const cf, + const rocksdb::Slice &key) + 
MY_ATTRIBUTE((__warn_unused_result__)); + + int index_next(uchar *const buf) override + MY_ATTRIBUTE((__warn_unused_result__)); + int index_next_with_direction(uchar *const buf, bool move_forward) + MY_ATTRIBUTE((__warn_unused_result__)); + int index_prev(uchar *const buf) override + MY_ATTRIBUTE((__warn_unused_result__)); + + int index_first(uchar *const buf) override + MY_ATTRIBUTE((__warn_unused_result__)); + int index_last(uchar *const buf) override + MY_ATTRIBUTE((__warn_unused_result__)); + + class Item *idx_cond_push(uint keyno, class Item *const idx_cond) override; + /* + Default implementation from cancel_pushed_idx_cond() suits us + */ +private: + struct key_def_cf_info { + rocksdb::ColumnFamilyHandle *cf_handle; + bool is_reverse_cf; + bool is_auto_cf; + bool is_per_partition_cf; + }; + + struct update_row_info { + Rdb_transaction *tx; + const uchar *new_data; + const uchar *old_data; + rocksdb::Slice new_pk_slice; + rocksdb::Slice old_pk_slice; + + // "unpack_info" data for the new PK value + Rdb_string_writer *new_pk_unpack_info; + + longlong hidden_pk_id; + bool skip_unique_check; + }; + + /* + Used to check for duplicate entries during fast unique secondary index + creation. + */ + struct unique_sk_buf_info { + bool sk_buf_switch = false; + rocksdb::Slice sk_memcmp_key; + rocksdb::Slice sk_memcmp_key_old; + uchar *dup_sk_buf; + uchar *dup_sk_buf_old; + + /* + This method is meant to be called back to back during inplace creation + of unique indexes. It will switch between two buffers, which + will each store the memcmp form of secondary keys, which are then + converted to slices in sk_memcmp_key or sk_memcmp_key_old. + + Switching buffers on each iteration allows us to retain the + sk_memcmp_key_old value for duplicate comparison. + */ + inline uchar *swap_and_get_sk_buf() { + sk_buf_switch = !sk_buf_switch; + return sk_buf_switch ? 
dup_sk_buf : dup_sk_buf_old; + } + }; + + int create_cfs(const TABLE *const table_arg, Rdb_tbl_def *const tbl_def_arg, + std::array<struct key_def_cf_info, MAX_INDEXES + 1> *const cfs) + const MY_ATTRIBUTE((__nonnull__, __warn_unused_result__)); + + int create_key_def(const TABLE *const table_arg, const uint &i, + const Rdb_tbl_def *const tbl_def_arg, + std::shared_ptr<Rdb_key_def> *const new_key_def, + const struct key_def_cf_info &cf_info) const + MY_ATTRIBUTE((__nonnull__, __warn_unused_result__)); + + int create_inplace_key_defs( + const TABLE *const table_arg, Rdb_tbl_def *vtbl_def_arg, + const TABLE *const old_table_arg, + const Rdb_tbl_def *const old_tbl_def_arg, + const std::array<struct key_def_cf_info, MAX_INDEXES + 1> &cfs) const + MY_ATTRIBUTE((__nonnull__, __warn_unused_result__)); + + std::unordered_map<std::string, uint> + get_old_key_positions(const TABLE *table_arg, const Rdb_tbl_def *tbl_def_arg, + const TABLE *old_table_arg, + const Rdb_tbl_def *old_tbl_def_arg) const + MY_ATTRIBUTE((__nonnull__)); + + int compare_key_parts(const KEY *const old_key, + const KEY *const new_key) const + MY_ATTRIBUTE((__nonnull__, __warn_unused_result__)); + + int index_first_intern(uchar *buf) + MY_ATTRIBUTE((__nonnull__, __warn_unused_result__)); + int index_last_intern(uchar *buf) + MY_ATTRIBUTE((__nonnull__, __warn_unused_result__)); + + int find_icp_matching_index_rec(const bool &move_forward, uchar *const buf) + MY_ATTRIBUTE((__nonnull__, __warn_unused_result__)); + + void calc_updated_indexes(); + int update_write_row(const uchar *const old_data, const uchar *const new_data, + const bool skip_unique_check) + MY_ATTRIBUTE((__warn_unused_result__)); + int get_pk_for_update(struct update_row_info *const row_info); + int check_and_lock_unique_pk(const uint &key_id, + const struct update_row_info &row_info, + bool *const found, bool *const pk_changed) + MY_ATTRIBUTE((__warn_unused_result__)); + int check_and_lock_sk(const uint &key_id, + const struct update_row_info &row_info, + bool *const found) const + MY_ATTRIBUTE((__warn_unused_result__)); + int 
check_uniqueness_and_lock(const struct update_row_info &row_info, + bool *const pk_changed) + MY_ATTRIBUTE((__warn_unused_result__)); + bool over_bulk_load_threshold(int *err) + MY_ATTRIBUTE((__warn_unused_result__)); + int check_duplicate_sk(const TABLE *table_arg, const Rdb_key_def &index, + const rocksdb::Slice *key, + struct unique_sk_buf_info *sk_info) + MY_ATTRIBUTE((__nonnull__, __warn_unused_result__)); + int bulk_load_key(Rdb_transaction *const tx, const Rdb_key_def &kd, + const rocksdb::Slice &key, const rocksdb::Slice &value) + MY_ATTRIBUTE((__nonnull__, __warn_unused_result__)); + int update_pk(const Rdb_key_def &kd, const struct update_row_info &row_info, + const bool &pk_changed) MY_ATTRIBUTE((__warn_unused_result__)); + int update_sk(const TABLE *const table_arg, const Rdb_key_def &kd, + const struct update_row_info &row_info) + MY_ATTRIBUTE((__warn_unused_result__)); + int update_indexes(const struct update_row_info &row_info, + const bool &pk_changed) + MY_ATTRIBUTE((__warn_unused_result__)); + + int read_key_exact(const Rdb_key_def &kd, rocksdb::Iterator *const iter, + const bool &using_full_key, + const rocksdb::Slice &key_slice) const + MY_ATTRIBUTE((__nonnull__, __warn_unused_result__)); + int read_before_key(const Rdb_key_def &kd, const bool &using_full_key, + const rocksdb::Slice &key_slice) + MY_ATTRIBUTE((__nonnull__, __warn_unused_result__)); + int read_after_key(const Rdb_key_def &kd, const rocksdb::Slice &key_slice) + MY_ATTRIBUTE((__nonnull__, __warn_unused_result__)); + int position_to_correct_key( + const Rdb_key_def &kd, const enum ha_rkey_function &find_flag, + const bool &full_key_match, const uchar *const key, + const key_part_map &keypart_map, const rocksdb::Slice &key_slice, + bool *const move_forward) MY_ATTRIBUTE((__warn_unused_result__)); + + int read_row_from_primary_key(uchar *const buf) + MY_ATTRIBUTE((__nonnull__, __warn_unused_result__)); + int read_row_from_secondary_key(uchar *const buf, const Rdb_key_def &kd, + bool 
move_forward) + MY_ATTRIBUTE((__nonnull__, __warn_unused_result__)); + + int calc_eq_cond_len(const Rdb_key_def &kd, + const enum ha_rkey_function &find_flag, + const rocksdb::Slice &slice, + const int &bytes_changed_by_succ, + const key_range *const end_key, + uint *const end_key_packed_size) + MY_ATTRIBUTE((__warn_unused_result__)); + + Rdb_tbl_def *get_table_if_exists(const char *const tablename) + MY_ATTRIBUTE((__nonnull__, __warn_unused_result__)); + void read_thd_vars(THD *const thd) MY_ATTRIBUTE((__nonnull__)); + const char *thd_rocksdb_tmpdir() + MY_ATTRIBUTE((__nonnull__, __warn_unused_result__)); + + bool contains_foreign_key(THD *const thd) + MY_ATTRIBUTE((__nonnull__, __warn_unused_result__)); + + int inplace_populate_sk( + TABLE *const table_arg, + const std::unordered_set> &indexes) + MY_ATTRIBUTE((__nonnull__, __warn_unused_result__)); + +public: + int index_init(uint idx, bool sorted) override + MY_ATTRIBUTE((__warn_unused_result__)); + int index_end() override MY_ATTRIBUTE((__warn_unused_result__)); + + void unlock_row() override; + + /** @brief + Unlike index_init(), rnd_init() can be called two consecutive times + without rnd_end() in between (it only makes sense if scan=1). In this + case, the second call should prepare for the new table scan (e.g if + rnd_init() allocates the cursor, the second call should position the + cursor to the start of the table; no need to deallocate and allocate + it again. This is a required method. 
+ */ + int rnd_init(bool scan) override MY_ATTRIBUTE((__warn_unused_result__)); + int rnd_end() override MY_ATTRIBUTE((__warn_unused_result__)); + + int rnd_next(uchar *const buf) override + MY_ATTRIBUTE((__warn_unused_result__)); + int rnd_next_with_direction(uchar *const buf, bool move_forward) + MY_ATTRIBUTE((__warn_unused_result__)); + + int rnd_pos(uchar *const buf, uchar *const pos) override + MY_ATTRIBUTE((__warn_unused_result__)); + void position(const uchar *const record) override; + int info(uint) override; + + /* This function will always return success, therefore no annotation related + * to checking the return value. Can't change the signature because it's + * required by the interface. */ + int extra(enum ha_extra_function operation) override; + + int start_stmt(THD *const thd, thr_lock_type lock_type) override + MY_ATTRIBUTE((__warn_unused_result__)); + int external_lock(THD *const thd, int lock_type) override + MY_ATTRIBUTE((__warn_unused_result__)); + int truncate() override MY_ATTRIBUTE((__warn_unused_result__)); + + int reset() override { + DBUG_ENTER_FUNC(); + + /* Free blob data */ + m_retrieved_record.clear(); + + DBUG_RETURN(HA_EXIT_SUCCESS); + } + + int check(THD *const thd, HA_CHECK_OPT *const check_opt) override + MY_ATTRIBUTE((__warn_unused_result__)); + void remove_rows(Rdb_tbl_def *const tbl); + ha_rows records_in_range(uint inx, key_range *const min_key, + key_range *const max_key) override + MY_ATTRIBUTE((__warn_unused_result__)); + int delete_table(const char *const from) override + MY_ATTRIBUTE((__warn_unused_result__)); + int create(const char *const name, TABLE *const form, + HA_CREATE_INFO *const create_info) override + MY_ATTRIBUTE((__warn_unused_result__)); + bool check_if_incompatible_data(HA_CREATE_INFO *const info, + uint table_changes) override + MY_ATTRIBUTE((__warn_unused_result__)); + + THR_LOCK_DATA **store_lock(THD *const thd, THR_LOCK_DATA **to, + enum thr_lock_type lock_type) override + 
MY_ATTRIBUTE((__warn_unused_result__)); + + my_bool register_query_cache_table(THD *const thd, char *const table_key, + uint key_length, + qc_engine_callback *const engine_callback, + ulonglong *const engine_data) override { + DBUG_ENTER_FUNC(); + + /* Currently, we don't support query cache */ + DBUG_RETURN(FALSE); + } + + bool get_error_message(const int error, String *const buf) override + MY_ATTRIBUTE((__nonnull__)); + + void get_auto_increment(ulonglong offset, ulonglong increment, + ulonglong nb_desired_values, + ulonglong *const first_value, + ulonglong *const nb_reserved_values) override; + void update_create_info(HA_CREATE_INFO *const create_info) override; + int optimize(THD *const thd, HA_CHECK_OPT *const check_opt) override + MY_ATTRIBUTE((__warn_unused_result__)); + int analyze(THD *const thd, HA_CHECK_OPT *const check_opt) override + MY_ATTRIBUTE((__warn_unused_result__)); + int calculate_stats(const TABLE *const table_arg, THD *const thd, + HA_CHECK_OPT *const check_opt) + MY_ATTRIBUTE((__warn_unused_result__)); + + enum_alter_inplace_result check_if_supported_inplace_alter( + TABLE *altered_table, + my_core::Alter_inplace_info *const ha_alter_info) override; + + bool prepare_inplace_alter_table( + TABLE *const altered_table, + my_core::Alter_inplace_info *const ha_alter_info) override; + + bool inplace_alter_table( + TABLE *const altered_table, + my_core::Alter_inplace_info *const ha_alter_info) override; + + bool + commit_inplace_alter_table(TABLE *const altered_table, + my_core::Alter_inplace_info *const ha_alter_info, + bool commit) override; + + int finalize_bulk_load() MY_ATTRIBUTE((__warn_unused_result__)); + +#ifdef MARIAROCKS_NOT_YET // MDEV-10976 + void set_use_read_free_rpl(const char *const whitelist); +#endif + void set_skip_unique_check_tables(const char *const whitelist); + +#ifdef MARIAROCKS_NOT_YET // MDEV-10976 +public: + virtual void rpl_before_delete_rows() override; + virtual void rpl_after_delete_rows() override; + virtual void 
rpl_before_update_rows() override; + virtual void rpl_after_update_rows() override; + virtual bool use_read_free_rpl(); +#endif // MARIAROCKS_NOT_YET + +private: + /* Flags tracking if we are inside different replication operation */ + bool m_in_rpl_delete_rows; + bool m_in_rpl_update_rows; + + bool m_force_skip_unique_check; +}; + +/* + Helper class for in-place alter, for storing handler context between inplace + alter calls +*/ +struct Rdb_inplace_alter_ctx : public my_core::inplace_alter_handler_ctx { + /* The new table definition */ + Rdb_tbl_def *const m_new_tdef; + + /* Stores the original key definitions */ + std::shared_ptr *const m_old_key_descr; + + /* Stores the new key definitions */ + std::shared_ptr *m_new_key_descr; + + /* Stores the old number of key definitions */ + const uint m_old_n_keys; + + /* Stores the new number of key definitions */ + const uint m_new_n_keys; + + /* Stores the added key glids */ + const std::unordered_set> m_added_indexes; + + /* Stores the dropped key glids */ + const std::unordered_set m_dropped_index_ids; + + /* Stores number of keys to add */ + const uint m_n_added_keys; + + /* Stores number of keys to drop */ + const uint m_n_dropped_keys; + + Rdb_inplace_alter_ctx( + Rdb_tbl_def *new_tdef, std::shared_ptr *old_key_descr, + std::shared_ptr *new_key_descr, uint old_n_keys, + uint new_n_keys, + std::unordered_set> added_indexes, + std::unordered_set dropped_index_ids, uint n_added_keys, + uint n_dropped_keys) + : my_core::inplace_alter_handler_ctx(), m_new_tdef(new_tdef), + m_old_key_descr(old_key_descr), m_new_key_descr(new_key_descr), + m_old_n_keys(old_n_keys), m_new_n_keys(new_n_keys), + m_added_indexes(added_indexes), m_dropped_index_ids(dropped_index_ids), + m_n_added_keys(n_added_keys), m_n_dropped_keys(n_dropped_keys) {} + + ~Rdb_inplace_alter_ctx() {} + +private: + /* Disable Copying */ + Rdb_inplace_alter_ctx(const Rdb_inplace_alter_ctx &); + Rdb_inplace_alter_ctx &operator=(const Rdb_inplace_alter_ctx &); +}; 
+ +} // namespace myrocks diff --git a/storage/rocksdb/ha_rocksdb_proto.h b/storage/rocksdb/ha_rocksdb_proto.h new file mode 100644 index 0000000000000..a4f1b2039b58d --- /dev/null +++ b/storage/rocksdb/ha_rocksdb_proto.h @@ -0,0 +1,87 @@ +/* + Copyright (c) 2012,2013 Monty Program Ab + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; version 2 of the License. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ +#pragma once + +/* C++ standard header files */ +#include +#include + +/* MySQL header files */ +#include "./sql_string.h" + +/* RocksDB includes */ +#include "rocksdb/table.h" +#include "rocksdb/utilities/transaction_db.h" + +namespace myrocks { + +enum RDB_IO_ERROR_TYPE { + RDB_IO_ERROR_TX_COMMIT, + RDB_IO_ERROR_DICT_COMMIT, + RDB_IO_ERROR_BG_THREAD, + RDB_IO_ERROR_GENERAL, + RDB_IO_ERROR_LAST +}; + +const char *get_rdb_io_error_string(const RDB_IO_ERROR_TYPE err_type); + +void rdb_handle_io_error(const rocksdb::Status status, + const RDB_IO_ERROR_TYPE err_type); + +int rdb_normalize_tablename(const std::string &tablename, std::string *str) + MY_ATTRIBUTE((__nonnull__, __warn_unused_result__)); + +int rdb_split_normalized_tablename(const std::string &fullname, std::string *db, + std::string *table = nullptr, + std::string *partition = nullptr) + MY_ATTRIBUTE((__warn_unused_result__)); + +std::vector rdb_get_open_table_names(void); + +class Rdb_perf_counters; +int rdb_get_table_perf_counters(const char *tablename, + Rdb_perf_counters 
*counters) + MY_ATTRIBUTE((__nonnull__(2))); + +void rdb_get_global_perf_counters(Rdb_perf_counters *counters) + MY_ATTRIBUTE((__nonnull__(1))); + +void rdb_queue_save_stats_request(); + +/* + Access to singleton objects. +*/ + +rocksdb::TransactionDB *rdb_get_rocksdb_db(); + +class Rdb_cf_manager; +Rdb_cf_manager &rdb_get_cf_manager(); + +rocksdb::BlockBasedTableOptions &rdb_get_table_options(); + +class Rdb_dict_manager; +Rdb_dict_manager *rdb_get_dict_manager(void) + MY_ATTRIBUTE((__warn_unused_result__)); + +class Rdb_ddl_manager; +Rdb_ddl_manager *rdb_get_ddl_manager(void) + MY_ATTRIBUTE((__warn_unused_result__)); + +class Rdb_binlog_manager; +Rdb_binlog_manager *rdb_get_binlog_manager(void) + MY_ATTRIBUTE((__warn_unused_result__)); + +} // namespace myrocks diff --git a/storage/rocksdb/logger.h b/storage/rocksdb/logger.h new file mode 100644 index 0000000000000..ca75caf9df578 --- /dev/null +++ b/storage/rocksdb/logger.h @@ -0,0 +1,85 @@ +/* + Copyright (c) 2015, Facebook, Inc. + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; version 2 of the License. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. 
+ + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ +#pragma once + +#include +#include +#include + +namespace myrocks { + +class Rdb_logger : public rocksdb::Logger { +public: + explicit Rdb_logger(const rocksdb::InfoLogLevel log_level = + rocksdb::InfoLogLevel::ERROR_LEVEL) + : m_mysql_log_level(log_level) {} + + void Logv(const rocksdb::InfoLogLevel log_level, const char *format, + va_list ap) override { + DBUG_ASSERT(format != nullptr); + + enum loglevel mysql_log_level; + + if (m_logger) { + va_list ap_copy; va_copy(ap_copy, ap); m_logger->Logv(log_level, format, ap_copy); va_end(ap_copy); /* the callee may consume its va_list; keep 'ap' valid for error_log_print() below */ + } + + if (log_level < m_mysql_log_level) { + return; + } + + if (log_level >= rocksdb::InfoLogLevel::ERROR_LEVEL) { + mysql_log_level = ERROR_LEVEL; + } else if (log_level >= rocksdb::InfoLogLevel::WARN_LEVEL) { + mysql_log_level = WARNING_LEVEL; + } else { + mysql_log_level = INFORMATION_LEVEL; + } + + // log to MySQL + std::string f("LibRocksDB:"); + f.append(format); + error_log_print(mysql_log_level, f.c_str(), ap); + } + + void Logv(const char *format, va_list ap) override { + DBUG_ASSERT(format != nullptr); + // If no level is specified, it is by default at information level + Logv(rocksdb::InfoLogLevel::INFO_LEVEL, format, ap); + } + + void SetRocksDBLogger(const std::shared_ptr<rocksdb::Logger> logger) { + m_logger = logger; + } + + void SetInfoLogLevel(const rocksdb::InfoLogLevel log_level) override { + // The InfoLogLevel for the logger is used by rocksdb to filter + // messages, so it needs to be the lower of the two loggers + rocksdb::InfoLogLevel base_level = log_level; + + if (m_logger && m_logger->GetInfoLogLevel() < base_level) { + base_level = m_logger->GetInfoLogLevel(); + } + rocksdb::Logger::SetInfoLogLevel(base_level); + m_mysql_log_level = log_level; + } + +private: + std::shared_ptr<rocksdb::Logger> m_logger; + rocksdb::InfoLogLevel m_mysql_log_level; +}; + +} // namespace myrocks diff 
--git a/storage/rocksdb/mysql-test/rocksdb/include/dup_key_update.inc b/storage/rocksdb/mysql-test/rocksdb/include/dup_key_update.inc new file mode 100644 index 0000000000000..82ceda1914d33 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/include/dup_key_update.inc @@ -0,0 +1,69 @@ +let $max_table = 2; + +--disable_query_log +let $table = 1; +while ($table <= $max_table) { + let $max = 9; + let $i = 2; + while ($i <= $max) { + let $insert = INSERT INTO t$table VALUES ($i, $i, $i); + eval $insert; + inc $i; + } + inc $table; +} +--enable_query_log + +let $table = 1; +while ($table <= $max_table) { + let $i = 1; + let $j = 9; + while ($i <= $max) { + + let $insert = INSERT INTO t$table VALUES ($i, $i, $i) ON DUPLICATE KEY UPDATE id2 = $j; + eval $insert; + + let $select = SELECT * FROM t$table WHERE id1 = $i; + eval $select; + + let $select = SELECT * FROM t$table FORCE INDEX (id3) WHERE id3 = $i; + eval $select; + + inc $j; + + let $insert = INSERT INTO t$table VALUES ($i, $i, $i) ON DUPLICATE KEY UPDATE id2 = $j; + eval $insert; + + let $select = SELECT * FROM t$table WHERE id1 = $i; + eval $select; + + let $select = SELECT * FROM t$table FORCE INDEX (id3) WHERE id3 = $i; + eval $select; + + inc $j; + + let $insert = INSERT INTO t$table VALUES ($i, $i, $i) ON DUPLICATE KEY UPDATE id2 = $j; + eval $insert; + + let $select = SELECT * FROM t$table WHERE id1 = $i; + eval $select; + + let $select = SELECT * FROM t$table FORCE INDEX (id3) WHERE id3 = $i; + eval $select; + + inc $j; + + inc $i; + inc $i; + inc $i; + inc $i; + } + + let $select = SELECT * FROM t$table; + eval $select; + + let $select = SELECT * FROM t$table FORCE INDEX (id3); + eval $select; + + inc $table; +} diff --git a/storage/rocksdb/mysql-test/rocksdb/include/have_rocksdb.inc b/storage/rocksdb/mysql-test/rocksdb/include/have_rocksdb.inc new file mode 100644 index 0000000000000..1f762d38c6401 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/include/have_rocksdb.inc @@ -0,0 +1,10 @@ +if 
(`SELECT COUNT(*) = 0 FROM INFORMATION_SCHEMA.ENGINES WHERE engine = 'rocksdb' AND support IN ('YES', 'DEFAULT', 'ENABLED')`) +{ + --skip Test requires engine RocksDB. +} + +--disable_query_log +# Table statistics can vary depending on when the memtables are flushed, so +# flush them at the beginning of the test to ensure the test runs consistently. +set global rocksdb_force_flush_memtable_now = true; +--enable_query_log diff --git a/storage/rocksdb/mysql-test/rocksdb/include/have_rocksdb.opt b/storage/rocksdb/mysql-test/rocksdb/include/have_rocksdb.opt new file mode 100644 index 0000000000000..36d7dda16094f --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/include/have_rocksdb.opt @@ -0,0 +1,12 @@ +--loose-enable-rocksdb +--loose-enable-rocksdb_global_info +--loose-enable-rocksdb_ddl +--loose-enable-rocksdb_cf_options +--loose-enable_rocksdb_perf_context +--loose-enable_rocksdb_perf_context_global +--loose-enable-rocksdb_index_file_map +--loose-enable-rocksdb_dbstats +--loose-enable-rocksdb_cfstats +--loose-enable-rocksdb_lock_info +--loose-enable-rocksdb_trx +--loose-enable-rocksdb_locks diff --git a/storage/rocksdb/mysql-test/rocksdb/include/have_rocksdb_default.inc b/storage/rocksdb/mysql-test/rocksdb/include/have_rocksdb_default.inc new file mode 100644 index 0000000000000..2c50afd5014cc --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/include/have_rocksdb_default.inc @@ -0,0 +1,10 @@ +if (`SELECT COUNT(*) = 0 FROM INFORMATION_SCHEMA.ENGINES WHERE engine = 'rocksdb' AND support in ('DEFAULT')`) +{ + --skip Test requires engine RocksDB as default. +} + +--disable_query_log +# Table statistics can vary depending on when the memtables are flushed, so +# flush them at the beginning of the test to ensure the test runs consistently.
+set global rocksdb_force_flush_memtable_now = true; +--enable_query_log diff --git a/storage/rocksdb/mysql-test/rocksdb/include/have_rocksdb_replication.inc b/storage/rocksdb/mysql-test/rocksdb/include/have_rocksdb_replication.inc new file mode 100644 index 0000000000000..92261211bf5b5 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/include/have_rocksdb_replication.inc @@ -0,0 +1,11 @@ +# MARIAROCKS_NOT_YET: replication doesn't work yet: +#if (`select count(*) = 0 from information_schema.tables where engine='rocksdb' and table_name='slave_gtid_info'`) +#{ +# --skip Test requires default engine RocksDB +#} + +--disable_query_log +# Table statistics can vary depending on when the memtables are flushed, so +# flush them at the beginning of the test to ensure the test runs consistently. +set global rocksdb_force_flush_memtable_now = true; +--enable_query_log diff --git a/storage/rocksdb/mysql-test/rocksdb/include/locking_issues_case1_1.inc b/storage/rocksdb/mysql-test/rocksdb/include/locking_issues_case1_1.inc new file mode 100644 index 0000000000000..6dc5a78e3a08c --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/include/locking_issues_case1_1.inc @@ -0,0 +1,51 @@ +# +# Check concurrent locking issues: +# Locking rows that do not exist when using all primary key columns in a +# WHERE clause +# +# To call this, set $isolation_level and call this file +# +# let $isolation_level = REPEATABLE READ; +# --source suite/rocksdb/include/locking_issues_case1_1.inc +# + +--echo +--echo ----------------------------------------------------------------------- +--echo - Locking issues case 1.1: +--echo - Locking rows that do not exist when using all primary key columns in +--echo - a WHERE clause +--echo - using $isolation_level transaction isolation level +--echo ----------------------------------------------------------------------- + +--disable_warnings +DROP TABLE IF EXISTS t0; +--enable_warnings + +CREATE TABLE t0(id1 INT, id2 INT, value INT, PRIMARY KEY(id1, 
id2)); +INSERT INTO t0 VALUES (1,1,0), (3,3,0), (4,4,0), (6,6,0); + +connect (con1,localhost,root,,); +connect (con2,localhost,root,,); + +connection con1; +eval SET SESSION TRANSACTION ISOLATION LEVEL $isolation_level; +BEGIN; +SELECT * FROM t0 WHERE id1=1 AND id2=5 FOR UPDATE; + +connection con2; +eval SET SESSION TRANSACTION ISOLATION LEVEL $isolation_level; +BEGIN; +--error ER_LOCK_WAIT_TIMEOUT +INSERT INTO t0 VALUES (1,5,0); + +--error ER_LOCK_WAIT_TIMEOUT +SELECT * FROM t0 WHERE id1=1 AND id2=5 FOR UPDATE; + +connection con1; +COMMIT; + +connection default; +disconnect con1; +disconnect con2; + +DROP TABLE t0; diff --git a/storage/rocksdb/mysql-test/rocksdb/include/locking_issues_case1_2.inc b/storage/rocksdb/mysql-test/rocksdb/include/locking_issues_case1_2.inc new file mode 100644 index 0000000000000..13083bf82d9a6 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/include/locking_issues_case1_2.inc @@ -0,0 +1,48 @@ +# +# Check concurrent locking issues: +# Locking rows that do not exist without using all primary key columns in a +# WHERE clause +# +# To call this, set $isolation_level and call this file +# +# let $isolation_level = REPEATABLE READ; +# --source suite/rocksdb/include/locking_issues_case1_2.inc +# + +--echo +--echo ----------------------------------------------------------------------- +--echo - Locking issues case 1.2: +--echo - Locking rows that do not exist without using all primary key +--echo - columns in a WHERE clause +--echo - using $isolation_level transaction isolation level +--echo ----------------------------------------------------------------------- + +--disable_warnings +DROP TABLE IF EXISTS t0; +--enable_warnings + +CREATE TABLE t0(id1 INT, id2 INT, value INT, PRIMARY KEY(id1, id2)); +INSERT INTO t0 VALUES (1,1,0), (3,3,0), (4,4,0), (6,6,0); + +connect (con1,localhost,root,,); +connect (con2,localhost,root,,); + +connection con1; +eval SET SESSION TRANSACTION ISOLATION LEVEL $isolation_level; +BEGIN; +SELECT * FROM t0 
WHERE id1=1 FOR UPDATE; + +connection con2; +eval SET SESSION TRANSACTION ISOLATION LEVEL $isolation_level; +BEGIN; +SELECT * FROM t0 WHERE id1=1 AND id2=4 FOR UPDATE; +INSERT INTO t0 VALUES (1,5,0); + +connection con1; +COMMIT; + +connection default; +disconnect con1; +disconnect con2; + +DROP TABLE t0; diff --git a/storage/rocksdb/mysql-test/rocksdb/include/locking_issues_case2.inc b/storage/rocksdb/mysql-test/rocksdb/include/locking_issues_case2.inc new file mode 100644 index 0000000000000..61c604dd6d383 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/include/locking_issues_case2.inc @@ -0,0 +1,97 @@ +# +# Check concurrent locking issues: +# Rows that are scanned but do not match the WHERE clause are not locked. +# +# To call this, set $isolation_level and call this file +# If you want to enable rocksdb_lock_scanned_rows set $lock_scanned_rows=1 +# +# let $isolation_level = REPEATABLE READ; +# let $lock_scanned_rows = 1 (optional) +# --source suite/rocksdb/include/locking_issues_case2.inc +# + +--echo +--echo ----------------------------------------------------------------------- +--echo - Locking issues case 2: +--echo - Rows that are scanned but do not match the WHERE are not locked +--echo - using $isolation_level transaction isolation level unless +--echo - rocksdb_lock_scanned_rows is on +--echo ----------------------------------------------------------------------- + +--disable_warnings +DROP TABLE IF EXISTS t0; +--enable_warnings + +SELECT @@global.rocksdb_lock_scanned_rows; + +if ($lock_scanned_rows) +{ + let $original_val=query_get_value( + select @@global.rocksdb_lock_scanned_rows as val, val, 1); + SET GLOBAL rocksdb_lock_scanned_rows=ON; +} + +CREATE TABLE t0(id INT PRIMARY KEY, value INT); +INSERT INTO t0 VALUES (1,0), (2,1), (3,0), (4,0), (5,1); + +connect (con1,localhost,root,,); +connect (con2,localhost,root,,); + +connection con1; +eval SET SESSION TRANSACTION ISOLATION LEVEL $isolation_level; +BEGIN; + +connection con2; +eval SET SESSION 
TRANSACTION ISOLATION LEVEL $isolation_level; +BEGIN; + +if ($lock_scanned_rows == 1) +{ + connection con1; + # This is expected to leave locks on all the rows in t0 + SELECT * FROM t0 WHERE value > 0 FOR UPDATE; + + connection con2; + --error ER_LOCK_WAIT_TIMEOUT + UPDATE t0 SET VALUE=10 WHERE id=1; +} + +if ($lock_scanned_rows == 0) +{ + connection con1; + # This is expected to release locks on rows with value=0 + SELECT * FROM t0 WHERE value > 0 FOR UPDATE; + + connection con2; + # This should succeed as con1 should have released the lock on row (1,0) + UPDATE t0 SET VALUE=10 WHERE id=1; + + # This should fail because lock on row (5,1) is still held. + --error ER_LOCK_WAIT_TIMEOUT + UPDATE t0 SET VALUE=10 WHERE id=5; + + connection con1; + # Do another operation + UPDATE t0 SET value=100 WHERE id in (4,5) and value>0; + + connection con2; + # Check that row (4,0) is still not locked + SELECT * FROM t0 WHERE id=4 FOR UPDATE; + + COMMIT; + SELECT * FROM t0; +} + +connection con1; +COMMIT; + +connection default; +disconnect con1; +disconnect con2; + +DROP TABLE t0; + +if ($lock_scanned_rows == 1) +{ + eval SET GLOBAL rocksdb_lock_scanned_rows=$original_val; +} diff --git a/storage/rocksdb/mysql-test/rocksdb/include/locking_issues_case3.inc b/storage/rocksdb/mysql-test/rocksdb/include/locking_issues_case3.inc new file mode 100644 index 0000000000000..c23717c4fda40 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/include/locking_issues_case3.inc @@ -0,0 +1,70 @@ +# +# Check concurrent locking issues: +# After creating a snapshot, other clients updating rows +# +# To call this, set $isolation_level and call this file +# +# let $isolation_level = REPEATABLE READ; +# --source suite/rocksdb/include/locking_issues_case3.inc +# + +--echo +--echo ----------------------------------------------------------------------- +--echo - Locking issues case 3: +--echo - After creating a snapshot, other clients updating rows +--echo - using $isolation_level transaction isolation 
level +--echo ----------------------------------------------------------------------- + +--disable_warnings +DROP TABLE IF EXISTS t0; +--enable_warnings + +CREATE TABLE t0(id INT AUTO_INCREMENT PRIMARY KEY, value INT); + +# Insert 200,000 rows, breaking it up into inserts of 1000 rows at a time +--echo Inserting 200,000 rows +--disable_query_log +SET @save_rocksdb_bulk_load=@@rocksdb_bulk_load; +SET rocksdb_bulk_load=1; +SET @save_rocksdb_write_disable_wal=@@rocksdb_write_disable_wal; +SET GLOBAL rocksdb_write_disable_wal=1; +let $i = 1; +while ($i <= 200) { + eval BEGIN; + let $j = 1; + while ($j <= 100) { + eval INSERT INTO t0(value) VALUES (0),(0),(0),(0),(0),(0),(0),(0),(0),(0); + inc $j; + } + eval COMMIT; + inc $i; +} +SET rocksdb_bulk_load=@save_rocksdb_bulk_load; +SET GLOBAL rocksdb_write_disable_wal=@save_rocksdb_write_disable_wal; +--enable_query_log + +connect (con1,localhost,root,,); +connect (con2,localhost,root,,); + +connection con1; +eval SET SESSION TRANSACTION ISOLATION LEVEL $isolation_level; +let $ID = `SELECT connection_id()`; +send SELECT * FROM t0 WHERE value > 0 FOR UPDATE; + +connection con2; +let $wait_condition = SELECT 1 FROM information_schema.processlist + WHERE id = $ID AND state = "Sending data"; +--source include/wait_condition.inc +eval SET SESSION TRANSACTION ISOLATION LEVEL $isolation_level; +UPDATE t0 SET VALUE=VALUE+1 WHERE id=190000; + +connection con1; +--error 0,ER_LOCK_DEADLOCK +reap; +--echo ERROR: $mysql_errno + +connection default; +disconnect con1; +disconnect con2; + +DROP TABLE t0; diff --git a/storage/rocksdb/mysql-test/rocksdb/include/locking_issues_case4.inc b/storage/rocksdb/mysql-test/rocksdb/include/locking_issues_case4.inc new file mode 100644 index 0000000000000..da80f79675089 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/include/locking_issues_case4.inc @@ -0,0 +1,68 @@ +# +# Check concurrent locking issues: +# Phantom rows +# +# To call this, set $isolation_level and call this file +# +# let 
$isolation_level = REPEATABLE READ; +# --source suite/rocksdb/include/locking_issues_case4.inc +# + +--echo +--echo ----------------------------------------------------------------------- +--echo - Locking issues case 4: +--echo - Phantom rows +--echo - using $isolation_level transaction isolation level +--echo ----------------------------------------------------------------------- + +--disable_warnings +DROP TABLE IF EXISTS t0; +--enable_warnings + +CREATE TABLE t0(id INT AUTO_INCREMENT PRIMARY KEY, value INT); + +# Insert 200,000 rows, breaking it up into inserts of 1000 rows at a time +--echo Inserting 200,000 rows +--disable_query_log +SET @save_rocksdb_bulk_load=@@rocksdb_bulk_load; +SET rocksdb_bulk_load=1; +SET @save_rocksdb_write_disable_wal=@@rocksdb_write_disable_wal; +SET GLOBAL rocksdb_write_disable_wal=1; +let $i = 1; +while ($i <= 200) { + eval BEGIN; + let $j = 1; + while ($j <= 100) { + eval INSERT INTO t0(value) VALUES (0),(0),(0),(0),(0),(0),(0),(0),(0),(0); + inc $j; + } + eval COMMIT; + inc $i; +} +SET rocksdb_bulk_load=@save_rocksdb_bulk_load; +SET GLOBAL rocksdb_write_disable_wal=@save_rocksdb_write_disable_wal; +--enable_query_log + +connect (con1,localhost,root,,); +connect (con2,localhost,root,,); + +connection con1; +eval SET SESSION TRANSACTION ISOLATION LEVEL $isolation_level; +let $ID = `SELECT connection_id()`; +send SELECT * FROM t0 WHERE value > 0 FOR UPDATE; + +connection con2; +let $wait_condition = SELECT 1 FROM information_schema.processlist + WHERE id = $ID AND state = "Sending data"; +--source include/wait_condition.inc +eval SET SESSION TRANSACTION ISOLATION LEVEL $isolation_level; +INSERT INTO t0 VALUES(200001,1), (-1,1); + +connection con1; +reap; + +connection default; +disconnect con1; +disconnect con2; + +DROP TABLE t0; diff --git a/storage/rocksdb/mysql-test/rocksdb/include/locking_issues_case5.inc b/storage/rocksdb/mysql-test/rocksdb/include/locking_issues_case5.inc new file mode 100644 index 
0000000000000..b77a54e43602e --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/include/locking_issues_case5.inc @@ -0,0 +1,76 @@ +# +# Check concurrent locking issues: +# Deleting primary key +# +# To call this, set $isolation_level and call this file +# +# let $isolation_level = REPEATABLE READ; +# --source suite/rocksdb/include/locking_issues_case5.inc +# + +--echo +--echo ----------------------------------------------------------------------- +--echo - Locking issues case 5: +--echo - Deleting primary key +--echo - using $isolation_level transaction isolation level +--echo ----------------------------------------------------------------------- + +--disable_warnings +DROP TABLE IF EXISTS t0; +--enable_warnings + +CREATE TABLE t0(id INT AUTO_INCREMENT PRIMARY KEY, value INT); + +# Insert 200,000 rows, breaking it up into inserts of 1000 rows at a time +--echo Inserting 200,000 rows +--disable_query_log +SET @save_rocksdb_bulk_load=@@rocksdb_bulk_load; +SET rocksdb_bulk_load=1; +SET @save_rocksdb_write_disable_wal=@@rocksdb_write_disable_wal; +SET GLOBAL rocksdb_write_disable_wal=1; +let $i = 1; +while ($i <= 200) { + eval BEGIN; + let $j = 1; + while ($j <= 100) { + eval INSERT INTO t0(value) VALUES (0),(0),(0),(0),(0),(0),(0),(0),(0),(0); + inc $j; + } + eval COMMIT; + inc $i; +} +SET rocksdb_bulk_load=@save_rocksdb_bulk_load; +SET GLOBAL rocksdb_write_disable_wal=@save_rocksdb_write_disable_wal; +--enable_query_log + +UPDATE t0 SET value=100 WHERE id=190000; + +connect (con1,localhost,root,,); +connect (con2,localhost,root,,); + +connection con1; +eval SET SESSION TRANSACTION ISOLATION LEVEL $isolation_level; +BEGIN; +let $ID = `SELECT connection_id()`; +send SELECT * FROM t0 WHERE value > 0 FOR UPDATE; + +connection con2; +let $wait_condition = SELECT 1 FROM information_schema.processlist + WHERE id = $ID AND state = "Sending data"; +--source include/wait_condition.inc +eval SET SESSION TRANSACTION ISOLATION LEVEL $isolation_level; +BEGIN; +DELETE FROM t0 
WHERE id=190000; +COMMIT; + +connection con1; +--error 0,ER_LOCK_DEADLOCK +reap; +--echo ERROR: $mysql_errno +COMMIT; + +connection default; +disconnect con1; +disconnect con2; + +DROP TABLE t0; diff --git a/storage/rocksdb/mysql-test/rocksdb/include/locking_issues_case6.inc b/storage/rocksdb/mysql-test/rocksdb/include/locking_issues_case6.inc new file mode 100644 index 0000000000000..9494146ba5ccd --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/include/locking_issues_case6.inc @@ -0,0 +1,76 @@ +# +# Check concurrent locking issues: +# Changing primary key +# +# To call this, set $isolation_level and call this file +# +# let $isolation_level = REPEATABLE READ; +# --source suite/rocksdb/include/locking_issues_case6.inc +# + +--echo +--echo ----------------------------------------------------------------------- +--echo - Locking issues case 6: +--echo - Changing primary key +--echo - using $isolation_level transaction isolation level +--echo ----------------------------------------------------------------------- + +--disable_warnings +DROP TABLE IF EXISTS t0; +--enable_warnings + +CREATE TABLE t0(id INT AUTO_INCREMENT PRIMARY KEY, value INT); + +# Insert 200,000 rows, breaking it up into inserts of 1000 rows at a time +--echo Inserting 200,000 rows +--disable_query_log +SET @save_rocksdb_bulk_load=@@rocksdb_bulk_load; +SET rocksdb_bulk_load=1; +SET @save_rocksdb_write_disable_wal=@@rocksdb_write_disable_wal; +SET GLOBAL rocksdb_write_disable_wal=1; +let $i = 1; +while ($i <= 200) { + eval BEGIN; + let $j = 1; + while ($j <= 100) { + eval INSERT INTO t0(value) VALUES (0),(0),(0),(0),(0),(0),(0),(0),(0),(0); + inc $j; + } + eval COMMIT; + inc $i; +} +SET rocksdb_bulk_load=@save_rocksdb_bulk_load; +SET GLOBAL rocksdb_write_disable_wal=@save_rocksdb_write_disable_wal; +--enable_query_log + +UPDATE t0 SET value=100 WHERE id=190000; + +connect (con1,localhost,root,,); +connect (con2,localhost,root,,); + +connection con1; +eval SET SESSION TRANSACTION ISOLATION LEVEL 
$isolation_level; +BEGIN; +let $ID = `SELECT connection_id()`; +send SELECT * FROM t0 WHERE value > 0 FOR UPDATE; + +connection con2; +let $wait_condition = SELECT 1 FROM information_schema.processlist + WHERE id = $ID AND state = "Sending data"; +--source include/wait_condition.inc +eval SET SESSION TRANSACTION ISOLATION LEVEL $isolation_level; +BEGIN; +UPDATE t0 SET id=200001 WHERE id=190000; +COMMIT; + +connection con1; +--error 0,ER_LOCK_DEADLOCK +reap; +--echo ERROR: $mysql_errno +COMMIT; + +connection default; +disconnect con1; +disconnect con2; + +DROP TABLE t0; diff --git a/storage/rocksdb/mysql-test/rocksdb/include/locking_issues_case7.inc b/storage/rocksdb/mysql-test/rocksdb/include/locking_issues_case7.inc new file mode 100644 index 0000000000000..d71d398982ec3 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/include/locking_issues_case7.inc @@ -0,0 +1,89 @@ +# +# Check concurrent locking issues: +# Rows scanned but are not in the updated table should be locked when +# rocksdb_lock_scanned_rows is on but not locked otherwise. 
+# +# To call this, set $isolation_level and $lock_scanned_rows and call this file +# +# let $isolation_level = REPEATABLE READ; +# let $lock_scanned_rows = 0 (or 1) +# --source suite/rocksdb/include/locking_issues_case7.inc +# + +--echo +--echo ----------------------------------------------------------------------- +--echo - Locking issues case 7: +--echo - Rows that are scanned as part of a query but not in the table being +--echo - updated should not be locked unless rocksdb_lock_scanned_rows is on +--echo ----------------------------------------------------------------------- + +--disable_warnings +DROP TABLE IF EXISTS t1, t2; +--enable_warnings + +SELECT @@global.rocksdb_lock_scanned_rows; + +if ($lock_scanned_rows) +{ + let $original_val=query_get_value( + select @@global.rocksdb_lock_scanned_rows as val, val, 1); + SET GLOBAL rocksdb_lock_scanned_rows=ON; +} + +CREATE TABLE t1(id INT PRIMARY KEY, value INT); +CREATE TABLE t2(id INT PRIMARY KEY, value INT); +INSERT INTO t1 VALUES (1,1), (2,2), (3,3); +INSERT INTO t2 VALUES (1,1), (2,2), (3,3), (4,4), (5,5); + +connect (con1,localhost,root,,); +connect (con2,localhost,root,,); + +connection con1; +eval SET SESSION TRANSACTION ISOLATION LEVEL $isolation_level; +BEGIN; + +connection con2; +eval SET SESSION TRANSACTION ISOLATION LEVEL $isolation_level; +BEGIN; + +--echo lock_scanned_rows is $lock_scanned_rows +if ($lock_scanned_rows == 1) +{ + connection con1; + # This is expected to leave a lock id=3 in t2; + UPDATE t1 JOIN t2 ON t1.id = t2.id SET t1.value=t1.value+100 WHERE t2.id=3; + + connection con2; + --error ER_LOCK_WAIT_TIMEOUT + UPDATE t2 SET value=value+100 WHERE id=3; + + # No other row in t2 should be locked; + UPDATE t2 SET value=value+100 WHERE id IN (1,2,4,5); + SELECT * FROM t2; +} + +if ($lock_scanned_rows == 0) +{ + connection con1; + # This should leave no locks on any row in t2; + UPDATE t1 JOIN t2 ON t1.id = t2.id SET t1.value=t1.value+100 WHERE t2.id=3; + + connection con2; + UPDATE t2 SET 
value=value+100; + SELECT * FROM t2; +} + +connection con1; +COMMIT; + +connection default; +disconnect con1; +disconnect con2; + +DROP TABLE t1; +DROP TABLE t2; + +if ($lock_scanned_rows == 1) +{ + eval SET GLOBAL rocksdb_lock_scanned_rows=$original_val; +} diff --git a/storage/rocksdb/mysql-test/rocksdb/include/rocksdb_concurrent_delete.inc b/storage/rocksdb/mysql-test/rocksdb/include/rocksdb_concurrent_delete.inc new file mode 100644 index 0000000000000..71e713226d753 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/include/rocksdb_concurrent_delete.inc @@ -0,0 +1,53 @@ +# Usage: +# +# let $order = ASC; # or DESC +# let $comment = "rev:cf2"; # or "" +# --source suite/rocksdb/include/rocksdb_concurrent_delete.inc + +let $first_row = -1; # Error this should never happen +if ($order == 'ASC') +{ + let $first_row = 1; +} +if ($order == 'DESC') +{ + let $first_row = 3; +} + +connect (con, localhost, root,,); +connection default; + +--disable_warnings +SET debug_sync='RESET'; +DROP TABLE IF EXISTS t1; +--enable_warnings + +eval CREATE TABLE t1 (pk INT PRIMARY KEY COMMENT $comment, a INT); +INSERT INTO t1 VALUES(1,1), (2,2), (3,3); + +# This will cause the SELECT to block after finding the first row, but +# before locking and reading it. +connection con; +SET debug_sync='rocksdb_concurrent_delete SIGNAL parked WAIT_FOR go'; +send_eval SELECT * FROM t1 order by t1.pk $order FOR UPDATE; + +# While that connection is waiting, delete the first row (the one con +# is about to lock and read +connection default; +SET debug_sync='now WAIT_FOR parked'; +eval DELETE FROM t1 WHERE pk = $first_row; + +# Signal the waiting select to continue +SET debug_sync='now SIGNAL go'; + +# Now get the results from the select. The first entry (1,1) (or (3,3) when +# using reverse ordering) should be missing. 
Prior to the fix the SELECT +# would have returned: "1815: Internal error: NotFound:" +connection con; +reap; + +# Cleanup +connection default; +disconnect con; +set debug_sync='RESET'; +drop table t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/include/rocksdb_icp.inc b/storage/rocksdb/mysql-test/rocksdb/include/rocksdb_icp.inc new file mode 100644 index 0000000000000..c76b52d4cc195 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/include/rocksdb_icp.inc @@ -0,0 +1,199 @@ +# +# Testing Index Condition Pushdown for MyRocks +# Test file parameter: $cf_name specifies the CF to store test data in +# It can be forward or reverse-ordered CF +# +select * from information_schema.engines where engine = 'rocksdb'; + +--disable_warnings +drop table if exists t0,t1,t2,t3; +--enable_warnings +create table t0 (a int) engine=myisam; +insert into t0 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9); + +create table t1(a int) engine=myisam; +insert into t1 select A.a + B.a* 10 + C.a * 100 from t0 A, t0 B, t0 C; + +eval +create table t2 ( + pk int primary key, + kp1 int, + kp2 int, + col1 int, + key (kp1,kp2) comment '$cf_name' +) engine=rocksdb; + +insert into t2 select a,a,a,a from t1; + +--echo # Try a basic case: +--replace_column 9 # +explain +select * from t2 where kp1 between 1 and 10 and mod(kp2,2)=0; +select * from t2 where kp1 between 1 and 10 and mod(kp2,2)=0; + +--echo # Check that ICP doesnt work for columns where column value +--echo # cant be restored from mem-comparable form: + +eval +create table t3 ( + pk int primary key, + kp1 int, + kp2 varchar(10) collate utf8_general_ci, + col1 int, + key (kp1,kp2) comment '$cf_name' +) engine=rocksdb; + +insert into t3 select a,a/10,a,a from t1; +--echo # This must not use ICP: +--replace_column 9 # +explain +select * from t3 where kp1=3 and kp2 like '%foo%'; + +--replace_column 9 # +explain format=json +select * from t3 where kp1 between 2 and 4 and mod(kp1,3)=0 and kp2 like '%foo%'; + +--echo # Check that we handle 
the case where out-of-range is encountered sooner +--echo # than matched index condition +--replace_column 9 # +explain +select * from t2 where kp1< 3 and kp2+1>50000; +select * from t2 where kp1< 3 and kp2+1>50000; + +--replace_column 9 # +explain +select * from t2 where kp1< 3 and kp2+1>50000; +select * from t2 where kp1< 3 and kp2+1>50000; + +--echo # Try doing backwards scans +--echo # MariaDB: ICP is not supported for reverse scans. + +--replace_column 9 # +explain +select * from t2 where kp1 between 1 and 10 and mod(kp2,2)=0 order by kp1 desc; +select * from t2 where kp1 between 1 and 10 and mod(kp2,2)=0 order by kp1 desc; + +--replace_column 9 # +explain +select * from t2 where kp1 >990 and mod(kp2,2)=0 order by kp1 desc; +select * from t2 where kp1 >990 and mod(kp2,2)=0 order by kp1 desc; + +--replace_column 9 # +explain +select * from t2 where kp1< 3 and kp2+1>50000 order by kp1 desc; +select * from t2 where kp1< 3 and kp2+1>50000 order by kp1 desc; + +drop table t0,t1,t2,t3; + +--echo # +--echo # Check how ICP affects counters +--echo # +--echo # First, some preparations +--echo # +--echo # in facebook/mysql-5.6, it was: +--echo # select ROWS_READ, ROWS_REQUESTED, ROWS_INDEX_FIRST, ROWS_INDEX_NEXT +--echo # +--echo # In MariaDB, we do: +delimiter |; +create procedure save_read_stats() +begin + set @rr=(select ROWS_READ + from information_schema.table_statistics + where table_name='t4' and table_schema=database()); + + set @rif= (select VARIABLE_VALUE + from information_schema.session_status + where VARIABLE_NAME='Handler_read_first'); + + set @rin=(select VARIABLE_VALUE + from information_schema.session_status + where VARIABLE_NAME='Handler_read_next'); + + set @icp_attempts=(select VARIABLE_VALUE + from information_schema.session_status + where VARIABLE_NAME='Handler_icp_attempts'); + + set @icp_matches=(select VARIABLE_VALUE + from information_schema.session_status + where VARIABLE_NAME='Handler_icp_match'); +end| + +create procedure get_read_stats() 
+begin + select + (select ROWS_READ + from information_schema.table_statistics + where table_name='t4' and table_schema=database() + ) - @rr as ROWS_READ_DIFF, + + (select VARIABLE_VALUE - @rif + from information_schema.session_status + where VARIABLE_NAME='Handler_read_first') as ROWS_INDEX_FIRST, + + (select VARIABLE_VALUE - @rin + from information_schema.session_status + where VARIABLE_NAME='Handler_read_next') as ROWS_INDEX_NEXT, + + (select VARIABLE_VALUE - @icp_attempts + from information_schema.session_status + where VARIABLE_NAME='Handler_icp_attempts') as ICP_ATTEMPTS, + + (select VARIABLE_VALUE - @icp_matches + from information_schema.session_status + where VARIABLE_NAME='Handler_icp_match') as ICP_MATCHES; +end| + +delimiter ;| + +eval +create table t4 ( + id int, + id1 int, + id2 int, + value int, + value2 varchar(100), + primary key (id), + key id1_id2 (id1, id2) comment '$cf_name' +) engine=rocksdb charset=latin1 collate latin1_bin; + +insert into t4 values +(1,1,1,1,1), (2,1,2,2,2), (3,1,3,3,3),(4,1,4,4,4),(5,1,5,5,5), +(6,1,6,6,6), (7,1,7,7,7), (8,1,8,8,8),(9,1,9,9,9),(10,1,10,10,10); + +--echo # +--echo # Now, the test itself +--echo # +call save_read_stats(); +call get_read_stats(); + + +--echo # ============== index-only query ============== +--replace_column 9 # +explain +select id1,id2 from t4 force index (id1_id2) where id1=1 and id2 % 10 = 1; +call save_read_stats(); +select id1,id2 from t4 force index (id1_id2) where id1=1 and id2 % 10 = 1; +query_vertical call get_read_stats(); + +--echo # ============== Query without ICP ============== +set optimizer_switch='index_condition_pushdown=off'; +--replace_column 9 # +explain +select * from t4 force index (id1_id2) where id1=1 and id2 % 10 = 1; +call save_read_stats(); +select * from t4 force index (id1_id2) where id1=1 and id2 % 10 = 1; +query_vertical call get_read_stats(); + +--echo # ============== Query with ICP ============== +set optimizer_switch='index_condition_pushdown=on'; 
+--replace_column 9 # +explain +select * from t4 force index (id1_id2) where id1=1 and id2 % 10 = 1; +call save_read_stats(); +select * from t4 force index (id1_id2) where id1=1 and id2 % 10 = 1; +query_vertical call get_read_stats(); + +drop table t4; +drop procedure save_read_stats; +drop procedure get_read_stats; + diff --git a/storage/rocksdb/mysql-test/rocksdb/my.cnf b/storage/rocksdb/mysql-test/rocksdb/my.cnf new file mode 100644 index 0000000000000..2beaf514cee64 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/my.cnf @@ -0,0 +1,17 @@ +!include include/default_my.cnf + +[server] +skip-innodb +default-storage-engine=rocksdb + + +sql-mode=NO_ENGINE_SUBSTITUTION +explicit-defaults-for-timestamp=1 +loose-rocksdb_lock_wait_timeout=1 +loose-rocksdb_strict_collation_check=0 + +loose-rocksdb-flush-log-at-trx-commit=0 + +# The following is to get rid of the harmless +# "Deadlock found when trying to get lock" errors, see MDEV-12285. +log-warnings=1 diff --git a/storage/rocksdb/mysql-test/rocksdb/r/1st.result b/storage/rocksdb/mysql-test/rocksdb/r/1st.result new file mode 100644 index 0000000000000..323b614ea36fb --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/1st.result @@ -0,0 +1,22 @@ +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 (pk INT PRIMARY KEY DEFAULT '0', a INT(11), b CHAR(8)) ENGINE=rocksdb; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `pk` int(11) NOT NULL DEFAULT 0, + `a` int(11) DEFAULT NULL, + `b` char(8) DEFAULT NULL, + PRIMARY KEY (`pk`) +) ENGINE=ROCKSDB DEFAULT CHARSET=latin1 +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +pk int(11) NO PRI 0 +a int(11) YES NULL +b char(8) YES NULL +INSERT INTO t1 VALUES (1, 1,'a'); +INSERT INTO t1 (a,b) VALUES (2,'b'); +SELECT a,b FROM t1; +a b +1 a +2 b +DROP TABLE t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/2pc_group_commit.result b/storage/rocksdb/mysql-test/rocksdb/r/2pc_group_commit.result new file mode 100644 index 0000000000000..06452a5437f13 --- 
/dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/2pc_group_commit.result @@ -0,0 +1,44 @@ +# Disable for valgrind because this takes too long +DROP DATABASE IF EXISTS mysqlslap; +CREATE DATABASE mysqlslap; +USE mysqlslap; +CREATE TABLE t1(id BIGINT AUTO_INCREMENT, value BIGINT, PRIMARY KEY(id)) ENGINE=rocksdb; +# 2PC enabled, MyRocks durability enabled +SET GLOBAL rocksdb_enable_2pc=0; +SET GLOBAL rocksdb_flush_log_at_trx_commit=1; +## 2PC + durability + single thread +select variable_value into @c from information_schema.global_status where variable_name='rocksdb_wal_group_syncs'; +select case when variable_value-@c = 1000 then 'true' else 'false' end from information_schema.global_status where variable_name='rocksdb_wal_group_syncs'; +case when variable_value-@c = 1000 then 'true' else 'false' end +false +## 2PC + durability + group commit +select variable_value into @c from information_schema.global_status where variable_name='rocksdb_wal_group_syncs'; +select case when variable_value-@c > 0 and variable_value-@c < 10000 then 'true' else 'false' end from information_schema.global_status where variable_name='rocksdb_wal_group_syncs'; +case when variable_value-@c > 0 and variable_value-@c < 10000 then 'true' else 'false' end +false +# 2PC enabled, MyRocks durability disabled +SET GLOBAL rocksdb_enable_2pc=0; +SET GLOBAL rocksdb_flush_log_at_trx_commit=0; +select variable_value into @c from information_schema.global_status where variable_name='rocksdb_wal_group_syncs'; +select case when variable_value-@c = 0 then 'true' else 'false' end from information_schema.global_status where variable_name='rocksdb_wal_group_syncs'; +case when variable_value-@c = 0 then 'true' else 'false' end +true +select variable_value into @c from information_schema.global_status where variable_name='rocksdb_wal_group_syncs'; +select case when variable_value-@c = 0 then 'true' else 'false' end from information_schema.global_status where variable_name='rocksdb_wal_group_syncs'; +case 
when variable_value-@c = 0 then 'true' else 'false' end +true +# 2PC disabled, MyRocks durability enabled +SET GLOBAL rocksdb_enable_2pc=1; +SET GLOBAL rocksdb_flush_log_at_trx_commit=1; +select variable_value into @c from information_schema.global_status where variable_name='rocksdb_wal_group_syncs'; +select case when variable_value-@c = 0 then 'true' else 'false' end from information_schema.global_status where variable_name='rocksdb_wal_group_syncs'; +case when variable_value-@c = 0 then 'true' else 'false' end +false +select variable_value into @c from information_schema.global_status where variable_name='rocksdb_wal_group_syncs'; +select case when variable_value-@c = 0 then 'true' else 'false' end from information_schema.global_status where variable_name='rocksdb_wal_group_syncs'; +case when variable_value-@c = 0 then 'true' else 'false' end +false +SET GLOBAL rocksdb_enable_2pc=1; +SET GLOBAL rocksdb_flush_log_at_trx_commit=1; +DROP TABLE t1; +DROP DATABASE mysqlslap; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/add_index_inplace.result b/storage/rocksdb/mysql-test/rocksdb/r/add_index_inplace.result new file mode 100644 index 0000000000000..2aeeda4cfe60d --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/add_index_inplace.result @@ -0,0 +1,410 @@ +drop table if exists t1; +CREATE TABLE t1 (a INT, b INT, KEY ka(a), KEY kab(a,b)) ENGINE=RocksDB; +INSERT INTO t1 (a, b) VALUES (1, 5); +INSERT INTO t1 (a, b) VALUES (2, 6); +INSERT INTO t1 (a, b) VALUES (3, 7); +ALTER TABLE t1 ADD INDEX kb(b), ALGORITHM=INPLACE; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` int(11) DEFAULT NULL, + `b` int(11) DEFAULT NULL, + KEY `ka` (`a`), + KEY `kab` (`a`,`b`), + KEY `kb` (`b`) +) ENGINE=ROCKSDB DEFAULT CHARSET=latin1 +CHECK TABLE t1; +Table Op Msg_type Msg_text +test.t1 check status OK +SELECT * FROM t1 FORCE INDEX(kb) WHERE b > 5; +a b +2 6 +3 7 +SELECT * FROM t1 FORCE INDEX(kab) WHERE a > 2; +a b +3 7 +DROP TABLE t1; +CREATE TABLE t1 (a 
INT, b INT, KEY ka(a), KEY kab(a,b)) ENGINE=RocksDB; +INSERT INTO t1 (a, b) VALUES (1, 5); +INSERT INTO t1 (a, b) VALUES (2, 6); +INSERT INTO t1 (a, b) VALUES (3, 7); +ALTER TABLE t1 ADD INDEX kb(b), DROP INDEX ka, ALGORITHM=INPLACE; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` int(11) DEFAULT NULL, + `b` int(11) DEFAULT NULL, + KEY `kab` (`a`,`b`), + KEY `kb` (`b`) +) ENGINE=ROCKSDB DEFAULT CHARSET=latin1 +CHECK TABLE t1; +Table Op Msg_type Msg_text +test.t1 check status OK +SELECT * FROM t1 FORCE INDEX(kb) WHERE b > 5; +a b +2 6 +3 7 +SELECT * FROM t1 FORCE INDEX(kab) WHERE a > 2; +a b +3 7 +DROP TABLE t1; +CREATE TABLE t1 (a INT, b INT, KEY ka(a), KEY kab(a,b)) ENGINE=RocksDB; +INSERT INTO t1 (a, b) VALUES (1, 5); +INSERT INTO t1 (a, b) VALUES (2, 6); +INSERT INTO t1 (a, b) VALUES (3, 7); +ALTER TABLE t1 DROP INDEX ka, DROP INDEX kab, ALGORITHM=INPLACE; +ALTER TABLE t1 ADD INDEX kb(b), ADD INDEX kab(a,b), ALGORITHM=INPLACE; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` int(11) DEFAULT NULL, + `b` int(11) DEFAULT NULL, + KEY `kb` (`b`), + KEY `kab` (`a`,`b`) +) ENGINE=ROCKSDB DEFAULT CHARSET=latin1 +CHECK TABLE t1; +Table Op Msg_type Msg_text +test.t1 check status OK +SELECT * FROM t1 FORCE INDEX(kb) WHERE b > 5; +a b +2 6 +3 7 +SELECT * FROM t1 FORCE INDEX(kab) WHERE a > 2; +a b +3 7 +DROP TABLE t1; +CREATE TABLE t1 (a INT, b INT, KEY ka(a), KEY kab(a,b)) ENGINE=RocksDB; +INSERT INTO t1 (a, b) VALUES (1, 5); +INSERT INTO t1 (a, b) VALUES (2, 6); +INSERT INTO t1 (a, b) VALUES (3, 7); +ALTER TABLE t1 ADD INDEX kb(b), DROP INDEX ka, ADD INDEX kba(b,a), DROP INDEX kab, ALGORITHM=INPLACE; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` int(11) DEFAULT NULL, + `b` int(11) DEFAULT NULL, + KEY `kb` (`b`), + KEY `kba` (`b`,`a`) +) ENGINE=ROCKSDB DEFAULT CHARSET=latin1 +CHECK TABLE t1; +Table Op Msg_type Msg_text +test.t1 check status OK +SELECT * FROM t1 FORCE INDEX(kb) WHERE b > 5; +a b +2 6 
+3 7 +SELECT * FROM t1 FORCE INDEX(kba) WHERE a > 2; +a b +3 7 +DROP TABLE t1; +CREATE TABLE t1 (a INT, b INT, KEY ka(a), KEY kab(a,b)) ENGINE=RocksDB; +ALTER TABLE t1 DROP INDEX ka, ADD INDEX ka(b), ALGORITHM=INPLACE; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` int(11) DEFAULT NULL, + `b` int(11) DEFAULT NULL, + KEY `kab` (`a`,`b`), + KEY `ka` (`b`) +) ENGINE=ROCKSDB DEFAULT CHARSET=latin1 +CHECK TABLE t1; +Table Op Msg_type Msg_text +test.t1 check status OK +SELECT * FROM t1 FORCE INDEX(ka) WHERE b > 5; +a b +SELECT * FROM t1 FORCE INDEX(kab) WHERE a > 2; +a b +DROP TABLE t1; +CREATE TABLE t1 (pk CHAR(8) PRIMARY KEY, a VARCHAR(11), b INT UNSIGNED) ENGINE=rocksdb charset utf8 collate utf8_bin; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `pk` char(8) COLLATE utf8_bin NOT NULL, + `a` varchar(11) COLLATE utf8_bin DEFAULT NULL, + `b` int(10) unsigned DEFAULT NULL, + PRIMARY KEY (`pk`) +) ENGINE=ROCKSDB DEFAULT CHARSET=utf8 COLLATE=utf8_bin +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +pk char(8) NO PRI NULL +a varchar(11) YES NULL +b int(10) unsigned YES NULL +INSERT INTO t1 VALUES ('aaa', '1111', 1); +INSERT INTO t1 VALUES ('bbb', '2222', 2); +INSERT INTO t1 VALUES ('ccc', '3333', 3); +ALTER TABLE t1 ADD INDEX kab(a,b), ALGORITHM=INPLACE; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `pk` char(8) COLLATE utf8_bin NOT NULL, + `a` varchar(11) COLLATE utf8_bin DEFAULT NULL, + `b` int(10) unsigned DEFAULT NULL, + PRIMARY KEY (`pk`), + KEY `kab` (`a`,`b`) +) ENGINE=ROCKSDB DEFAULT CHARSET=utf8 COLLATE=utf8_bin +CHECK TABLE t1; +Table Op Msg_type Msg_text +test.t1 check status OK +SELECT * FROM t1 FORCE INDEX(kab) WHERE a > '2' AND b < 3; +pk a b +bbb 2222 2 +DROP TABLE t1; +CREATE TABLE t1 (pk CHAR(8) PRIMARY KEY, a VARCHAR(11), b INT UNSIGNED) ENGINE=rocksdb charset utf8 collate utf8_bin; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `pk` char(8) COLLATE 
utf8_bin NOT NULL, + `a` varchar(11) COLLATE utf8_bin DEFAULT NULL, + `b` int(10) unsigned DEFAULT NULL, + PRIMARY KEY (`pk`) +) ENGINE=ROCKSDB DEFAULT CHARSET=utf8 COLLATE=utf8_bin +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +pk char(8) NO PRI NULL +a varchar(11) YES NULL +b int(10) unsigned YES NULL +INSERT INTO t1 VALUES ('aaa', '1111', 1); +INSERT INTO t1 VALUES ('bbb', '2222', 2); +INSERT INTO t1 VALUES ('ccc', '3333', 3); +ALTER TABLE t1 ADD INDEX kab(a,b), ALGORITHM=INPLACE; +ALTER TABLE t1 ADD INDEX ka(a), DROP INDEX kab, ALGORITHM=INPLACE; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `pk` char(8) COLLATE utf8_bin NOT NULL, + `a` varchar(11) COLLATE utf8_bin DEFAULT NULL, + `b` int(10) unsigned DEFAULT NULL, + PRIMARY KEY (`pk`), + KEY `ka` (`a`) +) ENGINE=ROCKSDB DEFAULT CHARSET=utf8 COLLATE=utf8_bin +CHECK TABLE t1; +Table Op Msg_type Msg_text +test.t1 check status OK +SELECT * FROM t1 FORCE INDEX(ka) WHERE a > '2' AND b < 3; +pk a b +bbb 2222 2 +DROP TABLE t1; +CREATE TABLE t1 (pk CHAR(8) PRIMARY KEY, a VARCHAR(11), b INT UNSIGNED) ENGINE=rocksdb charset utf8 collate utf8_bin; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `pk` char(8) COLLATE utf8_bin NOT NULL, + `a` varchar(11) COLLATE utf8_bin DEFAULT NULL, + `b` int(10) unsigned DEFAULT NULL, + PRIMARY KEY (`pk`) +) ENGINE=ROCKSDB DEFAULT CHARSET=utf8 COLLATE=utf8_bin +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +pk char(8) NO PRI NULL +a varchar(11) YES NULL +b int(10) unsigned YES NULL +INSERT INTO t1 VALUES ('aaa', '1111', 1); +INSERT INTO t1 VALUES ('bbb', '2222', 2); +INSERT INTO t1 VALUES ('ccc', '3333', 3); +ALTER TABLE t1 ADD INDEX kab(a,b), ADD INDEX ka(a), ADD INDEX kb(b), ALGORITHM=INPLACE; +ALTER TABLE t1 DROP INDEX ka, DROP INDEX kb, ALGORITHM=INPLACE; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `pk` char(8) COLLATE utf8_bin NOT NULL, + `a` varchar(11) COLLATE utf8_bin DEFAULT NULL, + `b` 
int(10) unsigned DEFAULT NULL, + PRIMARY KEY (`pk`), + KEY `kab` (`a`,`b`) +) ENGINE=ROCKSDB DEFAULT CHARSET=utf8 COLLATE=utf8_bin +CHECK TABLE t1; +Table Op Msg_type Msg_text +test.t1 check status OK +SELECT * FROM t1 FORCE INDEX(kab) WHERE a > '2' AND b < 3; +pk a b +bbb 2222 2 +DROP TABLE t1; +CREATE TABLE t1 (a INT, b INT, KEY ka(a), KEY kab(a,b)) ENGINE=RocksDB; +INSERT INTO t1 (a, b) VALUES (1, 5); +INSERT INTO t1 (a, b) VALUES (2, 6); +INSERT INTO t1 (a, b) VALUES (3, 7); +CREATE INDEX kb on t1 (b); +CREATE INDEX kba on t1 (b,a); +DROP INDEX ka on t1; +DROP INDEX kab on t1; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` int(11) DEFAULT NULL, + `b` int(11) DEFAULT NULL, + KEY `kb` (`b`), + KEY `kba` (`b`,`a`) +) ENGINE=ROCKSDB DEFAULT CHARSET=latin1 +CHECK TABLE t1; +Table Op Msg_type Msg_text +test.t1 check status OK +SELECT * FROM t1 FORCE INDEX(kb) WHERE b > 5; +a b +2 6 +3 7 +SELECT * FROM t1 FORCE INDEX(kba) WHERE a > 2; +a b +3 7 +DROP TABLE t1; +CREATE TABLE t1 (i INT, j INT, k INT, PRIMARY KEY (i), KEY(j)) ENGINE = ROCKSDB PARTITION BY KEY(i) PARTITIONS 4; +ALTER TABLE t1 ADD INDEX kij(i,j), ALGORITHM=INPLACE; +DROP INDEX kij ON t1; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `i` int(11) NOT NULL, + `j` int(11) DEFAULT NULL, + `k` int(11) DEFAULT NULL, + PRIMARY KEY (`i`), + KEY `j` (`j`) +) ENGINE=ROCKSDB DEFAULT CHARSET=latin1 + PARTITION BY KEY (i) +PARTITIONS 4 +SELECT * FROM t1 ORDER BY i LIMIT 10; +i j k +1 1 1 +2 2 2 +3 3 3 +4 4 4 +5 5 5 +6 6 6 +7 7 7 +8 8 8 +9 9 9 +10 10 10 +SELECT COUNT(*) FROM t1; +COUNT(*) +100 +DROP TABLE t1; +set @tmp_rocksdb_strict_collation_check= @@rocksdb_strict_collation_check; +set global rocksdb_strict_collation_check=1; +CREATE TABLE t1 (a INT, b TEXT); +ALTER TABLE t1 ADD KEY kb(b(10)); +ERROR HY000: Unsupported collation on string indexed column test.t1.b Use binary collation (latin1_bin, binary, utf8_bin). 
+ALTER TABLE t1 ADD PRIMARY KEY(a); +DROP TABLE t1; +set global rocksdb_strict_collation_check= @tmp_rocksdb_strict_collation_check; +set global rocksdb_bulk_load=1; +# Establish connection con1 (user=root) +connect con1,localhost,root,,; +# Switch to connection con1 +connection con1; +show global variables like 'rocksdb_bulk_load'; +Variable_name Value +rocksdb_bulk_load ON +show session variables like 'rocksdb_bulk_load'; +Variable_name Value +rocksdb_bulk_load ON +CREATE TABLE t1 (i INT, j INT, PRIMARY KEY (i)) ENGINE = ROCKSDB; +INSERT INTO t1 VALUES (1,1); +# Disconnecting on con1 +disconnect con1; +# Establish connection con2 (user=root) +connect con2,localhost,root,,; +# Switch to connection con2 +connection con2; +ALTER TABLE t1 ADD INDEX kj(j), ALGORITHM=INPLACE; +SELECT COUNT(*) FROM t1 FORCE INDEX(PRIMARY); +COUNT(*) +1 +SELECT COUNT(*) FROM t1 FORCE INDEX(kj); +COUNT(*) +1 +DROP TABLE t1; +disconnect con2; +# Establish connection con1 (user=root) +connect con1,localhost,root,,; +# Establish connection con2 (user=root) +connect con2,localhost,root,,; +# Switch to connection con1 +connection con1; +CREATE TABLE t1 (i INT, j INT, PRIMARY KEY (i)) ENGINE = ROCKSDB; +set rocksdb_bulk_load=1; +INSERT INTO t1 VALUES (1,1); +# Switch to connection con2 +connection con2; +SELECT COUNT(*) FROM t1 FORCE INDEX(PRIMARY); +COUNT(*) +0 +ALTER TABLE t1 ADD INDEX kj(j), ALGORITHM=INPLACE; +SELECT COUNT(*) FROM t1 FORCE INDEX(PRIMARY); +COUNT(*) +1 +SELECT COUNT(*) FROM t1 FORCE INDEX(kj); +COUNT(*) +1 +set global rocksdb_bulk_load=0; +DROP TABLE t1; +connection default; +SET @prior_rocksdb_merge_combine_read_size= @@rocksdb_merge_combine_read_size; +SET @prior_rocksdb_strict_collation_check= @@rocksdb_strict_collation_check; +SET @prior_rocksdb_merge_buf_size = @@rocksdb_merge_buf_size; +SET global rocksdb_strict_collation_check = off; +SET session rocksdb_merge_combine_read_size = 566; +SET session rocksdb_merge_buf_size = 336; +show variables like 
'%rocksdb_bulk_load%'; +Variable_name Value +rocksdb_bulk_load OFF +rocksdb_bulk_load_size 1000 +CREATE TABLE t1 (a VARCHAR(80)) ENGINE=RocksDB; +INSERT INTO t1 (a) VALUES (REPEAT("a", 80)); +INSERT INTO t1 (a) VALUES (REPEAT("a", 80)); +INSERT INTO t1 (a) VALUES (REPEAT("a", 80)); +INSERT INTO t1 (a) VALUES (REPEAT("a", 80)); +ALTER TABLE t1 ADD INDEX ka(a), ALGORITHM=INPLACE; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` varchar(80) DEFAULT NULL, + KEY `ka` (`a`) +) ENGINE=ROCKSDB DEFAULT CHARSET=latin1 +CHECK TABLE t1; +Table Op Msg_type Msg_text +test.t1 check status OK +SELECT * FROM t1 FORCE INDEX(ka) WHERE a > ""; +a +aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa +aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa +aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa +aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa +DROP TABLE t1; +SET session rocksdb_merge_buf_size = @prior_rocksdb_merge_buf_size; +SET session rocksdb_merge_combine_read_size = @prior_rocksdb_merge_combine_read_size; +SET global rocksdb_strict_collation_check = @prior_rocksdb_strict_collation_check; +CREATE TABLE t1 (i INT, j INT, PRIMARY KEY (i)) ENGINE = ROCKSDB; +set global rocksdb_force_flush_memtable_now=1; +ALTER TABLE t1 ADD INDEX kj(j), ALGORITHM=INPLACE; +larger +1 +larger +1 +Table Op Msg_type Msg_text +test.t1 analyze status OK +larger +1 +larger +1 +Table Op Msg_type Msg_text +test.t1 analyze status OK +Table Op Msg_type Msg_text +test.t1 analyze status OK +Table Op Msg_type Msg_text +test.t1 analyze status OK +Table Op Msg_type Msg_text +test.t1 analyze status OK +Table Op Msg_type Msg_text +test.t1 analyze status OK +Table Op Msg_type Msg_text +test.t1 analyze status OK +Table Op Msg_type Msg_text +test.t1 analyze status OK +Table Op Msg_type Msg_text +test.t1 analyze status OK +Table Op Msg_type Msg_text +test.t1 analyze 
status OK +Table Op Msg_type Msg_text +test.t1 analyze status OK +select 1300 < 1300 * 1.5 as "same"; +same +1 +DROP TABLE t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/add_index_inplace_cardinality.result b/storage/rocksdb/mysql-test/rocksdb/r/add_index_inplace_cardinality.result new file mode 100644 index 0000000000000..61105fa1ba2c9 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/add_index_inplace_cardinality.result @@ -0,0 +1,24 @@ +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 (i INT PRIMARY KEY, j INT) ENGINE = ROCKSDB; +INSERT INTO t1 VALUES (1,2), (2,4), (3,6), (4,8), (5,10); +SET debug_sync= 'rocksdb.commit_in_place_alter_table WAIT_FOR flushed'; +ALTER TABLE t1 ADD INDEX kj(j), ALGORITHM=INPLACE; +connect con1,localhost,root,,; +SET GLOBAL rocksdb_force_flush_memtable_now = 1; +SET debug_sync= 'now SIGNAL flushed'; +connection default; +SELECT * FROM INFORMATION_SCHEMA.ROCKSDB_INDEX_FILE_MAP +WHERE INDEX_NUMBER = +(SELECT INDEX_NUMBER FROM INFORMATION_SCHEMA.ROCKSDB_DDL +WHERE TABLE_NAME = 't1' AND INDEX_NAME = "PRIMARY"); +COLUMN_FAMILY INDEX_NUMBER SST_NAME NUM_ROWS DATA_SIZE ENTRY_DELETES ENTRY_SINGLEDELETES ENTRY_MERGES ENTRY_OTHERS DISTINCT_KEYS_PREFIX +# # SSTNAME 5 # # # # # 5 +SELECT * FROM INFORMATION_SCHEMA.ROCKSDB_INDEX_FILE_MAP +WHERE INDEX_NUMBER = +(SELECT INDEX_NUMBER FROM INFORMATION_SCHEMA.ROCKSDB_DDL +WHERE TABLE_NAME = 't1' AND INDEX_NAME = "kj"); +COLUMN_FAMILY INDEX_NUMBER SST_NAME NUM_ROWS DATA_SIZE ENTRY_DELETES ENTRY_SINGLEDELETES ENTRY_MERGES ENTRY_OTHERS DISTINCT_KEYS_PREFIX +# # SSTNAME 5 # # # # # 5,5 +disconnect con1; +SET debug_sync='RESET'; +DROP TABLE t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/add_index_inplace_crash.result b/storage/rocksdb/mysql-test/rocksdb/r/add_index_inplace_crash.result new file mode 100644 index 0000000000000..6abc9e6138692 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/add_index_inplace_crash.result @@ -0,0 +1,93 @@ +drop table if exists t1; +CREATE TABLE t1 (a INT, 
b INT, KEY ka(a), KEY kab(a,b)) ENGINE=RocksDB; +INSERT INTO t1 (a, b) VALUES (1, 5); +INSERT INTO t1 (a, b) VALUES (2, 6); +INSERT INTO t1 (a, b) VALUES (3, 7); +# crash_during_online_index_creation +flush logs; +SET SESSION debug_dbug="+d,crash_during_online_index_creation"; +ALTER TABLE t1 ADD INDEX kb(b), ALGORITHM=INPLACE; +ERROR HY000: Lost connection to MySQL server during query +SET SESSION debug_dbug="-d,crash_during_online_index_creation"; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` int(11) DEFAULT NULL, + `b` int(11) DEFAULT NULL, + KEY `ka` (`a`), + KEY `kab` (`a`,`b`) +) ENGINE=ROCKSDB DEFAULT CHARSET=latin1 +CHECK TABLE t1; +Table Op Msg_type Msg_text +test.t1 check status OK +DROP TABLE t1; +CREATE TABLE t1 (i INT, j INT, k INT, PRIMARY KEY (i), KEY(j)) ENGINE = ROCKSDB PARTITION BY KEY(i) PARTITIONS 4; +# crash_during_index_creation_partition +flush logs; +SET SESSION debug_dbug="+d,crash_during_index_creation_partition"; +ALTER TABLE t1 ADD INDEX kij(i,j), ALGORITHM=INPLACE; +ERROR HY000: Lost connection to MySQL server during query +SET SESSION debug_dbug="-d,crash_during_index_creation_partition"; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `i` int(11) NOT NULL, + `j` int(11) DEFAULT NULL, + `k` int(11) DEFAULT NULL, + PRIMARY KEY (`i`), + KEY `j` (`j`) +) ENGINE=ROCKSDB DEFAULT CHARSET=latin1 + PARTITION BY KEY (i) +PARTITIONS 4 +ALTER TABLE t1 ADD INDEX kij(i,j), ALGORITHM=INPLACE; +SELECT * FROM t1 ORDER BY i LIMIT 10; +i j k +1 1 1 +2 2 2 +3 3 3 +4 4 4 +5 5 5 +6 6 6 +7 7 7 +8 8 8 +9 9 9 +10 10 10 +SELECT COUNT(*) FROM t1; +COUNT(*) +100 +DROP TABLE t1; +CREATE TABLE t1 (i INT, j INT, k INT, PRIMARY KEY (i), KEY(j)) ENGINE = ROCKSDB PARTITION BY KEY(i) PARTITIONS 4; +# crash_during_index_creation_partition +flush logs; +SET SESSION debug_dbug="+d,myrocks_simulate_index_create_rollback"; +ALTER TABLE t1 ADD INDEX kij(i,j), ALGORITHM=INPLACE; +ERROR HY000: Intentional failure in inplace alter 
occurred. +SET SESSION debug_dbug="-d,myrocks_simulate_index_create_rollback"; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `i` int(11) NOT NULL, + `j` int(11) DEFAULT NULL, + `k` int(11) DEFAULT NULL, + PRIMARY KEY (`i`), + KEY `j` (`j`) +) ENGINE=ROCKSDB DEFAULT CHARSET=latin1 + PARTITION BY KEY (i) +PARTITIONS 4 +ALTER TABLE t1 ADD INDEX kij(i,j), ALGORITHM=INPLACE; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `i` int(11) NOT NULL, + `j` int(11) DEFAULT NULL, + `k` int(11) DEFAULT NULL, + PRIMARY KEY (`i`), + KEY `j` (`j`), + KEY `kij` (`i`,`j`) +) ENGINE=ROCKSDB DEFAULT CHARSET=latin1 + PARTITION BY KEY (i) +PARTITIONS 4 +SELECT COUNT(*) FROM t1; +COUNT(*) +100 +DROP TABLE t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/add_index_inplace_sstfilewriter.result b/storage/rocksdb/mysql-test/rocksdb/r/add_index_inplace_sstfilewriter.result new file mode 100644 index 0000000000000..2d1ba7ca1d895 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/add_index_inplace_sstfilewriter.result @@ -0,0 +1,72 @@ +drop table if exists t1; +CREATE TABLE t1(pk CHAR(5) PRIMARY KEY, a char(30), b char(30)) COLLATE 'latin1_bin'; +set rocksdb_bulk_load=1; +set rocksdb_bulk_load_size=100000; +LOAD DATA INFILE INTO TABLE t1; +set rocksdb_bulk_load=0; +select count(pk) from t1; +count(pk) +3000000 +select count(a) from t1; +count(a) +3000000 +select count(b) from t1; +count(b) +3000000 +ALTER TABLE t1 ADD INDEX kb(b), ALGORITHM=INPLACE; +ALTER TABLE t1 ADD INDEX kb_copy(b), ALGORITHM=COPY; +SELECT COUNT(*) as c FROM +(SELECT COALESCE(LOWER(CONV(BIT_XOR(CAST(CRC32(CONCAT_WS('#', `b`, CONCAT(ISNULL(`b`)))) AS UNSIGNED)), 10, 16)), 0) AS crc FROM `t1` FORCE INDEX(`kb`) +UNION DISTINCT +SELECT COALESCE(LOWER(CONV(BIT_XOR(CAST(CRC32(CONCAT_WS('#', +`b`, CONCAT(ISNULL(`b`)))) AS UNSIGNED)), 10, 16)), 0) AS crc FROM `t1` FORCE +INDEX(`kb_copy`)) as temp; +c +1 +select count(*) from t1 FORCE INDEX(kb); +count(*) +3000000 +select count(*) 
from t1 FORCE INDEX(kb_copy); +count(*) +3000000 +select count(*) from t1 FORCE INDEX(PRIMARY); +count(*) +3000000 +ALTER TABLE t1 DROP INDEX kb, ALGORITHM=INPLACE; +ALTER TABLE t1 DROP INDEX kb_copy, ALGORITHM=INPLACE; +ALTER TABLE t1 ADD INDEX kb(b), ADD INDEX kab(a,b), ALGORITHM=INPLACE; +SELECT COUNT(*) FROM t1 FORCE INDEX(kab); +COUNT(*) +3000000 +SELECT COUNT(*) FROM t1 FORCE INDEX(kb); +COUNT(*) +3000000 +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `pk` char(5) COLLATE latin1_bin NOT NULL, + `a` char(30) COLLATE latin1_bin DEFAULT NULL, + `b` char(30) COLLATE latin1_bin DEFAULT NULL, + PRIMARY KEY (`pk`), + KEY `kb` (`b`), + KEY `kab` (`a`,`b`) +) ENGINE=ROCKSDB DEFAULT CHARSET=latin1 COLLATE=latin1_bin +DROP TABLE t1; +CREATE TABLE t1 (a INT PRIMARY KEY, b INT, KEY kab(a,b)) ENGINE=RocksDB; +INSERT INTO t1 (a, b) VALUES (1, 5); +INSERT INTO t1 (a, b) VALUES (2, 6); +INSERT INTO t1 (a, b) VALUES (3, 7); +ALTER TABLE t1 DROP INDEX kab, ALGORITHM=INPLACE; +ALTER TABLE t1 ADD INDEX kb(b) comment 'rev:cf1', ALGORITHM=INPLACE; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` int(11) NOT NULL, + `b` int(11) DEFAULT NULL, + PRIMARY KEY (`a`), + KEY `kb` (`b`) COMMENT 'rev:cf1' +) ENGINE=ROCKSDB DEFAULT CHARSET=latin1 +SELECT COUNT(*) FROM t1 FORCE INDEX(kb); +COUNT(*) +3 +DROP TABLE t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/add_unique_index_inplace.result b/storage/rocksdb/mysql-test/rocksdb/r/add_unique_index_inplace.result new file mode 100644 index 0000000000000..9270dca7b1d6b --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/add_unique_index_inplace.result @@ -0,0 +1,89 @@ +drop table if exists t1; +CREATE TABLE t1 (a INT, b INT, PRIMARY KEY ka(a)) ENGINE=RocksDB; +INSERT INTO t1 (a, b) VALUES (1, 5); +INSERT INTO t1 (a, b) VALUES (2, 6); +INSERT INTO t1 (a, b) VALUES (3, 7); +INSERT INTO t1 (a,b) VALUES (4,5); +ALTER TABLE t1 ADD UNIQUE INDEX kb(b), ALGORITHM=INPLACE; +ERROR 23000: Duplicate 
entry '5' for key 'kb' +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` int(11) NOT NULL, + `b` int(11) DEFAULT NULL, + PRIMARY KEY (`a`) +) ENGINE=ROCKSDB DEFAULT CHARSET=latin1 +DROP TABLE t1; +CREATE TABLE t1 (a INT, b INT, PRIMARY KEY ka(a)) ENGINE=RocksDB; +INSERT INTO t1 (a, b) VALUES (1, 5); +INSERT INTO t1 (a, b) VALUES (2, 6); +INSERT INTO t1 (a, b) VALUES (3, 7); +ALTER TABLE t1 ADD UNIQUE INDEX kb(b), ALGORITHM=INPLACE; +INSERT INTO t1 (a,b) VALUES (4,5); +ERROR 23000: Duplicate entry '5' for key 'kb' +INSERT INTO t1 (a,b) VALUES (5,8); +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` int(11) NOT NULL, + `b` int(11) DEFAULT NULL, + PRIMARY KEY (`a`), + UNIQUE KEY `kb` (`b`) +) ENGINE=ROCKSDB DEFAULT CHARSET=latin1 +SELECT * FROM t1 FORCE INDEX(kb); +a b +1 5 +2 6 +3 7 +5 8 +DROP TABLE t1; +CREATE TABLE t1 (a INT, b INT, PRIMARY KEY ka(a)) ENGINE=RocksDB; +INSERT INTO t1 (a, b) VALUES (1, 5); +INSERT INTO t1 (a, b) VALUES (2, NULL); +INSERT INTO t1 (a, b) VALUES (3, NULL); +ALTER TABLE t1 ADD UNIQUE INDEX kb(b), ALGORITHM=INPLACE; +INSERT INTO t1 (a, b) VALUES (4, NULL); +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` int(11) NOT NULL, + `b` int(11) DEFAULT NULL, + PRIMARY KEY (`a`), + UNIQUE KEY `kb` (`b`) +) ENGINE=ROCKSDB DEFAULT CHARSET=latin1 +SELECT COUNT(*) FROM t1 FORCE INDEX(kb); +COUNT(*) +4 +DROP TABLE t1; +CREATE TABLE t1 (a INT, b INT, c INT, PRIMARY KEY ka(a)) ENGINE=RocksDB; +INSERT INTO t1 (a,b,c) VALUES (1,1,NULL); +INSERT INTO t1 (a,b,c) VALUES (2,1,NULL); +INSERT INTO t1 (a,b,c) VALUES (3,1,NULL); +INSERT INTO t1 (a,b,c) VALUES (4,1,5); +ALTER TABLE t1 ADD UNIQUE INDEX kbc(b,c), ALGORITHM=INPLACE; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` int(11) NOT NULL, + `b` int(11) DEFAULT NULL, + `c` int(11) DEFAULT NULL, + PRIMARY KEY (`a`), + UNIQUE KEY `kbc` (`b`,`c`) +) ENGINE=ROCKSDB DEFAULT CHARSET=latin1 +SELECT COUNT(*) FROM t1 FORCE 
INDEX(kbc); +COUNT(*) +4 +DROP TABLE t1; +CREATE TABLE t1 (a INT, b INT) ENGINE=RocksDB; +INSERT INTO t1 (a, b) VALUES (1, 5); +INSERT INTO t1 (a, b) VALUES (2, 6); +INSERT INTO t1 (a, b) VALUES (3, 7); +ALTER TABLE t1 ADD UNIQUE INDEX kb(b); +ERROR HY000: Unique index support is disabled when the table has no primary key. +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` int(11) DEFAULT NULL, + `b` int(11) DEFAULT NULL +) ENGINE=ROCKSDB DEFAULT CHARSET=latin1 +DROP TABLE t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/allow_no_pk_concurrent_insert.result b/storage/rocksdb/mysql-test/rocksdb/r/allow_no_pk_concurrent_insert.result new file mode 100644 index 0000000000000..4fef9bce405a4 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/allow_no_pk_concurrent_insert.result @@ -0,0 +1,7 @@ +drop table if exists t1; +# Binary must be compiled with debug for this test +CREATE TABLE t1 (a INT) ENGINE=rocksdb; +SELECT COUNT(*) from t1; +COUNT(*) +400 +DROP TABLE t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/allow_no_primary_key.result b/storage/rocksdb/mysql-test/rocksdb/r/allow_no_primary_key.result new file mode 100644 index 0000000000000..34a14ff39d82c --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/allow_no_primary_key.result @@ -0,0 +1,251 @@ +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 (a INT, b CHAR(8)) ENGINE=rocksdb; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` int(11) DEFAULT NULL, + `b` char(8) DEFAULT NULL +) ENGINE=ROCKSDB DEFAULT CHARSET=latin1 +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +a int(11) YES NULL +b char(8) YES NULL +INSERT INTO t1 (a,b) VALUES (76,'bar'); +INSERT INTO t1 (a,b) VALUES (35,'foo'); +INSERT INTO t1 (a,b) VALUES (77,'baz'); +SELECT * FROM t1 WHERE a = 35; +a b +35 foo +SELECT * FROM t1 WHERE a = 35 AND b = 'foo'; +a b +35 foo +SELECT * FROM t1 WHERE a = 77 OR b = 'bar'; +a b +76 bar +77 baz +SELECT * FROM t1 WHERE a > 35; +a b +76 bar +77 baz 
+SELECT * FROM t1; +a b +35 foo +76 bar +77 baz +UPDATE t1 SET a=a+100; +SELECT * FROM t1; +a b +135 foo +176 bar +177 baz +UPDATE t1 SET a=a-100, b='bbb' WHERE a>100; +SELECT * FROM t1; +a b +35 bbb +76 bbb +77 bbb +UPDATE t1 SET a=300, b='ccc' WHERE a>70; +SELECT * FROM t1; +a b +300 ccc +300 ccc +35 bbb +UPDATE t1 SET a=123 WHERE a=35; +SELECT * FROM t1; +a b +123 bbb +300 ccc +300 ccc +UPDATE t1 SET a=321 WHERE b='ccc'; +SELECT * FROM t1; +a b +123 bbb +321 ccc +321 ccc +INSERT INTO t1 (a,b) VALUES (45,'bob'); +SELECT * FROM t1; +a b +123 bbb +321 ccc +321 ccc +45 bob +DELETE FROM t1 WHERE a=123; +SELECT * FROM t1; +a b +321 ccc +321 ccc +45 bob +DELETE FROM t1 WHERE b > 'bbb' AND a > 100; +SELECT * FROM t1; +a b +45 bob +TRUNCATE TABLE t1; +DROP TABLE t1; +CREATE TABLE t1 (a INT, c CHAR(8)) ENGINE=rocksdb; +INSERT INTO t1 VALUES (1,'a'),(5,'z'); +ALTER TABLE t1 ADD COLUMN b INT; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` int(11) DEFAULT NULL, + `c` char(8) DEFAULT NULL, + `b` int(11) DEFAULT NULL +) ENGINE=ROCKSDB DEFAULT CHARSET=latin1 +SELECT * FROM t1; +a c b +1 a NULL +5 z NULL +ALTER TABLE t1 DROP COLUMN b; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` int(11) DEFAULT NULL, + `c` char(8) DEFAULT NULL +) ENGINE=ROCKSDB DEFAULT CHARSET=latin1 +SELECT * FROM t1; +a c +1 a +5 z +DROP TABLE t1; +CREATE TABLE t1 (a INT, b CHAR(8), pk INT AUTO_INCREMENT PRIMARY KEY) ENGINE=rocksdb; +ALTER TABLE t1 DROP COLUMN pk; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` int(11) DEFAULT NULL, + `b` char(8) DEFAULT NULL +) ENGINE=ROCKSDB DEFAULT CHARSET=latin1 +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +a int(11) YES NULL +b char(8) YES NULL +INSERT INTO t1 (a,b) VALUES (76,'bar'); +INSERT INTO t1 (a,b) VALUES (35,'foo'); +INSERT INTO t1 (a,b) VALUES (77,'baz'); +SELECT * FROM t1 WHERE a = 35; +a b +35 foo +SELECT * FROM t1 WHERE a = 35 AND b = 'foo'; +a b +35 foo +SELECT * FROM 
t1 WHERE a = 77 OR b = 'bar'; +a b +76 bar +77 baz +SELECT * FROM t1 WHERE a > 35; +a b +76 bar +77 baz +SELECT * FROM t1; +a b +35 foo +76 bar +77 baz +UPDATE t1 SET a=a+100; +SELECT * FROM t1; +a b +135 foo +176 bar +177 baz +UPDATE t1 SET a=a-100, b='bbb' WHERE a>100; +SELECT * FROM t1; +a b +35 bbb +76 bbb +77 bbb +UPDATE t1 SET a=300, b='ccc' WHERE a>70; +SELECT * FROM t1; +a b +300 ccc +300 ccc +35 bbb +UPDATE t1 SET a=123 WHERE a=35; +SELECT * FROM t1; +a b +123 bbb +300 ccc +300 ccc +UPDATE t1 SET a=321 WHERE b='ccc'; +SELECT * FROM t1; +a b +123 bbb +321 ccc +321 ccc +INSERT INTO t1 (a,b) VALUES (45,'bob'); +SELECT * FROM t1; +a b +123 bbb +321 ccc +321 ccc +45 bob +DELETE FROM t1 WHERE a=123; +SELECT * FROM t1; +a b +321 ccc +321 ccc +45 bob +DELETE FROM t1 WHERE b > 'bbb' AND a > 100; +SELECT * FROM t1; +a b +45 bob +TRUNCATE TABLE t1; +DROP TABLE t1; +DROP TABLE IF EXISTS t1,t2; +CREATE TABLE t1 (a INT, b CHAR(8)) ENGINE=rocksdb; +INSERT INTO t1 (a,b) VALUES (1,'a'),(2,'b'); +CREATE TABLE t2 (a INT, b CHAR(8)) ENGINE=rocksdb; +CHECK TABLE t1; +Table Op Msg_type Msg_text +test.t1 check status OK +INSERT INTO t1 (a,b) VALUES (3,'c'); +INSERT INTO t2 (a,b) VALUES (4,'d'); +CHECK TABLE t1, t2 FOR UPGRADE; +Table Op Msg_type Msg_text +test.t1 check status OK +test.t2 check status OK +INSERT INTO t2 (a,b) VALUES (5,'e'); +CHECK TABLE t2 QUICK; +Table Op Msg_type Msg_text +test.t2 check status OK +INSERT INTO t1 (a,b) VALUES (6,'f'); +CHECK TABLE t1 FAST; +Table Op Msg_type Msg_text +test.t1 check status OK +INSERT INTO t1 (a,b) VALUES (7,'g'); +INSERT INTO t2 (a,b) VALUES (8,'h'); +CHECK TABLE t2, t1 MEDIUM; +Table Op Msg_type Msg_text +test.t2 check status OK +test.t1 check status OK +INSERT INTO t1 (a,b) VALUES (9,'i'); +INSERT INTO t2 (a,b) VALUES (10,'j'); +CHECK TABLE t1, t2 EXTENDED; +Table Op Msg_type Msg_text +test.t1 check status OK +test.t2 check status OK +INSERT INTO t1 (a,b) VALUES (11,'k'); +CHECK TABLE t1 CHANGED; +Table Op Msg_type Msg_text 
+test.t1 check status OK +DROP TABLE t1, t2; +CREATE TABLE t1 (a INT, b CHAR(8), UNIQUE INDEX(a)) ENGINE=rocksdb; +ERROR HY000: Unique index support is disabled when the table has no primary key. +CREATE TABLE t1 (a INT, b CHAR(8)) ENGINE=rocksdb; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` int(11) DEFAULT NULL, + `b` char(8) DEFAULT NULL +) ENGINE=ROCKSDB DEFAULT CHARSET=latin1 +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +a int(11) YES NULL +b char(8) YES NULL +INSERT INTO t1 (a,b) VALUES (35,'foo'); +INSERT INTO t1 (a,b) VALUES (35,'foo'); +INSERT INTO t1 (a,b) VALUES (36,'foo'); +DELETE FROM t1 WHERE a = 35 AND b = 'foo'; +SELECT * FROM t1; +a b +36 foo +DROP TABLE t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/allow_no_primary_key_with_sk.result b/storage/rocksdb/mysql-test/rocksdb/r/allow_no_primary_key_with_sk.result new file mode 100644 index 0000000000000..f8508febb0111 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/allow_no_primary_key_with_sk.result @@ -0,0 +1,780 @@ +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 (a INT, b CHAR(8), KEY(a)) ENGINE=rocksdb; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` int(11) DEFAULT NULL, + `b` char(8) DEFAULT NULL, + KEY `a` (`a`) +) ENGINE=ROCKSDB DEFAULT CHARSET=latin1 +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +a int(11) YES MUL NULL +b char(8) YES NULL +INSERT INTO t1 (a,b) VALUES (76,'bar'); +INSERT INTO t1 (a,b) VALUES (35,'foo'); +INSERT INTO t1 (a,b) VALUES (77,'baz'); +SELECT * FROM t1 WHERE a = 35; +a b +35 foo +SELECT * FROM t1 WHERE a = 35 AND b = 'foo'; +a b +35 foo +SELECT * FROM t1 WHERE a = 77 OR b = 'bar'; +a b +76 bar +77 baz +SELECT * FROM t1 WHERE a > 35; +a b +76 bar +77 baz +SELECT * FROM t1; +a b +35 foo +76 bar +77 baz +UPDATE t1 SET a=a+100; +SELECT * FROM t1; +a b +135 foo +176 bar +177 baz +UPDATE t1 SET a=a-100, b='bbb' WHERE a>100; +SELECT * FROM t1; +a b +35 bbb +76 bbb +77 bbb +UPDATE t1 SET a=300, 
b='ccc' WHERE a>70; +SELECT * FROM t1; +a b +300 ccc +300 ccc +35 bbb +UPDATE t1 SET a=123 WHERE a=35; +SELECT * FROM t1; +a b +123 bbb +300 ccc +300 ccc +UPDATE t1 SET a=321 WHERE b='ccc'; +SELECT * FROM t1; +a b +123 bbb +321 ccc +321 ccc +INSERT INTO t1 (a,b) VALUES (45,'bob'); +SELECT * FROM t1; +a b +123 bbb +321 ccc +321 ccc +45 bob +DELETE FROM t1 WHERE a=123; +SELECT * FROM t1; +a b +321 ccc +321 ccc +45 bob +DELETE FROM t1 WHERE b > 'bbb' AND a > 100; +SELECT * FROM t1; +a b +45 bob +TRUNCATE TABLE t1; +DROP TABLE t1; +CREATE TABLE t1 (a INT, b CHAR(8)) ENGINE=rocksdb; +ALTER TABLE t1 ADD INDEX (b); +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` int(11) DEFAULT NULL, + `b` char(8) DEFAULT NULL, + KEY `b` (`b`) +) ENGINE=ROCKSDB DEFAULT CHARSET=latin1 +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +a int(11) YES NULL +b char(8) YES MUL NULL +INSERT INTO t1 (a,b) VALUES (76,'bar'); +INSERT INTO t1 (a,b) VALUES (35,'foo'); +INSERT INTO t1 (a,b) VALUES (77,'baz'); +SELECT * FROM t1 WHERE a = 35; +a b +35 foo +SELECT * FROM t1 WHERE a = 35 AND b = 'foo'; +a b +35 foo +SELECT * FROM t1 WHERE a = 77 OR b = 'bar'; +a b +76 bar +77 baz +SELECT * FROM t1 WHERE a > 35; +a b +76 bar +77 baz +SELECT * FROM t1; +a b +35 foo +76 bar +77 baz +UPDATE t1 SET a=a+100; +SELECT * FROM t1; +a b +135 foo +176 bar +177 baz +UPDATE t1 SET a=a-100, b='bbb' WHERE a>100; +SELECT * FROM t1; +a b +35 bbb +76 bbb +77 bbb +UPDATE t1 SET a=300, b='ccc' WHERE a>70; +SELECT * FROM t1; +a b +300 ccc +300 ccc +35 bbb +UPDATE t1 SET a=123 WHERE a=35; +SELECT * FROM t1; +a b +123 bbb +300 ccc +300 ccc +UPDATE t1 SET a=321 WHERE b='ccc'; +SELECT * FROM t1; +a b +123 bbb +321 ccc +321 ccc +INSERT INTO t1 (a,b) VALUES (45,'bob'); +SELECT * FROM t1; +a b +123 bbb +321 ccc +321 ccc +45 bob +DELETE FROM t1 WHERE a=123; +SELECT * FROM t1; +a b +321 ccc +321 ccc +45 bob +DELETE FROM t1 WHERE b > 'bbb' AND a > 100; +SELECT * FROM t1; +a b +45 bob +TRUNCATE TABLE t1; 
+ALTER TABLE t1 DROP INDEX b; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` int(11) DEFAULT NULL, + `b` char(8) DEFAULT NULL +) ENGINE=ROCKSDB DEFAULT CHARSET=latin1 +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +a int(11) YES NULL +b char(8) YES NULL +INSERT INTO t1 (a,b) VALUES (76,'bar'); +INSERT INTO t1 (a,b) VALUES (35,'foo'); +INSERT INTO t1 (a,b) VALUES (77,'baz'); +SELECT * FROM t1 WHERE a = 35; +a b +35 foo +SELECT * FROM t1 WHERE a = 35 AND b = 'foo'; +a b +35 foo +SELECT * FROM t1 WHERE a = 77 OR b = 'bar'; +a b +76 bar +77 baz +SELECT * FROM t1 WHERE a > 35; +a b +76 bar +77 baz +SELECT * FROM t1; +a b +35 foo +76 bar +77 baz +UPDATE t1 SET a=a+100; +SELECT * FROM t1; +a b +135 foo +176 bar +177 baz +UPDATE t1 SET a=a-100, b='bbb' WHERE a>100; +SELECT * FROM t1; +a b +35 bbb +76 bbb +77 bbb +UPDATE t1 SET a=300, b='ccc' WHERE a>70; +SELECT * FROM t1; +a b +300 ccc +300 ccc +35 bbb +UPDATE t1 SET a=123 WHERE a=35; +SELECT * FROM t1; +a b +123 bbb +300 ccc +300 ccc +UPDATE t1 SET a=321 WHERE b='ccc'; +SELECT * FROM t1; +a b +123 bbb +321 ccc +321 ccc +INSERT INTO t1 (a,b) VALUES (45,'bob'); +SELECT * FROM t1; +a b +123 bbb +321 ccc +321 ccc +45 bob +DELETE FROM t1 WHERE a=123; +SELECT * FROM t1; +a b +321 ccc +321 ccc +45 bob +DELETE FROM t1 WHERE b > 'bbb' AND a > 100; +SELECT * FROM t1; +a b +45 bob +TRUNCATE TABLE t1; +DROP TABLE t1; +CREATE TABLE t1 (a INT, b CHAR(8), pk INT AUTO_INCREMENT PRIMARY KEY) ENGINE=rocksdb; +ALTER TABLE t1 DROP COLUMN pk; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` int(11) DEFAULT NULL, + `b` char(8) DEFAULT NULL +) ENGINE=ROCKSDB DEFAULT CHARSET=latin1 +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +a int(11) YES NULL +b char(8) YES NULL +INSERT INTO t1 (a,b) VALUES (76,'bar'); +INSERT INTO t1 (a,b) VALUES (35,'foo'); +INSERT INTO t1 (a,b) VALUES (77,'baz'); +SELECT * FROM t1 WHERE a = 35; +a b +35 foo +SELECT * FROM t1 WHERE a = 35 AND b = 'foo'; 
+a b +35 foo +SELECT * FROM t1 WHERE a = 77 OR b = 'bar'; +a b +76 bar +77 baz +SELECT * FROM t1 WHERE a > 35; +a b +76 bar +77 baz +SELECT * FROM t1; +a b +35 foo +76 bar +77 baz +UPDATE t1 SET a=a+100; +SELECT * FROM t1; +a b +135 foo +176 bar +177 baz +UPDATE t1 SET a=a-100, b='bbb' WHERE a>100; +SELECT * FROM t1; +a b +35 bbb +76 bbb +77 bbb +UPDATE t1 SET a=300, b='ccc' WHERE a>70; +SELECT * FROM t1; +a b +300 ccc +300 ccc +35 bbb +UPDATE t1 SET a=123 WHERE a=35; +SELECT * FROM t1; +a b +123 bbb +300 ccc +300 ccc +UPDATE t1 SET a=321 WHERE b='ccc'; +SELECT * FROM t1; +a b +123 bbb +321 ccc +321 ccc +INSERT INTO t1 (a,b) VALUES (45,'bob'); +SELECT * FROM t1; +a b +123 bbb +321 ccc +321 ccc +45 bob +DELETE FROM t1 WHERE a=123; +SELECT * FROM t1; +a b +321 ccc +321 ccc +45 bob +DELETE FROM t1 WHERE b > 'bbb' AND a > 100; +SELECT * FROM t1; +a b +45 bob +TRUNCATE TABLE t1; +DROP TABLE t1; +# +# MDEV-4313: RocksDB: Server crashes in Rdb_key_def::setup on dropping the primary key column +# +CREATE TABLE t1 (pk INT PRIMARY KEY, i INT NOT NULL, KEY(i)) ENGINE=RocksDB; +ALTER TABLE t1 DROP COLUMN `pk`; +DROP TABLE t1; +CREATE TABLE t1 (a INT, b CHAR(8), KEY(a), KEY(b)) ENGINE=rocksdb; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` int(11) DEFAULT NULL, + `b` char(8) DEFAULT NULL, + KEY `a` (`a`), + KEY `b` (`b`) +) ENGINE=ROCKSDB DEFAULT CHARSET=latin1 +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +a int(11) YES MUL NULL +b char(8) YES MUL NULL +INSERT INTO t1 (a,b) VALUES (76,'bar'); +INSERT INTO t1 (a,b) VALUES (35,'foo'); +INSERT INTO t1 (a,b) VALUES (77,'baz'); +SELECT * FROM t1 WHERE a = 35; +a b +35 foo +SELECT * FROM t1 WHERE a = 35 AND b = 'foo'; +a b +35 foo +SELECT * FROM t1 WHERE a = 77 OR b = 'bar'; +a b +76 bar +77 baz +SELECT * FROM t1 WHERE a > 35; +a b +76 bar +77 baz +SELECT * FROM t1; +a b +35 foo +76 bar +77 baz +UPDATE t1 SET a=a+100; +SELECT * FROM t1; +a b +135 foo +176 bar +177 baz +UPDATE t1 SET a=a-100, 
b='bbb' WHERE a>100; +SELECT * FROM t1; +a b +35 bbb +76 bbb +77 bbb +UPDATE t1 SET a=300, b='ccc' WHERE a>70; +SELECT * FROM t1; +a b +300 ccc +300 ccc +35 bbb +UPDATE t1 SET a=123 WHERE a=35; +SELECT * FROM t1; +a b +123 bbb +300 ccc +300 ccc +UPDATE t1 SET a=321 WHERE b='ccc'; +SELECT * FROM t1; +a b +123 bbb +321 ccc +321 ccc +INSERT INTO t1 (a,b) VALUES (45,'bob'); +SELECT * FROM t1; +a b +123 bbb +321 ccc +321 ccc +45 bob +DELETE FROM t1 WHERE a=123; +SELECT * FROM t1; +a b +321 ccc +321 ccc +45 bob +DELETE FROM t1 WHERE b > 'bbb' AND a > 100; +SELECT * FROM t1; +a b +45 bob +TRUNCATE TABLE t1; +DROP TABLE t1; +CREATE TABLE t1 (a INT, b CHAR(8), KEY(a, b)) ENGINE=rocksdb; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` int(11) DEFAULT NULL, + `b` char(8) DEFAULT NULL, + KEY `a` (`a`,`b`) +) ENGINE=ROCKSDB DEFAULT CHARSET=latin1 +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +a int(11) YES MUL NULL +b char(8) YES NULL +INSERT INTO t1 (a,b) VALUES (76,'bar'); +INSERT INTO t1 (a,b) VALUES (35,'foo'); +INSERT INTO t1 (a,b) VALUES (77,'baz'); +SELECT * FROM t1 WHERE a = 35; +a b +35 foo +SELECT * FROM t1 WHERE a = 35 AND b = 'foo'; +a b +35 foo +SELECT * FROM t1 WHERE a = 77 OR b = 'bar'; +a b +76 bar +77 baz +SELECT * FROM t1 WHERE a > 35; +a b +76 bar +77 baz +SELECT * FROM t1; +a b +35 foo +76 bar +77 baz +UPDATE t1 SET a=a+100; +SELECT * FROM t1; +a b +135 foo +176 bar +177 baz +UPDATE t1 SET a=a-100, b='bbb' WHERE a>100; +SELECT * FROM t1; +a b +35 bbb +76 bbb +77 bbb +UPDATE t1 SET a=300, b='ccc' WHERE a>70; +SELECT * FROM t1; +a b +300 ccc +300 ccc +35 bbb +UPDATE t1 SET a=123 WHERE a=35; +SELECT * FROM t1; +a b +123 bbb +300 ccc +300 ccc +UPDATE t1 SET a=321 WHERE b='ccc'; +SELECT * FROM t1; +a b +123 bbb +321 ccc +321 ccc +INSERT INTO t1 (a,b) VALUES (45,'bob'); +SELECT * FROM t1; +a b +123 bbb +321 ccc +321 ccc +45 bob +DELETE FROM t1 WHERE a=123; +SELECT * FROM t1; +a b +321 ccc +321 ccc +45 bob +DELETE FROM t1 WHERE b 
> 'bbb' AND a > 100; +SELECT * FROM t1; +a b +45 bob +TRUNCATE TABLE t1; +DROP TABLE t1; +CREATE TABLE t1 (a INT, b CHAR(8), KEY(a), KEY(b)) ENGINE=rocksdb; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` int(11) DEFAULT NULL, + `b` char(8) DEFAULT NULL, + KEY `a` (`a`), + KEY `b` (`b`) +) ENGINE=ROCKSDB DEFAULT CHARSET=latin1 +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +a int(11) YES MUL NULL +b char(8) YES MUL NULL +INSERT INTO t1 (a,b) VALUES (76,'bar'); +INSERT INTO t1 (a,b) VALUES (35,'foo'); +INSERT INTO t1 (a,b) VALUES (77,'baz'); +SELECT * FROM t1 WHERE a = 35; +a b +35 foo +SELECT * FROM t1 WHERE a = 35 AND b = 'foo'; +a b +35 foo +SELECT * FROM t1 WHERE a = 77 OR b = 'bar'; +a b +76 bar +77 baz +SELECT * FROM t1 WHERE a > 35; +a b +76 bar +77 baz +SELECT * FROM t1; +a b +35 foo +76 bar +77 baz +UPDATE t1 SET a=a+100; +SELECT * FROM t1; +a b +135 foo +176 bar +177 baz +UPDATE t1 SET a=a-100, b='bbb' WHERE a>100; +SELECT * FROM t1; +a b +35 bbb +76 bbb +77 bbb +UPDATE t1 SET a=300, b='ccc' WHERE a>70; +SELECT * FROM t1; +a b +300 ccc +300 ccc +35 bbb +UPDATE t1 SET a=123 WHERE a=35; +SELECT * FROM t1; +a b +123 bbb +300 ccc +300 ccc +UPDATE t1 SET a=321 WHERE b='ccc'; +SELECT * FROM t1; +a b +123 bbb +321 ccc +321 ccc +INSERT INTO t1 (a,b) VALUES (45,'bob'); +SELECT * FROM t1; +a b +123 bbb +321 ccc +321 ccc +45 bob +DELETE FROM t1 WHERE a=123; +SELECT * FROM t1; +a b +321 ccc +321 ccc +45 bob +DELETE FROM t1 WHERE b > 'bbb' AND a > 100; +SELECT * FROM t1; +a b +45 bob +TRUNCATE TABLE t1; +DROP TABLE t1; +CREATE TABLE t1 (a INT, b CHAR(8), KEY(a)) ENGINE=rocksdb; +INSERT INTO t1 (a) VALUES (1),(2),(5); +CHECK TABLE t1; +Table Op Msg_type Msg_text +test.t1 check status OK +INSERT INTO t1 (a) VALUES (6),(8),(12); +CHECK TABLE t1 FOR UPGRADE; +Table Op Msg_type Msg_text +test.t1 check status OK +INSERT INTO t1 (a) VALUES (13),(15),(16); +CHECK TABLE t1 QUICK; +Table Op Msg_type Msg_text +test.t1 check status OK +INSERT INTO 
t1 (a) VALUES (17),(120),(132); +CHECK TABLE t1 FAST; +Table Op Msg_type Msg_text +test.t1 check status OK +INSERT INTO t1 (a) VALUES (801),(900),(7714); +CHECK TABLE t1 MEDIUM; +Table Op Msg_type Msg_text +test.t1 check status OK +INSERT INTO t1 (a) VALUES (8760),(10023),(12000); +CHECK TABLE t1 EXTENDED; +Table Op Msg_type Msg_text +test.t1 check status OK +INSERT INTO t1 (a) VALUES (13345),(24456),(78302),(143028); +CHECK TABLE t1 CHANGED; +Table Op Msg_type Msg_text +test.t1 check status OK +DROP TABLE t1; +CREATE TABLE t1 (a INT, b INT, c INT, d INT, KEY kab(a, b), KEY kbc(b, c), KEY kabc(a,b,c)) ENGINE=rocksdb; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` int(11) DEFAULT NULL, + `b` int(11) DEFAULT NULL, + `c` int(11) DEFAULT NULL, + `d` int(11) DEFAULT NULL, + KEY `kab` (`a`,`b`), + KEY `kbc` (`b`,`c`), + KEY `kabc` (`a`,`b`,`c`) +) ENGINE=ROCKSDB DEFAULT CHARSET=latin1 +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +a int(11) YES MUL NULL +b int(11) YES MUL NULL +c int(11) YES NULL +d int(11) YES NULL +INSERT INTO t1 (a,b,c,d) VALUES (1,2,3,4); +INSERT INTO t1 (a,b,c,d) VALUES (5,6,7,8); +INSERT INTO t1 (a,b,c,d) VALUES (10,11,12,13); +INSERT INTO t1 (a,b,c,d) VALUES (14,15,16,17); +SELECT * FROM t1; +a b c d +1 2 3 4 +10 11 12 13 +14 15 16 17 +5 6 7 8 +SELECT * FROM t1 WHERE a = 1 OR a = 10; +a b c d +1 2 3 4 +10 11 12 13 +SELECT * FROM t1 WHERE c = 3 OR d = 17; +a b c d +1 2 3 4 +14 15 16 17 +SELECT * FROM t1 WHERE a > 5 OR d > 5; +a b c d +10 11 12 13 +14 15 16 17 +5 6 7 8 +SELECT a, b, c FROM t1 FORCE INDEX (kabc) WHERE a=1 OR b=11; +a b c +1 2 3 +10 11 12 +SELECT d FROM t1 FORCE INDEX (kbc) WHERE b > 6 AND c > 12; +d +17 +UPDATE t1 SET a=a+100; +UPDATE t1 SET a=a-100, b=99 WHERE a>100; +SELECT * FROM t1; +a b c d +1 99 3 4 +10 99 12 13 +14 99 16 17 +5 99 7 8 +DELETE FROM t1 WHERE a>5; +DELETE FROM t1 WHERE b=99 AND d>4; +SELECT * FROM t1; +a b c d +1 99 3 4 +TRUNCATE TABLE t1; +DROP TABLE t1; +CREATE TABLE t1 (a 
INT, b CHAR(8), KEY ka(a) comment 'rev:cf1', KEY kb(b) +comment 'rev:cf1', KEY kab(a,b) comment 'rev:cf2') ENGINE=rocksdb; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` int(11) DEFAULT NULL, + `b` char(8) DEFAULT NULL, + KEY `ka` (`a`) COMMENT 'rev:cf1', + KEY `kb` (`b`) COMMENT 'rev:cf1', + KEY `kab` (`a`,`b`) COMMENT 'rev:cf2' +) ENGINE=ROCKSDB DEFAULT CHARSET=latin1 +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +a int(11) YES MUL NULL +b char(8) YES MUL NULL +INSERT INTO t1 (a,b) VALUES (76,'bar'); +INSERT INTO t1 (a,b) VALUES (35,'foo'); +INSERT INTO t1 (a,b) VALUES (77,'baz'); +SELECT * FROM t1 WHERE a = 35; +a b +35 foo +SELECT * FROM t1 WHERE a = 35 AND b = 'foo'; +a b +35 foo +SELECT * FROM t1 WHERE a = 77 OR b = 'bar'; +a b +76 bar +77 baz +SELECT * FROM t1 WHERE a > 35; +a b +76 bar +77 baz +SELECT * FROM t1; +a b +35 foo +76 bar +77 baz +UPDATE t1 SET a=a+100; +SELECT * FROM t1; +a b +135 foo +176 bar +177 baz +UPDATE t1 SET a=a-100, b='bbb' WHERE a>100; +SELECT * FROM t1; +a b +35 bbb +76 bbb +77 bbb +UPDATE t1 SET a=300, b='ccc' WHERE a>70; +SELECT * FROM t1; +a b +300 ccc +300 ccc +35 bbb +UPDATE t1 SET a=123 WHERE a=35; +SELECT * FROM t1; +a b +123 bbb +300 ccc +300 ccc +UPDATE t1 SET a=321 WHERE b='ccc'; +SELECT * FROM t1; +a b +123 bbb +321 ccc +321 ccc +INSERT INTO t1 (a,b) VALUES (45,'bob'); +SELECT * FROM t1; +a b +123 bbb +321 ccc +321 ccc +45 bob +DELETE FROM t1 WHERE a=123; +SELECT * FROM t1; +a b +321 ccc +321 ccc +45 bob +DELETE FROM t1 WHERE b > 'bbb' AND a > 100; +SELECT * FROM t1; +a b +45 bob +TRUNCATE TABLE t1; +DROP TABLE t1; +CREATE TABLE t1 (col1 int, col2 int, KEY kcol1(col1)) ENGINE=ROCKSDB; +INSERT INTO t1 (col1, col2) values (2,2); +ALTER TABLE t1 ADD COLUMN extra INT; +UPDATE t1 SET col2 = 1; +select * from t1; +col1 col2 extra +2 1 NULL +DELETE FROM t1 WHERE col1 = 2; +set global rocksdb_force_flush_memtable_now = true; +select * from t1; +col1 col2 extra +DROP TABLE t1; diff --git 
a/storage/rocksdb/mysql-test/rocksdb/r/alter_table.result b/storage/rocksdb/mysql-test/rocksdb/r/alter_table.result new file mode 100644 index 0000000000000..a4e00626122cf --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/alter_table.result @@ -0,0 +1,183 @@ +DROP TABLE IF EXISTS t1, t2; +CREATE TABLE t1 (pk INT PRIMARY KEY, a INT, c CHAR(8)) ENGINE=rocksdb; +INSERT INTO t1 VALUES (1,1,'a'),(2,5,'z'); +ALTER TABLE t1 ADD COLUMN b INT; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `pk` int(11) NOT NULL, + `a` int(11) DEFAULT NULL, + `c` char(8) DEFAULT NULL, + `b` int(11) DEFAULT NULL, + PRIMARY KEY (`pk`) +) ENGINE=ROCKSDB DEFAULT CHARSET=latin1 +ALTER TABLE t1 ALTER COLUMN a SET DEFAULT '0'; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `pk` int(11) NOT NULL, + `a` int(11) DEFAULT 0, + `c` char(8) DEFAULT NULL, + `b` int(11) DEFAULT NULL, + PRIMARY KEY (`pk`) +) ENGINE=ROCKSDB DEFAULT CHARSET=latin1 +ALTER TABLE t1 ALTER a DROP DEFAULT; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `pk` int(11) NOT NULL, + `a` int(11), + `c` char(8) DEFAULT NULL, + `b` int(11) DEFAULT NULL, + PRIMARY KEY (`pk`) +) ENGINE=ROCKSDB DEFAULT CHARSET=latin1 +ALTER TABLE t1 CHANGE COLUMN b b1 CHAR(8) FIRST; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `b1` char(8) DEFAULT NULL, + `pk` int(11) NOT NULL, + `a` int(11), + `c` char(8) DEFAULT NULL, + PRIMARY KEY (`pk`) +) ENGINE=ROCKSDB DEFAULT CHARSET=latin1 +ALTER TABLE t1 CHANGE b1 b INT AFTER c; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `pk` int(11) NOT NULL, + `a` int(11), + `c` char(8) DEFAULT NULL, + `b` int(11) DEFAULT NULL, + PRIMARY KEY (`pk`) +) ENGINE=ROCKSDB DEFAULT CHARSET=latin1 +ALTER TABLE t1 CHANGE b b CHAR(8); +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `pk` int(11) NOT NULL, + `a` int(11), + `c` char(8) DEFAULT NULL, + `b` char(8) DEFAULT NULL, + PRIMARY KEY (`pk`) +) 
ENGINE=ROCKSDB DEFAULT CHARSET=latin1 +ALTER TABLE t1 MODIFY COLUMN b INT; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `pk` int(11) NOT NULL, + `a` int(11), + `c` char(8) DEFAULT NULL, + `b` int(11) DEFAULT NULL, + PRIMARY KEY (`pk`) +) ENGINE=ROCKSDB DEFAULT CHARSET=latin1 +ALTER TABLE t1 MODIFY COLUMN b CHAR(8) FIRST; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `b` char(8) DEFAULT NULL, + `pk` int(11) NOT NULL, + `a` int(11), + `c` char(8) DEFAULT NULL, + PRIMARY KEY (`pk`) +) ENGINE=ROCKSDB DEFAULT CHARSET=latin1 +ALTER TABLE t1 MODIFY COLUMN b INT AFTER a; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `pk` int(11) NOT NULL, + `a` int(11), + `b` int(11) DEFAULT NULL, + `c` char(8) DEFAULT NULL, + PRIMARY KEY (`pk`) +) ENGINE=ROCKSDB DEFAULT CHARSET=latin1 +ALTER TABLE t1 DROP COLUMN b; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `pk` int(11) NOT NULL, + `a` int(11), + `c` char(8) DEFAULT NULL, + PRIMARY KEY (`pk`) +) ENGINE=ROCKSDB DEFAULT CHARSET=latin1 +ALTER TABLE t1 RENAME TO t2; +SHOW CREATE TABLE t1; +ERROR 42S02: Table 'test.t1' doesn't exist +SHOW CREATE TABLE t2; +Table Create Table +t2 CREATE TABLE `t2` ( + `pk` int(11) NOT NULL, + `a` int(11), + `c` char(8) DEFAULT NULL, + PRIMARY KEY (`pk`) +) ENGINE=ROCKSDB DEFAULT CHARSET=latin1 +DROP TABLE t2; +CREATE TABLE t1 (pk INT PRIMARY KEY, a INT, b INT) ENGINE=rocksdb; +INSERT INTO t1 VALUES (1,1,5),(2,2,2),(3,4,3); +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `pk` int(11) NOT NULL, + `a` int(11) DEFAULT NULL, + `b` int(11) DEFAULT NULL, + PRIMARY KEY (`pk`) +) ENGINE=ROCKSDB DEFAULT CHARSET=latin1 +ALTER TABLE t1 ORDER BY b ASC, a DESC, pk DESC; +Warnings: +Warning 1105 ORDER BY ignored as there is a user-defined clustered index in the table 't1' +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `pk` int(11) NOT NULL, + `a` int(11) DEFAULT NULL, + `b` int(11) DEFAULT 
NULL, + PRIMARY KEY (`pk`) +) ENGINE=ROCKSDB DEFAULT CHARSET=latin1 +SELECT * FROM t1; +pk a b +1 1 5 +2 2 2 +3 4 3 +DROP TABLE t1; +CREATE TABLE t1 (pk INT PRIMARY KEY, a INT, b CHAR(8), c CHAR(8)) ENGINE=rocksdb CHARACTER SET latin1 COLLATE latin1_general_cs; +INSERT INTO t1 VALUES (1,5,'z','t'); +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `pk` int(11) NOT NULL, + `a` int(11) DEFAULT NULL, + `b` char(8) COLLATE latin1_general_cs DEFAULT NULL, + `c` char(8) COLLATE latin1_general_cs DEFAULT NULL, + PRIMARY KEY (`pk`) +) ENGINE=ROCKSDB DEFAULT CHARSET=latin1 COLLATE=latin1_general_cs +ALTER TABLE t1 CONVERT TO CHARACTER SET utf8; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `pk` int(11) NOT NULL, + `a` int(11) DEFAULT NULL, + `b` char(8) DEFAULT NULL, + `c` char(8) DEFAULT NULL, + PRIMARY KEY (`pk`) +) ENGINE=ROCKSDB DEFAULT CHARSET=utf8 +ALTER TABLE t1 DEFAULT CHARACTER SET = latin1 COLLATE latin1_general_ci; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `pk` int(11) NOT NULL, + `a` int(11) DEFAULT NULL, + `b` char(8) CHARACTER SET utf8 DEFAULT NULL, + `c` char(8) CHARACTER SET utf8 DEFAULT NULL, + PRIMARY KEY (`pk`) +) ENGINE=ROCKSDB DEFAULT CHARSET=latin1 COLLATE=latin1_general_ci +ALTER TABLE t1 FORCE; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `pk` int(11) NOT NULL, + `a` int(11) DEFAULT NULL, + `b` char(8) CHARACTER SET utf8 DEFAULT NULL, + `c` char(8) CHARACTER SET utf8 DEFAULT NULL, + PRIMARY KEY (`pk`) +) ENGINE=ROCKSDB DEFAULT CHARSET=latin1 COLLATE=latin1_general_ci +DROP TABLE t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/analyze_table.result b/storage/rocksdb/mysql-test/rocksdb/r/analyze_table.result new file mode 100644 index 0000000000000..ff2973230db09 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/analyze_table.result @@ -0,0 +1,29 @@ +DROP TABLE IF EXISTS t1,t2; +CREATE TABLE t1 (pk INT PRIMARY KEY, a INT(11), b CHAR(8)) 
ENGINE=rocksdb; +INSERT INTO t1 VALUES (1,1,'a'),(2,2,'b'); +CREATE TABLE t2 (pk INT PRIMARY KEY, a INT(11), b CHAR(8)) ENGINE=rocksdb; +INSERT INTO t1 VALUES (3,3,'c'); +ANALYZE TABLE t1; +Table Op Msg_type Msg_text +test.t1 analyze status OK +INSERT INTO t2 VALUES (1,4,'d'); +ANALYZE NO_WRITE_TO_BINLOG TABLE t2; +Table Op Msg_type Msg_text +test.t2 analyze status OK +INSERT INTO t1 VALUES (4,5,'e'); +INSERT INTO t2 VALUES (2,6,'f'); +ANALYZE LOCAL TABLE t1, t2; +Table Op Msg_type Msg_text +test.t1 analyze status OK +test.t2 analyze status OK +DROP TABLE t1, t2; +CREATE TABLE t1 (pk INT PRIMARY KEY, a INT(11), KEY(a)) ENGINE=rocksdb; +INSERT INTO t1 VALUES (1,1),(2,2),(3,4),(4,7); +ANALYZE TABLE t1; +Table Op Msg_type Msg_text +test.t1 analyze status OK +INSERT INTO t1 VALUES (5,8),(6,10),(7,11),(8,12); +ANALYZE TABLE t1; +Table Op Msg_type Msg_text +test.t1 analyze status OK +DROP TABLE t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/apply_changes_iter.result b/storage/rocksdb/mysql-test/rocksdb/r/apply_changes_iter.result new file mode 100644 index 0000000000000..a5d81031cd28d --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/apply_changes_iter.result @@ -0,0 +1,64 @@ +DROP TABLE IF EXISTS t1; +DROP TABLE IF EXISTS t2; +CREATE TABLE t1 ( +pk INT NOT NULL PRIMARY KEY, +key1 INT NOT NULL, +KEY (key1) +) ENGINE=ROCKSDB; +INSERT INTO t1 VALUES (12,12); +INSERT INTO t1 VALUES (6,6); +BEGIN; +INSERT INTO t1 VALUES (8,8), (10,10); +SELECT * FROM t1 WHERE key1 BETWEEN 4 and 11 ORDER BY KEY1 DESC; +pk key1 +10 10 +8 8 +6 6 +SELECT * FROM t1 WHERE key1 BETWEEN 4 and 11 ORDER BY KEY1 ASC; +pk key1 +6 6 +8 8 +10 10 +SELECT * FROM t1 IGNORE INDEX(key1) WHERE key1 BETWEEN 4 and 11 ORDER BY key1 DESC; +pk key1 +10 10 +8 8 +6 6 +SELECT * FROM t1 IGNORE INDEX(key1) WHERE key1 BETWEEN 4 and 11 ORDER BY key1 ASC; +pk key1 +6 6 +8 8 +10 10 +ROLLBACK; +CREATE TABLE t2 ( +pk INT NOT NULL PRIMARY KEY, +key1 INT NOT NULL, +KEY (key1) COMMENT 'rev:cf' +) ENGINE=ROCKSDB; 
+INSERT INTO t2 VALUES (12,12); +INSERT INTO t2 VALUES (6,6); +BEGIN; +INSERT INTO t2 VALUES (8,8), (10,10); +SELECT * FROM t2 WHERE key1 BETWEEN 4 and 11 ORDER BY KEY1 DESC; +pk key1 +10 10 +8 8 +6 6 +SELECT * FROM t2 WHERE key1 BETWEEN 4 and 11 ORDER BY KEY1 ASC; +pk key1 +6 6 +8 8 +10 10 +SELECT * FROM t2 IGNORE INDEX(key1) WHERE key1 BETWEEN 4 and 11 ORDER BY key1 DESC; +pk key1 +10 10 +8 8 +6 6 +SELECT * FROM t2 IGNORE INDEX(key1) WHERE key1 BETWEEN 4 and 11 ORDER BY key1 ASC; +pk key1 +6 6 +8 8 +10 10 +ROLLBACK; +DROP TABLE t1; +DROP TABLE t2; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/autoinc_secondary.result b/storage/rocksdb/mysql-test/rocksdb/r/autoinc_secondary.result new file mode 100644 index 0000000000000..100bc5fd63862 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/autoinc_secondary.result @@ -0,0 +1,16 @@ +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 (pk INT PRIMARY KEY, a INT AUTO_INCREMENT, KEY(a)) ENGINE=rocksdb; +INSERT INTO t1 (pk) VALUES (3), (2), (1); +SELECT * FROM t1; +pk a +3 1 +2 2 +1 3 +INSERT INTO t1 (pk) VALUES (4); +SELECT * FROM t1; +pk a +3 1 +2 2 +1 3 +4 4 +DROP TABLE t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/autoinc_vars.result b/storage/rocksdb/mysql-test/rocksdb/r/autoinc_vars.result new file mode 100644 index 0000000000000..0fb3d96c58ff4 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/autoinc_vars.result @@ -0,0 +1,63 @@ +DROP TABLE IF EXISTS t1; +#--------------------------- +# auto_increment_offset +#--------------------------- +SET auto_increment_offset = 200; +CREATE TABLE t1 (a INT AUTO_INCREMENT PRIMARY KEY, b CHAR(8)) ENGINE=rocksdb; +INSERT INTO t1 (a,b) VALUES (NULL,'a'),(NULL,'b'),(NULL,'c'); +SELECT LAST_INSERT_ID(); +LAST_INSERT_ID() +1 +SELECT a,b FROM t1 ORDER BY a; +a b +1 a +2 b +3 c +#--------------------------- +# auto_increment_increment +#--------------------------- +SET auto_increment_increment = 300; +INSERT INTO t1 (a,b) VALUES (NULL,'d'),(NULL,'e'),(NULL,'f'); +SELECT 
LAST_INSERT_ID(); +LAST_INSERT_ID() +200 +SELECT a,b FROM t1 ORDER BY a; +a b +1 a +2 b +3 c +200 d +500 e +800 f +SET auto_increment_increment = 50; +INSERT INTO t1 (a,b) VALUES (NULL,'g'),(NULL,'h'),(NULL,'i'); +SELECT LAST_INSERT_ID(); +LAST_INSERT_ID() +850 +SELECT a,b FROM t1 ORDER BY a; +a b +1 a +2 b +3 c +200 d +500 e +800 f +850 g +900 h +950 i +DROP TABLE t1; +#--------------------------- +# offset is greater than the max value +#--------------------------- +SET auto_increment_increment = 500; +SET auto_increment_offset = 300; +CREATE TABLE t1 (a TINYINT AUTO_INCREMENT PRIMARY KEY) ENGINE=rocksdb; +# In MariaDB, this is an error: +INSERT INTO t1 (a) VALUES (NULL); +ERROR 22003: Out of range value for column 'a' at row 1 +SELECT LAST_INSERT_ID(); +LAST_INSERT_ID() +850 +SELECT a FROM t1 ORDER BY a; +a +DROP TABLE t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/autoinc_vars_thread.result b/storage/rocksdb/mysql-test/rocksdb/r/autoinc_vars_thread.result new file mode 100644 index 0000000000000..db64778d345bd --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/autoinc_vars_thread.result @@ -0,0 +1,38 @@ +#--------------------------- +# two threads inserting simultaneously with increment > 1 +# Issue #390 +#--------------------------- +CREATE TABLE t1 (a INT AUTO_INCREMENT PRIMARY KEY) ENGINE=rocksdb; +connect con1, localhost, root,,; +SET auto_increment_increment = 2; +SET auto_increment_offset = 1; +INSERT INTO t1 VALUES(NULL); +connect con2, localhost, root,,; +SET auto_increment_increment = 2; +SET auto_increment_offset = 1; +connect con3, localhost, root,,; +connection con1; +SET debug_sync='rocksdb.autoinc_vars SIGNAL parked1 WAIT_FOR go1'; +INSERT INTO t1 VALUES(NULL); +connection con2; +SET debug_sync='rocksdb.autoinc_vars SIGNAL parked2 WAIT_FOR go2'; +INSERT INTO t1 VALUES(NULL); +connection default; +SET debug_sync='now WAIT_FOR parked1'; +SET debug_sync='now WAIT_FOR parked2'; +SET debug_sync='now SIGNAL go1'; +connection con3; +SET 
debug_sync='now SIGNAL go2'; +connection default; +connection con1; +connection con2; +connection default; +SET debug_sync='RESET'; +disconnect con1; +disconnect con2; +SELECT * FROM t1; +a +1 +3 +5 +DROP TABLE t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/autoinc_vars_thread_2.result b/storage/rocksdb/mysql-test/rocksdb/r/autoinc_vars_thread_2.result new file mode 100644 index 0000000000000..a14ffdec2e3e1 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/autoinc_vars_thread_2.result @@ -0,0 +1,96 @@ +#--------------------------- +# ten threads inserting simultaneously with increment > 1 +# Issue #390 +#--------------------------- +CREATE TABLE t1 (pk INT AUTO_INCREMENT PRIMARY KEY, thr INT) ENGINE=rocksdb; +connect con$i, localhost, root,,; +SET auto_increment_increment = 100; +SET auto_increment_offset = 9 + 1; +connect con$i, localhost, root,,; +SET auto_increment_increment = 100; +SET auto_increment_offset = 8 + 1; +connect con$i, localhost, root,,; +SET auto_increment_increment = 100; +SET auto_increment_offset = 7 + 1; +connect con$i, localhost, root,,; +SET auto_increment_increment = 100; +SET auto_increment_offset = 6 + 1; +connect con$i, localhost, root,,; +SET auto_increment_increment = 100; +SET auto_increment_offset = 5 + 1; +connect con$i, localhost, root,,; +SET auto_increment_increment = 100; +SET auto_increment_offset = 4 + 1; +connect con$i, localhost, root,,; +SET auto_increment_increment = 100; +SET auto_increment_offset = 3 + 1; +connect con$i, localhost, root,,; +SET auto_increment_increment = 100; +SET auto_increment_offset = 2 + 1; +connect con$i, localhost, root,,; +SET auto_increment_increment = 100; +SET auto_increment_offset = 1 + 1; +connect con$i, localhost, root,,; +SET auto_increment_increment = 100; +SET auto_increment_offset = 0 + 1; +connection default; +connection con9; +LOAD DATA INFILE INTO TABLE t1; +connection con8; +LOAD DATA INFILE INTO TABLE t1; +connection con7; +LOAD DATA INFILE INTO TABLE t1; +connection 
con6; +LOAD DATA INFILE INTO TABLE t1; +connection con5; +LOAD DATA INFILE INTO TABLE t1; +connection con4; +LOAD DATA INFILE INTO TABLE t1; +connection con3; +LOAD DATA INFILE INTO TABLE t1; +connection con2; +LOAD DATA INFILE INTO TABLE t1; +connection con1; +LOAD DATA INFILE INTO TABLE t1; +connection con0; +LOAD DATA INFILE INTO TABLE t1; +connection default; +connection con9; +connection con8; +connection con7; +connection con6; +connection con5; +connection con4; +connection con3; +connection con2; +connection con1; +connection con0; +connection default; +SELECT COUNT(*) FROM t1; +COUNT(*) +1000000 +SELECT thr, COUNT(pk) FROM t1 GROUP BY thr; +thr COUNT(pk) +0 100000 +1 100000 +2 100000 +3 100000 +4 100000 +5 100000 +6 100000 +7 100000 +8 100000 +9 100000 +disconnect con9; +disconnect con8; +disconnect con7; +disconnect con6; +disconnect con5; +disconnect con4; +disconnect con3; +disconnect con2; +disconnect con1; +disconnect con0; +SELECT * FROM t1 ORDER BY pk INTO OUTFILE ; +All pk values matched their expected values +DROP TABLE t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/autoincrement.result b/storage/rocksdb/mysql-test/rocksdb/r/autoincrement.result new file mode 100644 index 0000000000000..28b5b6cd07038 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/autoincrement.result @@ -0,0 +1 @@ +# The test checks AUTO_INCREMENT capabilities that are not supported by RocksDB-SE. diff --git a/storage/rocksdb/mysql-test/rocksdb/r/blind_delete_without_tx_api.result b/storage/rocksdb/mysql-test/rocksdb/r/blind_delete_without_tx_api.result new file mode 100644 index 0000000000000..a3fc25cc81bd7 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/blind_delete_without_tx_api.result @@ -0,0 +1,85 @@ +include/master-slave.inc +Warnings: +Note #### Sending passwords in plain text without SSL/TLS is extremely insecure. 
+Note #### Storing MySQL user name or password information in the master info repository is not secure and is therefore not recommended. Please consider using the USER and PASSWORD connection options for START SLAVE; see the 'START SLAVE Syntax' in the MySQL Manual for more information. +[connection master] +set @save_rocksdb_blind_delete_primary_key=@@session.rocksdb_blind_delete_primary_key; +set @save_rocksdb_master_skip_tx_api=@@session.rocksdb_master_skip_tx_api; +DROP TABLE IF EXISTS t1,t2; +create table t1 (id int primary key, value int, value2 varchar(200)) engine=rocksdb; +create table t2 (id int primary key, value int, value2 varchar(200), index(value)) engine=rocksdb; +SET session rocksdb_blind_delete_primary_key=1; +select variable_value into @c from information_schema.global_status where variable_name='rocksdb_rows_deleted_blind'; +select variable_value-@c from information_schema.global_status where variable_name='rocksdb_rows_deleted_blind'; +variable_value-@c +1000 +SELECT count(*) FROM t1; +count(*) +9000 +include/sync_slave_sql_with_master.inc +SELECT count(*) FROM t1; +count(*) +9000 +select variable_value into @c from information_schema.global_status where variable_name='rocksdb_rows_deleted_blind'; +select variable_value-@c from information_schema.global_status where variable_name='rocksdb_rows_deleted_blind'; +variable_value-@c +0 +SELECT count(*) FROM t2; +count(*) +9000 +SET session rocksdb_master_skip_tx_api=1; +select variable_value into @c from information_schema.global_status where variable_name='rocksdb_rows_deleted_blind'; +select variable_value-@c from information_schema.global_status where variable_name='rocksdb_rows_deleted_blind'; +variable_value-@c +1000 +SELECT count(*) FROM t1; +count(*) +8000 +SELECT count(*) FROM t2; +count(*) +8000 +include/sync_slave_sql_with_master.inc +SELECT count(*) FROM t1; +count(*) +8000 +SELECT count(*) FROM t2; +count(*) +8000 +select variable_value into @c from information_schema.global_status where 
variable_name='rocksdb_rows_deleted_blind'; +DELETE FROM t1 WHERE id BETWEEN 3001 AND 4000; +DELETE FROM t2 WHERE id BETWEEN 3001 AND 4000; +select variable_value-@c from information_schema.global_status where variable_name='rocksdb_rows_deleted_blind'; +variable_value-@c +0 +SELECT count(*) FROM t1; +count(*) +7000 +SELECT count(*) FROM t2; +count(*) +7000 +include/sync_slave_sql_with_master.inc +SELECT count(*) FROM t1; +count(*) +7000 +SELECT count(*) FROM t2; +count(*) +7000 +DELETE FROM t1 WHERE id = 10; +SELECT count(*) FROM t1; +count(*) +7000 +call mtr.add_suppression("Slave SQL.*Could not execute Delete_rows event on table test.t1.*Error_code.*"); +call mtr.add_suppression("Slave: Can't find record in 't1'.*"); +include/wait_for_slave_sql_error.inc [errno=1032] +set @save_rocksdb_read_free_rpl_tables=@@global.rocksdb_read_free_rpl_tables; +set global rocksdb_read_free_rpl_tables="t.*"; +START SLAVE; +include/sync_slave_sql_with_master.inc +SELECT count(*) FROM t1; +count(*) +7000 +set global rocksdb_read_free_rpl_tables=@save_rocksdb_read_free_rpl_tables; +SET session rocksdb_blind_delete_primary_key=@save_rocksdb_blind_delete_primary_key; +SET session rocksdb_master_skip_tx_api=@save_rocksdb_master_skip_tx_api; +DROP TABLE t1, t2; +include/rpl_end.inc diff --git a/storage/rocksdb/mysql-test/rocksdb/r/bloomfilter.result b/storage/rocksdb/mysql-test/rocksdb/r/bloomfilter.result new file mode 100644 index 0000000000000..be93cf2eeadc5 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/bloomfilter.result @@ -0,0 +1,1237 @@ +CREATE PROCEDURE bloom_start() +BEGIN +select variable_value into @c from information_schema.global_status where variable_name='rocksdb_bloom_filter_prefix_checked'; +select variable_value into @u from information_schema.global_status where variable_name='rocksdb_bloom_filter_prefix_useful'; +END// +CREATE PROCEDURE bloom_end() +BEGIN +select case when variable_value-@c > 0 then 'true' else 'false' end as checked from 
information_schema.global_status where variable_name='rocksdb_bloom_filter_prefix_checked'; +END// +create or replace table t1 ( +id1 bigint not null, +id2 bigint not null, +id3 varchar(100) not null, +id4 int not null, +id5 int not null, +value bigint, +value2 varchar(100), +primary key (id1, id2, id3, id4) , +index id2 (id2) , +index id2_id1 (id2, id1) , +index id2_id3 (id2, id3) , +index id2_id4 (id2, id4) , +index id2_id3_id1_id4 (id2, id3, id1, id4) , +index id3_id2 (id3, id2) +) engine=ROCKSDB; +create or replace table t2 ( +id1 bigint not null, +id2 bigint not null, +id3 varchar(100) not null, +id4 int not null, +id5 int not null, +value bigint, +value2 varchar(100), +primary key (id4) , +index id2 (id2) , +index id2_id3 (id2, id3) , +index id2_id4 (id2, id4) , +index id2_id4_id5 (id2, id4, id5) , +index id3_id4 (id3, id4) , +index id3_id5 (id3, id5) +) engine=ROCKSDB; +insert t1 +select (seq+9) div 10, (seq+4) div 5, (seq+4) div 5, seq, seq, 1000, "aaabbbccc" + from seq_1_to_10000; +insert t2 select * from t1; +call bloom_start(); +select count(*) from t1; +count(*) +10000 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t2; +count(*) +10000 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index(PRIMARY) where id1 >= 1; +count(*) +10000 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index(id2_id1) where id2 >= 1; +count(*) +10000 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t2 force index(id3_id4) where id3 >= '1'; +count(*) +10000 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index(id2_id1) where id2=2 and id1=1; +count(*) +5 +call bloom_end(); +checked +true +call bloom_start(); +select count(*) from t1 force index(id2_id1) where id2=24 and id1=12; +count(*) +5 +call bloom_end(); +checked +true +call bloom_start(); +select count(*) from t1 force index(id2_id1) where id2=88 and 
id1=44; +count(*) +5 +call bloom_end(); +checked +true +call bloom_start(); +select count(*) from t1 force index(id2_id1) where id2=100 and id1=50; +count(*) +5 +call bloom_end(); +checked +true +call bloom_start(); +select count(*) from t1 force index(id2_id1) where id2=428 and id1=214; +count(*) +5 +call bloom_end(); +checked +true +call bloom_start(); +select count(*) from t2 force index (id2_id4_id5) where id2=1 and id4=1 and id5=1; +count(*) +1 +call bloom_end(); +checked +true +call bloom_start(); +select count(*) from t2 force index (id2_id4_id5) where id2=23 and id4=115 and id5=115; +count(*) +1 +call bloom_end(); +checked +true +call bloom_start(); +select count(*) from t2 force index (id2_id4_id5) where id2=500 and id4=2500 and id5=2500; +count(*) +1 +call bloom_end(); +checked +true +call bloom_start(); +select count(*) from t2 force index (id2_id4_id5) where id2=601 and id4=3005 and id5=3005; +count(*) +1 +call bloom_end(); +checked +true +call bloom_start(); +select count(*) from t2 force index (id2_id3) where id2=1; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t2 force index (id2_id3) where id2=23; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t2 force index (id2_id3) where id2=345; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t2 force index (id2_id3) where id2=456; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t2 force index (id2_id4) where id2=1; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t2 force index (id2_id4) where id2=23; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t2 force index (id2_id4) where id2=345; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t2 force index (id2_id4) where id2=456; +count(*) +5 +call bloom_end(); +checked 
+false +call bloom_start(); +select count(*) from t1 force index (id2_id3_id1_id4) where id2=1 and id3='1' and id1=1 order by id4; +count(*) +5 +call bloom_end(); +checked +true +call bloom_start(); +select count(*) from t1 force index (id2_id3_id1_id4) where id2=36 and id3='36' and id1=18 order by id4; +count(*) +5 +call bloom_end(); +checked +true +call bloom_start(); +select count(*) from t1 force index (id2_id3_id1_id4) where id2=124 and id3='124' and id1=62 order by id4; +count(*) +5 +call bloom_end(); +checked +true +call bloom_start(); +select count(*) from t1 force index (id2_id3_id1_id4) where id2=888 and id3='888' and id1=444 order by id4; +count(*) +5 +call bloom_end(); +checked +true +call bloom_start(); +select count(*) from t1 force index (id2_id3_id1_id4) where id2=124 and id3='124'; +count(*) +5 +call bloom_end(); +checked +true +call bloom_start(); +select count(*) from t2 force index (id2_id3) where id2=1 and id3='1' and id4=1; +count(*) +1 +call bloom_end(); +checked +true +call bloom_start(); +select count(*) from t2 force index (id2_id3) where id2=12 and id3='12' and id4=60; +count(*) +1 +call bloom_end(); +checked +true +call bloom_start(); +select count(*) from t1 force index (id2_id3) where id2=1 and id3='1'; +count(*) +5 +call bloom_end(); +checked +true +call bloom_start(); +select count(*) from t1 force index (id2_id3) where id2=23 and id3='23'; +count(*) +5 +call bloom_end(); +checked +true +call bloom_start(); +select count(*) from t1 force index (id3_id2) where id2=1 and id3='1'; +count(*) +5 +call bloom_end(); +checked +true +call bloom_start(); +select count(*) from t1 force index (id3_id2) where id2=23 and id3='23'; +count(*) +5 +call bloom_end(); +checked +true +call bloom_start(); +select count(*) from t1 force index (PRIMARY) where id1=1; +count(*) +10 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index (PRIMARY) where id1=12; +count(*) +10 +call bloom_end(); +checked +false +call 
bloom_start(); +select count(*) from t1 force index (PRIMARY) where id1=23; +count(*) +10 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index (PRIMARY) where id1=100; +count(*) +10 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index (PRIMARY) where id1=234; +count(*) +10 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index (id2_id3_id1_id4) where id2=36; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index (id2_id3_id1_id4) where id2=234; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t2 force index (id2) where id2=1 and id4=1; +count(*) +1 +call bloom_end(); +checked +true +call bloom_start(); +select count(*) from t2 force index (id2) where id2=23 and id4=115; +count(*) +1 +call bloom_end(); +checked +true +call bloom_start(); +select count(*) from t2 force index (id2) where id2=500 and id4=2500; +count(*) +1 +call bloom_end(); +checked +true +call bloom_start(); +select count(*) from t2 force index (id2) where id2=601 and id4=3005; +count(*) +1 +call bloom_end(); +checked +true +call bloom_start(); +select count(*) from t2 force index (id3_id4) where id3='1' and id4=1; +count(*) +1 +call bloom_end(); +checked +true +call bloom_start(); +select count(*) from t2 force index (id3_id4) where id3='12' and id4=60; +count(*) +1 +call bloom_end(); +checked +true +call bloom_start(); +select count(*) from t1 force index (id2) where id2=1; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index (id2) where id2=23; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index (id2) where id2=345; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index (id2) where id2=456; +count(*) +5 +call bloom_end(); +checked 
+false +call bloom_start(); +select count(*) from t2 force index (id3_id5) where id3='100' and id5=500; +count(*) +1 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t2 force index (id3_id5) where id3='240' and id5=1200; +count(*) +1 +call bloom_end(); +checked +false +create or replace table t1 ( +id1 bigint not null, +id2 bigint not null, +id3 varchar(100) not null, +id4 int not null, +id5 int not null, +value bigint, +value2 varchar(100), +primary key (id1, id2, id3, id4) COMMENT 'cf_short_prefix', +index id2 (id2) COMMENT 'cf_short_prefix', +index id2_id1 (id2, id1) COMMENT 'cf_short_prefix', +index id2_id3 (id2, id3) COMMENT 'cf_short_prefix', +index id2_id4 (id2, id4) COMMENT 'cf_short_prefix', +index id2_id3_id1_id4 (id2, id3, id1, id4) COMMENT 'cf_short_prefix', +index id3_id2 (id3, id2) COMMENT 'cf_short_prefix' +) engine=ROCKSDB; +create or replace table t2 ( +id1 bigint not null, +id2 bigint not null, +id3 varchar(100) not null, +id4 int not null, +id5 int not null, +value bigint, +value2 varchar(100), +primary key (id4) COMMENT 'cf_short_prefix', +index id2 (id2) COMMENT 'cf_short_prefix', +index id2_id3 (id2, id3) COMMENT 'cf_short_prefix', +index id2_id4 (id2, id4) COMMENT 'cf_short_prefix', +index id2_id4_id5 (id2, id4, id5) COMMENT 'cf_short_prefix', +index id3_id4 (id3, id4) COMMENT 'cf_short_prefix', +index id3_id5 (id3, id5) COMMENT 'cf_short_prefix' +) engine=ROCKSDB; +insert t1 +select (seq+9) div 10, (seq+4) div 5, (seq+4) div 5, seq, seq, 1000, "aaabbbccc" + from seq_1_to_10000; +insert t2 select * from t1; +call bloom_start(); +select count(*) from t1; +count(*) +10000 +call bloom_end(); +checked +true +call bloom_start(); +select count(*) from t2; +count(*) +10000 +call bloom_end(); +checked +true +call bloom_start(); +select count(*) from t1 force index(PRIMARY) where id1 >= 1; +count(*) +10000 +call bloom_end(); +checked +true +call bloom_start(); +select count(*) from t1 force index(id2_id1) where id2 >= 1; 
+count(*) +10000 +call bloom_end(); +checked +true +call bloom_start(); +select count(*) from t2 force index(id3_id4) where id3 >= '1'; +count(*) +10000 +call bloom_end(); +checked +true +call bloom_start(); +select count(*) from t1 force index(id2_id1) where id2=2 and id1=1; +count(*) +5 +call bloom_end(); +checked +true +call bloom_start(); +select count(*) from t1 force index(id2_id1) where id2=24 and id1=12; +count(*) +5 +call bloom_end(); +checked +true +call bloom_start(); +select count(*) from t1 force index(id2_id1) where id2=88 and id1=44; +count(*) +5 +call bloom_end(); +checked +true +call bloom_start(); +select count(*) from t1 force index(id2_id1) where id2=100 and id1=50; +count(*) +5 +call bloom_end(); +checked +true +call bloom_start(); +select count(*) from t1 force index(id2_id1) where id2=428 and id1=214; +count(*) +5 +call bloom_end(); +checked +true +call bloom_start(); +select count(*) from t2 force index (id2_id4_id5) where id2=1 and id4=1 and id5=1; +count(*) +1 +call bloom_end(); +checked +true +call bloom_start(); +select count(*) from t2 force index (id2_id4_id5) where id2=23 and id4=115 and id5=115; +count(*) +1 +call bloom_end(); +checked +true +call bloom_start(); +select count(*) from t2 force index (id2_id4_id5) where id2=500 and id4=2500 and id5=2500; +count(*) +1 +call bloom_end(); +checked +true +call bloom_start(); +select count(*) from t2 force index (id2_id4_id5) where id2=601 and id4=3005 and id5=3005; +count(*) +1 +call bloom_end(); +checked +true +call bloom_start(); +select count(*) from t2 force index (id2_id3) where id2=1; +count(*) +5 +call bloom_end(); +checked +true +call bloom_start(); +select count(*) from t2 force index (id2_id3) where id2=23; +count(*) +5 +call bloom_end(); +checked +true +call bloom_start(); +select count(*) from t2 force index (id2_id3) where id2=345; +count(*) +5 +call bloom_end(); +checked +true +call bloom_start(); +select count(*) from t2 force index (id2_id3) where id2=456; +count(*) +5 
+call bloom_end(); +checked +true +call bloom_start(); +select count(*) from t2 force index (id2_id4) where id2=1; +count(*) +5 +call bloom_end(); +checked +true +call bloom_start(); +select count(*) from t2 force index (id2_id4) where id2=23; +count(*) +5 +call bloom_end(); +checked +true +call bloom_start(); +select count(*) from t2 force index (id2_id4) where id2=345; +count(*) +5 +call bloom_end(); +checked +true +call bloom_start(); +select count(*) from t2 force index (id2_id4) where id2=456; +count(*) +5 +call bloom_end(); +checked +true +call bloom_start(); +select count(*) from t1 force index (id2_id3_id1_id4) where id2=1 and id3='1' and id1=1 order by id4; +count(*) +5 +call bloom_end(); +checked +true +call bloom_start(); +select count(*) from t1 force index (id2_id3_id1_id4) where id2=36 and id3='36' and id1=18 order by id4; +count(*) +5 +call bloom_end(); +checked +true +call bloom_start(); +select count(*) from t1 force index (id2_id3_id1_id4) where id2=124 and id3='124' and id1=62 order by id4; +count(*) +5 +call bloom_end(); +checked +true +call bloom_start(); +select count(*) from t1 force index (id2_id3_id1_id4) where id2=888 and id3='888' and id1=444 order by id4; +count(*) +5 +call bloom_end(); +checked +true +call bloom_start(); +select count(*) from t1 force index (id2_id3_id1_id4) where id2=124 and id3='124'; +count(*) +5 +call bloom_end(); +checked +true +call bloom_start(); +select count(*) from t2 force index (id2_id3) where id2=1 and id3='1' and id4=1; +count(*) +1 +call bloom_end(); +checked +true +call bloom_start(); +select count(*) from t2 force index (id2_id3) where id2=12 and id3='12' and id4=60; +count(*) +1 +call bloom_end(); +checked +true +call bloom_start(); +select count(*) from t1 force index (id2_id3) where id2=1 and id3='1'; +count(*) +5 +call bloom_end(); +checked +true +call bloom_start(); +select count(*) from t1 force index (id2_id3) where id2=23 and id3='23'; +count(*) +5 +call bloom_end(); +checked +true +call 
bloom_start(); +select count(*) from t1 force index (id3_id2) where id2=1 and id3='1'; +count(*) +5 +call bloom_end(); +checked +true +call bloom_start(); +select count(*) from t1 force index (id3_id2) where id2=23 and id3='23'; +count(*) +5 +call bloom_end(); +checked +true +call bloom_start(); +select count(*) from t1 force index (PRIMARY) where id1=1; +count(*) +10 +call bloom_end(); +checked +true +call bloom_start(); +select count(*) from t1 force index (PRIMARY) where id1=12; +count(*) +10 +call bloom_end(); +checked +true +call bloom_start(); +select count(*) from t1 force index (PRIMARY) where id1=23; +count(*) +10 +call bloom_end(); +checked +true +call bloom_start(); +select count(*) from t1 force index (PRIMARY) where id1=100; +count(*) +10 +call bloom_end(); +checked +true +call bloom_start(); +select count(*) from t1 force index (PRIMARY) where id1=234; +count(*) +10 +call bloom_end(); +checked +true +call bloom_start(); +select count(*) from t1 force index (id2_id3_id1_id4) where id2=36; +count(*) +5 +call bloom_end(); +checked +true +call bloom_start(); +select count(*) from t1 force index (id2_id3_id1_id4) where id2=234; +count(*) +5 +call bloom_end(); +checked +true +call bloom_start(); +select count(*) from t2 force index (id2) where id2=1 and id4=1; +count(*) +1 +call bloom_end(); +checked +true +call bloom_start(); +select count(*) from t2 force index (id2) where id2=23 and id4=115; +count(*) +1 +call bloom_end(); +checked +true +call bloom_start(); +select count(*) from t2 force index (id2) where id2=500 and id4=2500; +count(*) +1 +call bloom_end(); +checked +true +call bloom_start(); +select count(*) from t2 force index (id2) where id2=601 and id4=3005; +count(*) +1 +call bloom_end(); +checked +true +call bloom_start(); +select count(*) from t2 force index (id3_id4) where id3='1' and id4=1; +count(*) +1 +call bloom_end(); +checked +true +call bloom_start(); +select count(*) from t2 force index (id3_id4) where id3='12' and id4=60; +count(*) +1 
+call bloom_end(); +checked +true +call bloom_start(); +select count(*) from t1 force index (id2) where id2=1; +count(*) +5 +call bloom_end(); +checked +true +call bloom_start(); +select count(*) from t1 force index (id2) where id2=23; +count(*) +5 +call bloom_end(); +checked +true +call bloom_start(); +select count(*) from t1 force index (id2) where id2=345; +count(*) +5 +call bloom_end(); +checked +true +call bloom_start(); +select count(*) from t1 force index (id2) where id2=456; +count(*) +5 +call bloom_end(); +checked +true +call bloom_start(); +select count(*) from t2 force index (id3_id5) where id3='100' and id5=500; +count(*) +1 +call bloom_end(); +checked +true +call bloom_start(); +select count(*) from t2 force index (id3_id5) where id3='240' and id5=1200; +count(*) +1 +call bloom_end(); +checked +true +create or replace table t1 ( +id1 bigint not null, +id2 bigint not null, +id3 varchar(100) not null, +id4 int not null, +id5 int not null, +value bigint, +value2 varchar(100), +primary key (id1, id2, id3, id4) COMMENT 'cf_long_prefix', +index id2 (id2) COMMENT 'cf_long_prefix', +index id2_id1 (id2, id1) COMMENT 'cf_long_prefix', +index id2_id3 (id2, id3) COMMENT 'cf_long_prefix', +index id2_id4 (id2, id4) COMMENT 'cf_long_prefix', +index id2_id3_id1_id4 (id2, id3, id1, id4) COMMENT 'cf_long_prefix', +index id3_id2 (id3, id2) COMMENT 'cf_long_prefix' +) engine=ROCKSDB; +create or replace table t2 ( +id1 bigint not null, +id2 bigint not null, +id3 varchar(100) not null, +id4 int not null, +id5 int not null, +value bigint, +value2 varchar(100), +primary key (id4) COMMENT 'cf_long_prefix', +index id2 (id2) COMMENT 'cf_long_prefix', +index id2_id3 (id2, id3) COMMENT 'cf_long_prefix', +index id2_id4 (id2, id4) COMMENT 'cf_long_prefix', +index id2_id4_id5 (id2, id4, id5) COMMENT 'cf_long_prefix', +index id3_id4 (id3, id4) COMMENT 'cf_long_prefix', +index id3_id5 (id3, id5) COMMENT 'cf_long_prefix' +) engine=ROCKSDB; +insert t1 +select (seq+9) div 10, (seq+4) div 
5, (seq+4) div 5, seq, seq, 1000, "aaabbbccc" + from seq_1_to_10000; +insert t2 select * from t1; +call bloom_start(); +select count(*) from t1; +count(*) +10000 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t2; +count(*) +10000 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index(PRIMARY) where id1 >= 1; +count(*) +10000 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index(id2_id1) where id2 >= 1; +count(*) +10000 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t2 force index(id3_id4) where id3 >= '1'; +count(*) +10000 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index(id2_id1) where id2=2 and id1=1; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index(id2_id1) where id2=24 and id1=12; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index(id2_id1) where id2=88 and id1=44; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index(id2_id1) where id2=100 and id1=50; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index(id2_id1) where id2=428 and id1=214; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t2 force index (id2_id4_id5) where id2=1 and id4=1 and id5=1; +count(*) +1 +call bloom_end(); +checked +true +call bloom_start(); +select count(*) from t2 force index (id2_id4_id5) where id2=23 and id4=115 and id5=115; +count(*) +1 +call bloom_end(); +checked +true +call bloom_start(); +select count(*) from t2 force index (id2_id4_id5) where id2=500 and id4=2500 and id5=2500; +count(*) +1 +call bloom_end(); +checked +true +call bloom_start(); +select count(*) from t2 force index (id2_id4_id5) where id2=601 and id4=3005 and id5=3005; +count(*) +1 
+call bloom_end(); +checked +true +call bloom_start(); +select count(*) from t2 force index (id2_id3) where id2=1; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t2 force index (id2_id3) where id2=23; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t2 force index (id2_id3) where id2=345; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t2 force index (id2_id3) where id2=456; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t2 force index (id2_id4) where id2=1; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t2 force index (id2_id4) where id2=23; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t2 force index (id2_id4) where id2=345; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t2 force index (id2_id4) where id2=456; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index (id2_id3_id1_id4) where id2=1 and id3='1' and id1=1 order by id4; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index (id2_id3_id1_id4) where id2=36 and id3='36' and id1=18 order by id4; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index (id2_id3_id1_id4) where id2=124 and id3='124' and id1=62 order by id4; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index (id2_id3_id1_id4) where id2=888 and id3='888' and id1=444 order by id4; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index (id2_id3_id1_id4) where id2=124 and id3='124'; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t2 force index (id2_id3) 
where id2=1 and id3='1' and id4=1; +count(*) +1 +call bloom_end(); +checked +true +call bloom_start(); +select count(*) from t2 force index (id2_id3) where id2=12 and id3='12' and id4=60; +count(*) +1 +call bloom_end(); +checked +true +call bloom_start(); +select count(*) from t1 force index (id2_id3) where id2=1 and id3='1'; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index (id2_id3) where id2=23 and id3='23'; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index (id3_id2) where id2=1 and id3='1'; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index (id3_id2) where id2=23 and id3='23'; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index (PRIMARY) where id1=1; +count(*) +10 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index (PRIMARY) where id1=12; +count(*) +10 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index (PRIMARY) where id1=23; +count(*) +10 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index (PRIMARY) where id1=100; +count(*) +10 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index (PRIMARY) where id1=234; +count(*) +10 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index (id2_id3_id1_id4) where id2=36; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index (id2_id3_id1_id4) where id2=234; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t2 force index (id2) where id2=1 and id4=1; +count(*) +1 +call bloom_end(); +checked +true +call bloom_start(); +select count(*) from t2 force index (id2) where id2=23 and id4=115; +count(*) +1 +call bloom_end(); 
+checked +true +call bloom_start(); +select count(*) from t2 force index (id2) where id2=500 and id4=2500; +count(*) +1 +call bloom_end(); +checked +true +call bloom_start(); +select count(*) from t2 force index (id2) where id2=601 and id4=3005; +count(*) +1 +call bloom_end(); +checked +true +call bloom_start(); +select count(*) from t2 force index (id3_id4) where id3='1' and id4=1; +count(*) +1 +call bloom_end(); +checked +true +call bloom_start(); +select count(*) from t2 force index (id3_id4) where id3='12' and id4=60; +count(*) +1 +call bloom_end(); +checked +true +call bloom_start(); +select count(*) from t1 force index (id2) where id2=1; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index (id2) where id2=23; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index (id2) where id2=345; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index (id2) where id2=456; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t2 force index (id3_id5) where id3='100' and id5=500; +count(*) +1 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t2 force index (id3_id5) where id3='240' and id5=1200; +count(*) +1 +call bloom_end(); +checked +false +create table r1 (id1 bigint, id2 bigint, id3 bigint, v1 int, v2 text, primary key (id1, id2, id3)) engine=rocksdb DEFAULT CHARSET=latin1 collate latin1_bin; +call bloom_start(); +select * from r1 where id1=1 and id2 in (1) order by id3 asc; +id1 id2 id3 v1 v2 +1 1 1 1 1 +call bloom_end(); +checked +true +call bloom_start(); +select * from r1 where id1=1 and id2 in (1) order by id3 desc; +id1 id2 id3 v1 v2 +1 1 1 1 1 +call bloom_end(); +checked +false +DROP PROCEDURE bloom_start; +DROP PROCEDURE bloom_end; +truncate table t1; +optimize table t1; +Table Op Msg_type Msg_text +test.t1 optimize status OK +truncate table t2; 
+optimize table t2; +Table Op Msg_type Msg_text +test.t2 optimize status OK +drop table if exists t1; +drop table if exists t2; +drop table if exists r1; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/bloomfilter2.result b/storage/rocksdb/mysql-test/rocksdb/r/bloomfilter2.result new file mode 100644 index 0000000000000..d5369e2dbedd2 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/bloomfilter2.result @@ -0,0 +1,71 @@ +CREATE TABLE t0 (id1 VARCHAR(30), id2 INT, value INT, PRIMARY KEY (id1, id2)) ENGINE=rocksdb collate latin1_bin; +select variable_value into @u from information_schema.global_status where variable_name='rocksdb_bloom_filter_prefix_useful'; +SELECT COUNT(*) FROM t0 WHERE id1='X' AND id2>=1; +COUNT(*) +10000 +select case when variable_value-@u = 0 then 'true' else 'false' end from information_schema.global_status where variable_name='rocksdb_bloom_filter_prefix_useful'; +case when variable_value-@u = 0 then 'true' else 'false' end +true +DROP TABLE t0; +CREATE TABLE t1 (id1 BIGINT, id2 INT, id3 BIGINT, value INT, PRIMARY KEY (id1, id2, id3)) ENGINE=rocksdb; +select variable_value into @u from information_schema.global_status where variable_name='rocksdb_bloom_filter_prefix_useful'; +SELECT COUNT(*) FROM t1 WHERE id1=1 AND id2=1 AND id3>=2; +COUNT(*) +9999 +select case when variable_value-@u = 0 then 'true' else 'false' end from information_schema.global_status where variable_name='rocksdb_bloom_filter_prefix_useful'; +case when variable_value-@u = 0 then 'true' else 'false' end +true +select variable_value into @u from information_schema.global_status where variable_name='rocksdb_bloom_filter_prefix_useful'; +SELECT COUNT(*) FROM t1 WHERE id1=1 AND id2>=1 AND id3>=2; +COUNT(*) +9999 +select case when variable_value-@u = 0 then 'true' else 'false' end from information_schema.global_status where variable_name='rocksdb_bloom_filter_prefix_useful'; +case when variable_value-@u = 0 then 'true' else 'false' end +true +DROP TABLE t1; +CREATE TABLE 
t2 (id1 INT, id2 VARCHAR(100), id3 BIGINT, value INT, PRIMARY KEY (id1, id2, id3)) ENGINE=rocksdb collate latin1_bin; +select variable_value into @u from information_schema.global_status where variable_name='rocksdb_bloom_filter_prefix_useful'; +select count(*) from t2 WHERE id1=100 and id2 IN ('00000000000000000000', '100'); +count(*) +1 +select case when variable_value-@u > 0 then 'true' else 'false' end from information_schema.global_status where variable_name='rocksdb_bloom_filter_prefix_useful'; +case when variable_value-@u > 0 then 'true' else 'false' end +true +select variable_value into @u from information_schema.global_status where variable_name='rocksdb_bloom_filter_prefix_useful'; +select count(*) from t2 WHERE id1=200 and id2 IN ('00000000000000000000', '200'); +count(*) +1 +select case when variable_value-@u > 0 then 'true' else 'false' end from information_schema.global_status where variable_name='rocksdb_bloom_filter_prefix_useful'; +case when variable_value-@u > 0 then 'true' else 'false' end +true +select variable_value into @u from information_schema.global_status where variable_name='rocksdb_bloom_filter_prefix_useful'; +select count(*) from t2 WHERE id1=200 and id2 IN ('3', '200'); +count(*) +1 +select case when variable_value-@u = 0 then 'true' else 'false' end from information_schema.global_status where variable_name='rocksdb_bloom_filter_prefix_useful'; +case when variable_value-@u = 0 then 'true' else 'false' end +true +DROP TABLE t2; +CREATE TABLE t3 (id1 BIGINT, id2 BIGINT, id3 BIGINT, id4 BIGINT, PRIMARY KEY (id1, id2, id3, id4)) ENGINE=rocksdb collate latin1_bin; +select variable_value into @u from information_schema.global_status where variable_name='rocksdb_bloom_filter_useful'; +SELECT COUNT(*) FROM t3 WHERE id1=1 AND id2=5000 AND id3=1 AND id4=1; +COUNT(*) +0 +select case when variable_value-@u > 0 then 'true' else 'false' end from information_schema.global_status where variable_name='rocksdb_bloom_filter_useful'; +case when 
variable_value-@u > 0 then 'true' else 'false' end +true +select variable_value into @u from information_schema.global_status where variable_name='rocksdb_bloom_filter_prefix_useful'; +SELECT COUNT(*) FROM t3 WHERE id1=1 AND id2=1 AND id3=1; +COUNT(*) +1 +select case when variable_value-@u > 0 then 'true' else 'false' end from information_schema.global_status where variable_name='rocksdb_bloom_filter_prefix_useful'; +case when variable_value-@u > 0 then 'true' else 'false' end +true +select variable_value into @u from information_schema.global_status where variable_name='rocksdb_bloom_filter_prefix_useful'; +SELECT COUNT(*) FROM t3 WHERE id1=1 AND id2=1 AND id3=1 AND id4 <= 500; +COUNT(*) +1 +select case when variable_value-@u > 0 then 'true' else 'false' end from information_schema.global_status where variable_name='rocksdb_bloom_filter_prefix_useful'; +case when variable_value-@u > 0 then 'true' else 'false' end +true +DROP TABLE t3; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/bloomfilter3.result b/storage/rocksdb/mysql-test/rocksdb/r/bloomfilter3.result new file mode 100644 index 0000000000000..6ad9867049dee --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/bloomfilter3.result @@ -0,0 +1,122 @@ +CREATE TABLE `linktable` ( +`id1` bigint(20) unsigned NOT NULL DEFAULT '0', +`id1_type` int(10) unsigned NOT NULL DEFAULT '0', +`id2` bigint(20) unsigned NOT NULL DEFAULT '0', +`id2_type` int(10) unsigned NOT NULL DEFAULT '0', +`link_type` bigint(20) unsigned NOT NULL DEFAULT '0', +`visibility` tinyint(3) NOT NULL DEFAULT '0', +`data` varchar(255) NOT NULL DEFAULT '', +`time` bigint(20) unsigned NOT NULL DEFAULT '0', +`version` int(11) unsigned NOT NULL DEFAULT '0', +PRIMARY KEY (link_type, `id1`,`id2`) COMMENT 'cf_link_pk', +KEY `id1_type` (`id1`,`link_type`,`visibility`,`time`,`version`,`data`) COMMENT 'rev:cf_link_id1_type', +KEY `id1_type2` (`id1`,`link_type`,`time`,`version`,`data`,`visibility`) COMMENT 'rev:cf_link_id1_type2', +KEY `id1_type3` 
(`id1`,`visibility`,`time`,`version`,`data`,`link_type`) COMMENT 'rev:cf_link_id1_type3' +) ENGINE=RocksDB DEFAULT COLLATE=latin1_bin; +select variable_value into @c from information_schema.global_status where variable_name='rocksdb_bloom_filter_prefix_checked'; +select id1, id2, link_type, visibility, data, time, version from linktable FORCE INDEX(`id1_type`) where id1 = 100 and link_type = 1 and time >= 0 and time <= 9223372036854775807 and visibility = 1 order by time desc; +id1 id2 link_type visibility data time version +100 100 1 1 100 100 100 +select case when variable_value-@c > 0 then 'true' else 'false' end from information_schema.global_status where variable_name='rocksdb_bloom_filter_prefix_checked'; +case when variable_value-@c > 0 then 'true' else 'false' end +true +select variable_value into @c from information_schema.global_status where variable_name='rocksdb_bloom_filter_prefix_checked'; +select id1, id2, link_type, visibility, data, time, version from linktable FORCE INDEX(`id1_type2`) where id1 = 100 and link_type = 1 and time >= 0 and time <= 9223372036854775807 order by time desc; +id1 id2 link_type visibility data time version +100 100 1 1 100 100 100 +select case when variable_value-@c > 0 then 'true' else 'false' end from information_schema.global_status where variable_name='rocksdb_bloom_filter_prefix_checked'; +case when variable_value-@c > 0 then 'true' else 'false' end +true +select variable_value into @c from information_schema.global_status where variable_name='rocksdb_bloom_filter_prefix_checked'; +select id1, id2, link_type, visibility, data, time, version from linktable FORCE INDEX(`id1_type3`) where id1 = 100 and time >= 0 and time <= 9223372036854775807 and visibility = 1 order by time desc; +id1 id2 link_type visibility data time version +100 100 1 1 100 100 100 +select case when variable_value-@c = 0 then 'true' else 'false' end from information_schema.global_status where variable_name='rocksdb_bloom_filter_prefix_checked'; +case 
when variable_value-@c = 0 then 'true' else 'false' end +true +select variable_value into @c from information_schema.global_status where variable_name='rocksdb_bloom_filter_prefix_checked'; +select id1, id2, link_type, visibility, data, time, version from linktable FORCE INDEX(`id1_type`) where id1 = 100 and link_type = 1 and visibility = 1 and time >= 0 order by time desc; +id1 id2 link_type visibility data time version +100 100 1 1 100 100 100 +select case when variable_value-@c > 0 then 'true' else 'false' end from information_schema.global_status where variable_name='rocksdb_bloom_filter_prefix_checked'; +case when variable_value-@c > 0 then 'true' else 'false' end +true +select variable_value into @c from information_schema.global_status where variable_name='rocksdb_bloom_filter_prefix_checked'; +select id1, id2, link_type, visibility, data, time, version from linktable FORCE INDEX(`id1_type2`) where id1 = 100 and link_type = 1 and time >= 0 order by time desc; +id1 id2 link_type visibility data time version +100 100 1 1 100 100 100 +select case when variable_value-@c = 0 then 'true' else 'false' end from information_schema.global_status where variable_name='rocksdb_bloom_filter_prefix_checked'; +case when variable_value-@c = 0 then 'true' else 'false' end +true +## HA_READ_PREFIX_LAST +# BF len 20 +select variable_value into @c from information_schema.global_status where variable_name='rocksdb_bloom_filter_prefix_checked'; +select id1, id2, link_type, visibility, data, time, version from linktable FORCE INDEX(`id1_type`) where id1 = 100 and link_type = 1 and visibility = 1 order by time desc; +id1 id2 link_type visibility data time version +100 100 1 1 100 100 100 +select case when variable_value-@c > 0 then 'true' else 'false' end from information_schema.global_status where variable_name='rocksdb_bloom_filter_prefix_checked'; +case when variable_value-@c > 0 then 'true' else 'false' end +true +# BF len 19 +select variable_value into @c from 
information_schema.global_status where variable_name='rocksdb_bloom_filter_prefix_checked'; +select id1, id2, link_type, visibility, data, time, version from linktable FORCE INDEX(`id1_type2`) where id1 = 100 and link_type = 1 order by time desc; +id1 id2 link_type visibility data time version +100 100 1 1 100 100 100 +select case when variable_value-@c = 0 then 'true' else 'false' end from information_schema.global_status where variable_name='rocksdb_bloom_filter_prefix_checked'; +case when variable_value-@c = 0 then 'true' else 'false' end +true +# BF len 12 +select variable_value into @c from information_schema.global_status where variable_name='rocksdb_bloom_filter_prefix_checked'; +select id1, id2, link_type, visibility, data, time, version from linktable FORCE INDEX(`id1_type3`) where id1 = 100 and visibility = 1 order by time desc; +id1 id2 link_type visibility data time version +100 100 1 1 100 100 100 +select case when variable_value-@c = 0 then 'true' else 'false' end from information_schema.global_status where variable_name='rocksdb_bloom_filter_prefix_checked'; +case when variable_value-@c = 0 then 'true' else 'false' end +true +DROP TABLE linktable; +# +# bloom filter prefix is 20 byte +# Create a key which is longer than that, so that we see that +# eq_cond_len= slice.size() - 1; +# doesnt work. 
+# +# indexnr 4 +# kp0 + 4 = 8 +# kp1 + 8 = 16 +# kp2 + 8 = 24 24>20 byte length prefix +# kp3 + 8 = 28 +create table t1 ( +pk int primary key, +kp0 int not null, +kp1 bigint not null, +kp2 bigint not null, +kp3 bigint not null, +key kp12(kp0, kp1, kp2, kp3) comment 'rev:x1' +) engine=rocksdb; +insert into t1 values (1, 1,1, 1,1); +insert into t1 values (10,1,1,0x12FFFFFFFFFF,1); +insert into t1 values (11,1,1,0x12FFFFFFFFFF,1); +insert into t1 values (20,2,2,0x12FFFFFFFFFF,1); +insert into t1 values (21,2,2,0x12FFFFFFFFFF,1); +explain +select * from t1 where kp0=1 and kp1=1 and kp2=0x12FFFFFFFFFF order by kp3 desc; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 ref kp12 kp12 20 const,const,const # Using where; Using index +show status like '%rocksdb_bloom_filter_prefix%'; +Variable_name Value +Rocksdb_bloom_filter_prefix_checked 0 +Rocksdb_bloom_filter_prefix_useful 0 +select variable_value into @c from information_schema.global_status where variable_name='rocksdb_bloom_filter_prefix_checked'; +select * from t1 where kp0=1 and kp1=1 and kp2=0x12FFFFFFFFFF order by kp3 desc; +pk kp0 kp1 kp2 kp3 +11 1 1 20890720927743 1 +10 1 1 20890720927743 1 +show status like '%rocksdb_bloom_filter_prefix%'; +Variable_name Value +Rocksdb_bloom_filter_prefix_checked 0 +Rocksdb_bloom_filter_prefix_useful 0 +# The following MUST show TRUE: +select case when variable_value-@c = 0 then 'true' else 'false' end from information_schema.global_status where variable_name='rocksdb_bloom_filter_prefix_checked'; +case when variable_value-@c = 0 then 'true' else 'false' end +true +drop table t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/bloomfilter4.result b/storage/rocksdb/mysql-test/rocksdb/r/bloomfilter4.result new file mode 100644 index 0000000000000..1f4d1a641a252 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/bloomfilter4.result @@ -0,0 +1,30 @@ +CREATE TABLE t1 ( +`id1` int unsigned NOT NULL DEFAULT '0', +`id2` int unsigned NOT NULL 
DEFAULT '0', +`link_type` int unsigned NOT NULL DEFAULT '0', +`visibility` tinyint NOT NULL DEFAULT '0', +`data` varchar(255) NOT NULL DEFAULT '', +`time` int unsigned NOT NULL DEFAULT '0', +`version` int unsigned NOT NULL DEFAULT '0', +PRIMARY KEY (id1, link_type, visibility, id2) COMMENT 'rev:cf_link_pk' +) ENGINE=RocksDB DEFAULT COLLATE=latin1_bin; +CREATE PROCEDURE select_test() +BEGIN +DECLARE id1_cond INT; +SET id1_cond = 1; +WHILE id1_cond <= 20000 DO +SELECT count(*) AS cnt FROM (SELECT id1 FROM t1 FORCE INDEX (PRIMARY) WHERE id1 = id1_cond AND link_type = 1 AND visibility = 1 ORDER BY id2 DESC) AS t INTO @cnt; +IF @cnt < 1 THEN +SELECT id1_cond, @cnt; +END IF; +SET id1_cond = id1_cond + 1; +END WHILE; +END// +"Skipping bloom filter" +SET session rocksdb_skip_bloom_filter_on_read=1; +CALL select_test(); +"Using bloom filter" +SET session rocksdb_skip_bloom_filter_on_read=0; +CALL select_test(); +DROP PROCEDURE select_test; +drop table t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/bloomfilter_skip.result b/storage/rocksdb/mysql-test/rocksdb/r/bloomfilter_skip.result new file mode 100644 index 0000000000000..18f007be4b259 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/bloomfilter_skip.result @@ -0,0 +1,1237 @@ +CREATE PROCEDURE bloom_start() +BEGIN +select variable_value into @c from information_schema.global_status where variable_name='rocksdb_bloom_filter_prefix_checked'; +select variable_value into @u from information_schema.global_status where variable_name='rocksdb_bloom_filter_prefix_useful'; +END// +CREATE PROCEDURE bloom_end() +BEGIN +select case when variable_value-@c > 0 then 'true' else 'false' end as checked from information_schema.global_status where variable_name='rocksdb_bloom_filter_prefix_checked'; +END// +create or replace table t1 ( +id1 bigint not null, +id2 bigint not null, +id3 varchar(100) not null, +id4 int not null, +id5 int not null, +value bigint, +value2 varchar(100), +primary key (id1, id2, id3, id4) , +index id2 
(id2) , +index id2_id1 (id2, id1) , +index id2_id3 (id2, id3) , +index id2_id4 (id2, id4) , +index id2_id3_id1_id4 (id2, id3, id1, id4) , +index id3_id2 (id3, id2) +) engine=ROCKSDB; +create or replace table t2 ( +id1 bigint not null, +id2 bigint not null, +id3 varchar(100) not null, +id4 int not null, +id5 int not null, +value bigint, +value2 varchar(100), +primary key (id4) , +index id2 (id2) , +index id2_id3 (id2, id3) , +index id2_id4 (id2, id4) , +index id2_id4_id5 (id2, id4, id5) , +index id3_id4 (id3, id4) , +index id3_id5 (id3, id5) +) engine=ROCKSDB; +insert t1 +select (seq+9) div 10, (seq+4) div 5, (seq+4) div 5, seq, seq, 1000, "aaabbbccc" + from seq_1_to_10000; +insert t2 select * from t1; +call bloom_start(); +select count(*) from t1; +count(*) +10000 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t2; +count(*) +10000 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index(PRIMARY) where id1 >= 1; +count(*) +10000 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index(id2_id1) where id2 >= 1; +count(*) +10000 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t2 force index(id3_id4) where id3 >= '1'; +count(*) +10000 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index(id2_id1) where id2=2 and id1=1; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index(id2_id1) where id2=24 and id1=12; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index(id2_id1) where id2=88 and id1=44; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index(id2_id1) where id2=100 and id1=50; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index(id2_id1) where id2=428 and id1=214; +count(*) +5 +call 
bloom_end(); +checked +false +call bloom_start(); +select count(*) from t2 force index (id2_id4_id5) where id2=1 and id4=1 and id5=1; +count(*) +1 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t2 force index (id2_id4_id5) where id2=23 and id4=115 and id5=115; +count(*) +1 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t2 force index (id2_id4_id5) where id2=500 and id4=2500 and id5=2500; +count(*) +1 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t2 force index (id2_id4_id5) where id2=601 and id4=3005 and id5=3005; +count(*) +1 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t2 force index (id2_id3) where id2=1; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t2 force index (id2_id3) where id2=23; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t2 force index (id2_id3) where id2=345; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t2 force index (id2_id3) where id2=456; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t2 force index (id2_id4) where id2=1; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t2 force index (id2_id4) where id2=23; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t2 force index (id2_id4) where id2=345; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t2 force index (id2_id4) where id2=456; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index (id2_id3_id1_id4) where id2=1 and id3='1' and id1=1 order by id4; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index (id2_id3_id1_id4) where id2=36 and id3='36' and id1=18 order by 
id4; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index (id2_id3_id1_id4) where id2=124 and id3='124' and id1=62 order by id4; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index (id2_id3_id1_id4) where id2=888 and id3='888' and id1=444 order by id4; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index (id2_id3_id1_id4) where id2=124 and id3='124'; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t2 force index (id2_id3) where id2=1 and id3='1' and id4=1; +count(*) +1 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t2 force index (id2_id3) where id2=12 and id3='12' and id4=60; +count(*) +1 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index (id2_id3) where id2=1 and id3='1'; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index (id2_id3) where id2=23 and id3='23'; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index (id3_id2) where id2=1 and id3='1'; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index (id3_id2) where id2=23 and id3='23'; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index (PRIMARY) where id1=1; +count(*) +10 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index (PRIMARY) where id1=12; +count(*) +10 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index (PRIMARY) where id1=23; +count(*) +10 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index (PRIMARY) where id1=100; +count(*) +10 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) 
from t1 force index (PRIMARY) where id1=234; +count(*) +10 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index (id2_id3_id1_id4) where id2=36; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index (id2_id3_id1_id4) where id2=234; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t2 force index (id2) where id2=1 and id4=1; +count(*) +1 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t2 force index (id2) where id2=23 and id4=115; +count(*) +1 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t2 force index (id2) where id2=500 and id4=2500; +count(*) +1 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t2 force index (id2) where id2=601 and id4=3005; +count(*) +1 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t2 force index (id3_id4) where id3='1' and id4=1; +count(*) +1 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t2 force index (id3_id4) where id3='12' and id4=60; +count(*) +1 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index (id2) where id2=1; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index (id2) where id2=23; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index (id2) where id2=345; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index (id2) where id2=456; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t2 force index (id3_id5) where id3='100' and id5=500; +count(*) +1 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t2 force index (id3_id5) where id3='240' and id5=1200; +count(*) +1 +call bloom_end(); +checked 
+false +create or replace table t1 ( +id1 bigint not null, +id2 bigint not null, +id3 varchar(100) not null, +id4 int not null, +id5 int not null, +value bigint, +value2 varchar(100), +primary key (id1, id2, id3, id4) COMMENT 'cf_short_prefix', +index id2 (id2) COMMENT 'cf_short_prefix', +index id2_id1 (id2, id1) COMMENT 'cf_short_prefix', +index id2_id3 (id2, id3) COMMENT 'cf_short_prefix', +index id2_id4 (id2, id4) COMMENT 'cf_short_prefix', +index id2_id3_id1_id4 (id2, id3, id1, id4) COMMENT 'cf_short_prefix', +index id3_id2 (id3, id2) COMMENT 'cf_short_prefix' +) engine=ROCKSDB; +create or replace table t2 ( +id1 bigint not null, +id2 bigint not null, +id3 varchar(100) not null, +id4 int not null, +id5 int not null, +value bigint, +value2 varchar(100), +primary key (id4) COMMENT 'cf_short_prefix', +index id2 (id2) COMMENT 'cf_short_prefix', +index id2_id3 (id2, id3) COMMENT 'cf_short_prefix', +index id2_id4 (id2, id4) COMMENT 'cf_short_prefix', +index id2_id4_id5 (id2, id4, id5) COMMENT 'cf_short_prefix', +index id3_id4 (id3, id4) COMMENT 'cf_short_prefix', +index id3_id5 (id3, id5) COMMENT 'cf_short_prefix' +) engine=ROCKSDB; +insert t1 +select (seq+9) div 10, (seq+4) div 5, (seq+4) div 5, seq, seq, 1000, "aaabbbccc" + from seq_1_to_10000; +insert t2 select * from t1; +call bloom_start(); +select count(*) from t1; +count(*) +10000 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t2; +count(*) +10000 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index(PRIMARY) where id1 >= 1; +count(*) +10000 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index(id2_id1) where id2 >= 1; +count(*) +10000 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t2 force index(id3_id4) where id3 >= '1'; +count(*) +10000 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index(id2_id1) where id2=2 and id1=1; +count(*) 
+5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index(id2_id1) where id2=24 and id1=12; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index(id2_id1) where id2=88 and id1=44; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index(id2_id1) where id2=100 and id1=50; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index(id2_id1) where id2=428 and id1=214; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t2 force index (id2_id4_id5) where id2=1 and id4=1 and id5=1; +count(*) +1 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t2 force index (id2_id4_id5) where id2=23 and id4=115 and id5=115; +count(*) +1 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t2 force index (id2_id4_id5) where id2=500 and id4=2500 and id5=2500; +count(*) +1 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t2 force index (id2_id4_id5) where id2=601 and id4=3005 and id5=3005; +count(*) +1 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t2 force index (id2_id3) where id2=1; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t2 force index (id2_id3) where id2=23; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t2 force index (id2_id3) where id2=345; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t2 force index (id2_id3) where id2=456; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t2 force index (id2_id4) where id2=1; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t2 force index (id2_id4) where id2=23; +count(*) +5 +call bloom_end(); 
+checked +false +call bloom_start(); +select count(*) from t2 force index (id2_id4) where id2=345; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t2 force index (id2_id4) where id2=456; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index (id2_id3_id1_id4) where id2=1 and id3='1' and id1=1 order by id4; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index (id2_id3_id1_id4) where id2=36 and id3='36' and id1=18 order by id4; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index (id2_id3_id1_id4) where id2=124 and id3='124' and id1=62 order by id4; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index (id2_id3_id1_id4) where id2=888 and id3='888' and id1=444 order by id4; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index (id2_id3_id1_id4) where id2=124 and id3='124'; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t2 force index (id2_id3) where id2=1 and id3='1' and id4=1; +count(*) +1 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t2 force index (id2_id3) where id2=12 and id3='12' and id4=60; +count(*) +1 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index (id2_id3) where id2=1 and id3='1'; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index (id2_id3) where id2=23 and id3='23'; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index (id3_id2) where id2=1 and id3='1'; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index (id3_id2) where id2=23 and id3='23'; +count(*) +5 +call bloom_end(); +checked 
+false +call bloom_start(); +select count(*) from t1 force index (PRIMARY) where id1=1; +count(*) +10 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index (PRIMARY) where id1=12; +count(*) +10 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index (PRIMARY) where id1=23; +count(*) +10 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index (PRIMARY) where id1=100; +count(*) +10 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index (PRIMARY) where id1=234; +count(*) +10 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index (id2_id3_id1_id4) where id2=36; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index (id2_id3_id1_id4) where id2=234; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t2 force index (id2) where id2=1 and id4=1; +count(*) +1 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t2 force index (id2) where id2=23 and id4=115; +count(*) +1 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t2 force index (id2) where id2=500 and id4=2500; +count(*) +1 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t2 force index (id2) where id2=601 and id4=3005; +count(*) +1 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t2 force index (id3_id4) where id3='1' and id4=1; +count(*) +1 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t2 force index (id3_id4) where id3='12' and id4=60; +count(*) +1 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index (id2) where id2=1; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index (id2) where id2=23; +count(*) +5 +call 
bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index (id2) where id2=345; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index (id2) where id2=456; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t2 force index (id3_id5) where id3='100' and id5=500; +count(*) +1 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t2 force index (id3_id5) where id3='240' and id5=1200; +count(*) +1 +call bloom_end(); +checked +false +create or replace table t1 ( +id1 bigint not null, +id2 bigint not null, +id3 varchar(100) not null, +id4 int not null, +id5 int not null, +value bigint, +value2 varchar(100), +primary key (id1, id2, id3, id4) COMMENT 'cf_long_prefix', +index id2 (id2) COMMENT 'cf_long_prefix', +index id2_id1 (id2, id1) COMMENT 'cf_long_prefix', +index id2_id3 (id2, id3) COMMENT 'cf_long_prefix', +index id2_id4 (id2, id4) COMMENT 'cf_long_prefix', +index id2_id3_id1_id4 (id2, id3, id1, id4) COMMENT 'cf_long_prefix', +index id3_id2 (id3, id2) COMMENT 'cf_long_prefix' +) engine=ROCKSDB; +create or replace table t2 ( +id1 bigint not null, +id2 bigint not null, +id3 varchar(100) not null, +id4 int not null, +id5 int not null, +value bigint, +value2 varchar(100), +primary key (id4) COMMENT 'cf_long_prefix', +index id2 (id2) COMMENT 'cf_long_prefix', +index id2_id3 (id2, id3) COMMENT 'cf_long_prefix', +index id2_id4 (id2, id4) COMMENT 'cf_long_prefix', +index id2_id4_id5 (id2, id4, id5) COMMENT 'cf_long_prefix', +index id3_id4 (id3, id4) COMMENT 'cf_long_prefix', +index id3_id5 (id3, id5) COMMENT 'cf_long_prefix' +) engine=ROCKSDB; +insert t1 +select (seq+9) div 10, (seq+4) div 5, (seq+4) div 5, seq, seq, 1000, "aaabbbccc" + from seq_1_to_10000; +insert t2 select * from t1; +call bloom_start(); +select count(*) from t1; +count(*) +10000 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t2; 
+count(*) +10000 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index(PRIMARY) where id1 >= 1; +count(*) +10000 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index(id2_id1) where id2 >= 1; +count(*) +10000 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t2 force index(id3_id4) where id3 >= '1'; +count(*) +10000 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index(id2_id1) where id2=2 and id1=1; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index(id2_id1) where id2=24 and id1=12; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index(id2_id1) where id2=88 and id1=44; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index(id2_id1) where id2=100 and id1=50; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index(id2_id1) where id2=428 and id1=214; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t2 force index (id2_id4_id5) where id2=1 and id4=1 and id5=1; +count(*) +1 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t2 force index (id2_id4_id5) where id2=23 and id4=115 and id5=115; +count(*) +1 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t2 force index (id2_id4_id5) where id2=500 and id4=2500 and id5=2500; +count(*) +1 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t2 force index (id2_id4_id5) where id2=601 and id4=3005 and id5=3005; +count(*) +1 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t2 force index (id2_id3) where id2=1; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t2 force index (id2_id3) where 
id2=23; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t2 force index (id2_id3) where id2=345; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t2 force index (id2_id3) where id2=456; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t2 force index (id2_id4) where id2=1; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t2 force index (id2_id4) where id2=23; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t2 force index (id2_id4) where id2=345; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t2 force index (id2_id4) where id2=456; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index (id2_id3_id1_id4) where id2=1 and id3='1' and id1=1 order by id4; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index (id2_id3_id1_id4) where id2=36 and id3='36' and id1=18 order by id4; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index (id2_id3_id1_id4) where id2=124 and id3='124' and id1=62 order by id4; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index (id2_id3_id1_id4) where id2=888 and id3='888' and id1=444 order by id4; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index (id2_id3_id1_id4) where id2=124 and id3='124'; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t2 force index (id2_id3) where id2=1 and id3='1' and id4=1; +count(*) +1 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t2 force index (id2_id3) where id2=12 and id3='12' and id4=60; +count(*) +1 +call bloom_end(); +checked +false 
+call bloom_start(); +select count(*) from t1 force index (id2_id3) where id2=1 and id3='1'; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index (id2_id3) where id2=23 and id3='23'; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index (id3_id2) where id2=1 and id3='1'; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index (id3_id2) where id2=23 and id3='23'; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index (PRIMARY) where id1=1; +count(*) +10 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index (PRIMARY) where id1=12; +count(*) +10 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index (PRIMARY) where id1=23; +count(*) +10 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index (PRIMARY) where id1=100; +count(*) +10 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index (PRIMARY) where id1=234; +count(*) +10 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index (id2_id3_id1_id4) where id2=36; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index (id2_id3_id1_id4) where id2=234; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t2 force index (id2) where id2=1 and id4=1; +count(*) +1 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t2 force index (id2) where id2=23 and id4=115; +count(*) +1 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t2 force index (id2) where id2=500 and id4=2500; +count(*) +1 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t2 force index (id2) where id2=601 and 
id4=3005; +count(*) +1 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t2 force index (id3_id4) where id3='1' and id4=1; +count(*) +1 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t2 force index (id3_id4) where id3='12' and id4=60; +count(*) +1 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index (id2) where id2=1; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index (id2) where id2=23; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index (id2) where id2=345; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index (id2) where id2=456; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t2 force index (id3_id5) where id3='100' and id5=500; +count(*) +1 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t2 force index (id3_id5) where id3='240' and id5=1200; +count(*) +1 +call bloom_end(); +checked +false +create table r1 (id1 bigint, id2 bigint, id3 bigint, v1 int, v2 text, primary key (id1, id2, id3)) engine=rocksdb DEFAULT CHARSET=latin1 collate latin1_bin; +call bloom_start(); +select * from r1 where id1=1 and id2 in (1) order by id3 asc; +id1 id2 id3 v1 v2 +1 1 1 1 1 +call bloom_end(); +checked +false +call bloom_start(); +select * from r1 where id1=1 and id2 in (1) order by id3 desc; +id1 id2 id3 v1 v2 +1 1 1 1 1 +call bloom_end(); +checked +false +DROP PROCEDURE bloom_start; +DROP PROCEDURE bloom_end; +truncate table t1; +optimize table t1; +Table Op Msg_type Msg_text +test.t1 optimize status OK +truncate table t2; +optimize table t2; +Table Op Msg_type Msg_text +test.t2 optimize status OK +drop table if exists t1; +drop table if exists t2; +drop table if exists r1; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/bulk_load.result 
b/storage/rocksdb/mysql-test/rocksdb/r/bulk_load.result new file mode 100644 index 0000000000000..d75355f599f5b --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/bulk_load.result @@ -0,0 +1,67 @@ +DROP TABLE IF EXISTS t1, t2, t3; +CREATE TABLE t1(pk CHAR(5) PRIMARY KEY, a char(30), b char(30), key(a)) COLLATE 'latin1_bin'; +CREATE TABLE t2(pk CHAR(5) PRIMARY KEY, a char(30), b char(30), key(a)) COLLATE 'latin1_bin'; +CREATE TABLE t3(pk CHAR(5) PRIMARY KEY, a char(30), b char(30), key(a)) COLLATE 'latin1_bin' + PARTITION BY KEY() PARTITIONS 4; +connect other,localhost,root,,; +set session transaction isolation level repeatable read; +select * from information_schema.rocksdb_dbstats where stat_type='DB_NUM_SNAPSHOTS'; +STAT_TYPE VALUE +DB_NUM_SNAPSHOTS 0 +start transaction with consistent snapshot; +select * from information_schema.rocksdb_dbstats where stat_type='DB_NUM_SNAPSHOTS'; +STAT_TYPE VALUE +DB_NUM_SNAPSHOTS 1 +connection default; +set rocksdb_bulk_load=1; +set rocksdb_bulk_load_size=100000; +LOAD DATA INFILE INTO TABLE t1; +LOAD DATA INFILE INTO TABLE t2; +LOAD DATA INFILE INTO TABLE t3; +set rocksdb_bulk_load=0; +SHOW TABLE STATUS WHERE name LIKE 't%'; +Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length Index_length Data_free Auto_increment Create_time Update_time Check_time Collation Checksum Create_options Comment +t1 ROCKSDB 10 Fixed 5000000 # # # # 0 NULL NULL NULL NULL latin1_bin NULL +t2 ROCKSDB 10 Fixed 5000000 # # # # 0 NULL NULL NULL NULL latin1_bin NULL +t3 ROCKSDB 10 Fixed 5000000 # # # # 0 NULL NULL NULL NULL latin1_bin NULL partitioned +ANALYZE TABLE t1, t2, t3; +Table Op Msg_type Msg_text +test.t1 analyze status OK +test.t2 analyze status OK +test.t3 analyze status OK +SHOW TABLE STATUS WHERE name LIKE 't%'; +Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length Index_length Data_free Auto_increment Create_time Update_time Check_time Collation Checksum Create_options Comment +t1 
ROCKSDB 10 Fixed 5000000 # # # # 0 NULL NULL NULL NULL latin1_bin NULL +t2 ROCKSDB 10 Fixed 5000000 # # # # 0 NULL NULL NULL NULL latin1_bin NULL +t3 ROCKSDB 10 Fixed 5000000 # # # # 0 NULL NULL NULL NULL latin1_bin NULL partitioned +select count(pk) from t1; +count(pk) +5000000 +select count(a) from t1; +count(a) +5000000 +select count(b) from t1; +count(b) +5000000 +select count(pk) from t2; +count(pk) +5000000 +select count(a) from t2; +count(a) +5000000 +select count(b) from t2; +count(b) +5000000 +select count(pk) from t3; +count(pk) +5000000 +select count(a) from t3; +count(a) +5000000 +select count(b) from t3; +count(b) +5000000 +longfilenamethatvalidatesthatthiswillgetdeleted.bulk_load.tmp +test.bulk_load.tmp +disconnect other; +DROP TABLE t1, t2, t3; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/cardinality.result b/storage/rocksdb/mysql-test/rocksdb/r/cardinality.result new file mode 100644 index 0000000000000..840ad9a723cf8 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/cardinality.result @@ -0,0 +1,50 @@ +DROP TABLE IF EXISTS t1; +create table t1( +id bigint not null primary key, +i1 bigint, #unique +i2 bigint, #repeating +c1 varchar(20), #unique +c2 varchar(20), #repeating +index t1_1(id, i1), +index t1_2(i1, i2), +index t1_3(i2, i1), +index t1_4(c1, c2), +index t1_5(c2, c1) +) engine=rocksdb; +optimize table t1; +Table Op Msg_type Msg_text +test.t1 optimize status OK +show index in t1; +Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment +t1 0 PRIMARY 1 id A 100000 NULL NULL LSMTREE +t1 1 t1_1 1 id A 100000 NULL NULL LSMTREE +t1 1 t1_1 2 i1 A 100000 NULL NULL YES LSMTREE +t1 1 t1_2 1 i1 A 100000 NULL NULL YES LSMTREE +t1 1 t1_2 2 i2 A 100000 NULL NULL YES LSMTREE +t1 1 t1_3 1 i2 A 11111 NULL NULL YES LSMTREE +t1 1 t1_3 2 i1 A 100000 NULL NULL YES LSMTREE +t1 1 t1_4 1 c1 A 100000 NULL NULL YES LSMTREE +t1 1 t1_4 2 c2 A 100000 NULL NULL YES LSMTREE +t1 1 t1_5 1 c2 A 
11111 NULL NULL YES LSMTREE +t1 1 t1_5 2 c1 A 100000 NULL NULL YES LSMTREE +SELECT table_name, table_rows FROM information_schema.tables WHERE table_schema = DATABASE(); +table_name table_rows +t1 100000 +restarting... +show index in t1; +Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment +t1 0 PRIMARY 1 id A 100000 NULL NULL LSMTREE +t1 1 t1_1 1 id A 100000 NULL NULL LSMTREE +t1 1 t1_1 2 i1 A 100000 NULL NULL YES LSMTREE +t1 1 t1_2 1 i1 A 100000 NULL NULL YES LSMTREE +t1 1 t1_2 2 i2 A 100000 NULL NULL YES LSMTREE +t1 1 t1_3 1 i2 A 11111 NULL NULL YES LSMTREE +t1 1 t1_3 2 i1 A 100000 NULL NULL YES LSMTREE +t1 1 t1_4 1 c1 A 100000 NULL NULL YES LSMTREE +t1 1 t1_4 2 c2 A 100000 NULL NULL YES LSMTREE +t1 1 t1_5 1 c2 A 11111 NULL NULL YES LSMTREE +t1 1 t1_5 2 c1 A 100000 NULL NULL YES LSMTREE +SELECT table_name, table_rows FROM information_schema.tables WHERE table_schema = DATABASE(); +table_name table_rows +t1 100000 +drop table t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/check_table.result b/storage/rocksdb/mysql-test/rocksdb/r/check_table.result new file mode 100644 index 0000000000000..116c168c4da51 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/check_table.result @@ -0,0 +1,68 @@ +DROP TABLE IF EXISTS t1,t2; +CREATE TABLE t1 (a INT, b CHAR(8), pk INT AUTO_INCREMENT PRIMARY KEY) ENGINE=rocksdb; +INSERT INTO t1 (a,b) VALUES (1,'a'),(2,'b'); +CREATE TABLE t2 (a INT, b CHAR(8), pk INT AUTO_INCREMENT PRIMARY KEY) ENGINE=rocksdb; +CHECK TABLE t1; +Table Op Msg_type Msg_text +test.t1 check status OK +INSERT INTO t1 (a,b) VALUES (3,'c'); +INSERT INTO t2 (a,b) VALUES (4,'d'); +CHECK TABLE t1, t2 FOR UPGRADE; +Table Op Msg_type Msg_text +test.t1 check status OK +test.t2 check status OK +INSERT INTO t2 (a,b) VALUES (5,'e'); +CHECK TABLE t2 QUICK; +Table Op Msg_type Msg_text +test.t2 check status OK +INSERT INTO t1 (a,b) VALUES (6,'f'); +CHECK TABLE t1 FAST; +Table Op Msg_type 
Msg_text +test.t1 check status OK +INSERT INTO t1 (a,b) VALUES (7,'g'); +INSERT INTO t2 (a,b) VALUES (8,'h'); +CHECK TABLE t2, t1 MEDIUM; +Table Op Msg_type Msg_text +test.t2 check status OK +test.t1 check status OK +INSERT INTO t1 (a,b) VALUES (9,'i'); +INSERT INTO t2 (a,b) VALUES (10,'j'); +CHECK TABLE t1, t2 EXTENDED; +Table Op Msg_type Msg_text +test.t1 check status OK +test.t2 check status OK +INSERT INTO t1 (a,b) VALUES (11,'k'); +CHECK TABLE t1 CHANGED; +Table Op Msg_type Msg_text +test.t1 check status OK +DROP TABLE t1, t2; +CREATE TABLE t1 (a INT, b CHAR(8), pk INT AUTO_INCREMENT PRIMARY KEY, KEY(a)) ENGINE=rocksdb; +INSERT INTO t1 (a) VALUES (1),(2),(5); +CHECK TABLE t1; +Table Op Msg_type Msg_text +test.t1 check status OK +INSERT INTO t1 (a) VALUES (6),(8),(12); +CHECK TABLE t1 FOR UPGRADE; +Table Op Msg_type Msg_text +test.t1 check status OK +INSERT INTO t1 (a) VALUES (13),(15),(16); +CHECK TABLE t1 QUICK; +Table Op Msg_type Msg_text +test.t1 check status OK +INSERT INTO t1 (a) VALUES (17),(120),(132); +CHECK TABLE t1 FAST; +Table Op Msg_type Msg_text +test.t1 check status OK +INSERT INTO t1 (a) VALUES (801),(900),(7714); +CHECK TABLE t1 MEDIUM; +Table Op Msg_type Msg_text +test.t1 check status OK +INSERT INTO t1 (a) VALUES (8760),(10023),(12000); +CHECK TABLE t1 EXTENDED; +Table Op Msg_type Msg_text +test.t1 check status OK +INSERT INTO t1 (a) VALUES (13345),(24456),(78302),(143028); +CHECK TABLE t1 CHANGED; +Table Op Msg_type Msg_text +test.t1 check status OK +DROP TABLE t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/checkpoint.result b/storage/rocksdb/mysql-test/rocksdb/r/checkpoint.result new file mode 100644 index 0000000000000..fd1ac63629fd4 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/checkpoint.result @@ -0,0 +1,59 @@ +DROP TABLE IF EXISTS t1; +DROP TABLE IF EXISTS t2; +DROP TABLE IF EXISTS t3; +DROP TABLE IF EXISTS t4; +DROP TABLE IF EXISTS t5; +CREATE TABLE t1 ( +a int not null, +b int not null, +primary key (a,b) comment 
'cf1', +key (b) comment 'rev:cf2' +) ENGINE=RocksDB; +CREATE TABLE t2 ( +a int not null, +b int not null, +primary key (a,b) comment 'cf1', +key (b) comment 'rev:cf2' +) ENGINE=RocksDB; +CREATE TABLE t3 ( +a int not null, +b int not null, +primary key (a,b) comment 'cf1', +key (b) comment 'rev:cf2' +) ENGINE=RocksDB; +CREATE TABLE t4 ( +a int not null, +b int not null, +primary key (a,b) comment 'cf1', +key (b) comment 'rev:cf2' +) ENGINE=RocksDB; +DELETE FROM t1; +DELETE FROM t2; +DELETE FROM t3; +DELETE FROM t4; +CREATE TABLE t5 ( +a int not null, +b int not null, +primary key (a,b) comment 'cf1', +key (b) comment 'rev:cf2' +) ENGINE=RocksDB; +DELETE FROM t5; +SET GLOBAL ROCKSDB_CREATE_CHECKPOINT = '[CHECKPOINT]'; +CURRENT +SET GLOBAL ROCKSDB_CREATE_CHECKPOINT = '[CHECKPOINT]'; +CURRENT +truncate table t1; +optimize table t1; +truncate table t2; +optimize table t2; +truncate table t3; +optimize table t3; +truncate table t4; +optimize table t4; +truncate table t5; +optimize table t5; +drop table if exists t1; +drop table if exists t2; +drop table if exists t3; +drop table if exists t4; +drop table if exists t5; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/checksum_table.result b/storage/rocksdb/mysql-test/rocksdb/r/checksum_table.result new file mode 100644 index 0000000000000..bb209856a971e --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/checksum_table.result @@ -0,0 +1,92 @@ +DROP TABLE IF EXISTS t1,t2; +CREATE TABLE t1 (a INT PRIMARY KEY, b CHAR(8)) ENGINE=rocksdb CHECKSUM=0; +INSERT INTO t1 (a,b) VALUES (1,'a'),(2,'b'); +CREATE TABLE t2 (a INT PRIMARY KEY, b CHAR(8)) ENGINE=rocksdb CHECKSUM=0; +CHECKSUM TABLE t1; +Table Checksum +test.t1 4259194219 +CHECKSUM TABLE t2, t1; +Table Checksum +test.t2 0 +test.t1 4259194219 +CHECKSUM TABLE t1, t2 QUICK; +Table Checksum +test.t1 NULL +test.t2 NULL +CHECKSUM TABLE t1, t2 EXTENDED; +Table Checksum +test.t1 4259194219 +test.t2 0 +DROP TABLE t1, t2; +# +# Issue #110: SQL command checksum returns 
inconsistent result +# +create table t1 (pk int primary key, col1 varchar(10)) engine=rocksdb; +insert into t1 values (2,'fooo'); +insert into t1 values (1,NULL); +checksum table t1; +Table Checksum +test.t1 1303411884 +checksum table t1; +Table Checksum +test.t1 1303411884 +select * from t1 where pk=2; +pk col1 +2 fooo +checksum table t1; +Table Checksum +test.t1 1303411884 +checksum table t1; +Table Checksum +test.t1 1303411884 +flush tables; +checksum table t1; +Table Checksum +test.t1 1303411884 +checksum table t1; +Table Checksum +test.t1 1303411884 +drop table t1; +# +# The following test is about making sure MyRocks CHECKSUM TABLE +# values are the same as with InnoDB. +# If you see checksum values changed, make sure their counterparts +# in suite/innodb/r/checksum-matches-myrocks.result match. +# +create table t1 (pk int primary key, col1 varchar(10)) engine=rocksdb; +insert into t1 values (2,'fooo'); +insert into t1 values (1,NULL); +checksum table t1; +Table Checksum +test.t1 1303411884 +drop table t1; +create table t1 ( +pk bigint unsigned primary key, +col1 varchar(10), +col2 tinyint, +col3 double +) engine=rocksdb; +# MariaDB has changed the checksumming algorithm +# Enable the old algorithm: +set @tmp_old=@@old; +set old=1; +checksum table t1; +Table Checksum +test.t1 0 +insert into t1 values (1, NULL, NULL, NULL); +insert into t1 values (2, 'foo', NULL, NULL); +checksum table t1; +Table Checksum +test.t1 3633741545 +insert into t1 values (3, NULL, 123, NULL); +insert into t1 values (4, NULL, NULL, 2.78); +checksum table t1; +Table Checksum +test.t1 390004011 +insert into t1 values (5, 'xxxYYYzzzT', NULL, 2.78); +insert into t1 values (6, '', NULL, 2.78); +checksum table t1; +Table Checksum +test.t1 3183101003 +set old=@tmp_old; +drop table t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/checksum_table_live.result b/storage/rocksdb/mysql-test/rocksdb/r/checksum_table_live.result new file mode 100644 index 0000000000000..fb86c0af26074 --- 
/dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/checksum_table_live.result @@ -0,0 +1,20 @@ +DROP TABLE IF EXISTS t1,t2; +CREATE TABLE t1 (a INT PRIMARY KEY, b CHAR(8)) ENGINE=rocksdb CHECKSUM=1; +INSERT INTO t1 (a,b) VALUES (1,'a'),(2,'b'); +CREATE TABLE t2 (a INT PRIMARY KEY, b CHAR(8)) ENGINE=rocksdb CHECKSUM=1; +CHECKSUM TABLE t1; +Table Checksum +test.t1 4259194219 +CHECKSUM TABLE t2, t1; +Table Checksum +test.t2 0 +test.t1 4259194219 +CHECKSUM TABLE t1, t2 QUICK; +Table Checksum +test.t1 NULL +test.t2 NULL +CHECKSUM TABLE t1, t2 EXTENDED; +Table Checksum +test.t1 4259194219 +test.t2 0 +DROP TABLE t1, t2; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/col_opt_default.result b/storage/rocksdb/mysql-test/rocksdb/r/col_opt_default.result new file mode 100644 index 0000000000000..af53f0617539a --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/col_opt_default.result @@ -0,0 +1,20 @@ +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 (a INT PRIMARY KEY DEFAULT '0') ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +a int(11) NO PRI 0 +INSERT INTO t1 (a) VALUES (1); +SELECT a FROM t1; +a +1 +ALTER TABLE t1 ADD COLUMN b CHAR(8) DEFAULT ''; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +a int(11) NO PRI 0 +b char(8) YES +INSERT INTO t1 (b) VALUES ('a'); +SELECT a,b FROM t1 ORDER BY a,b; +a b +0 a +1 +DROP TABLE t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/col_opt_not_null.result b/storage/rocksdb/mysql-test/rocksdb/r/col_opt_not_null.result new file mode 100644 index 0000000000000..7f197f05b1590 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/col_opt_not_null.result @@ -0,0 +1,2603 @@ +######################## +# BINARY columns +######################## +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 ( +b BINARY NOT NULL, +b0 BINARY(0) NOT NULL, +b1 BINARY(1) NOT NULL, +b20 BINARY(20) NOT NULL, +b255 BINARY(255) NOT NULL, +pk BINARY PRIMARY KEY +) ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra 
+b binary(1) NO NULL +b0 binary(0) NO NULL +b1 binary(1) NO NULL +b20 binary(20) NO NULL +b255 binary(255) NO NULL +pk binary(1) NO PRI NULL +INSERT INTO t1 VALUES ('','','','','',''); +INSERT INTO t1 VALUES ('a','','b','abcdefghi klmnopqrst', 'Creating an article for the Knowledgebase is similar to asking questions. First, navigate to the category where you feel the article should be. Once there, double check that an article doesn\'t already exist which would work.','a'); +SELECT HEX(b), HEX(b0), HEX(b1), HEX(b20), HEX(b255), HEX(pk) FROM t1 ORDER BY pk; +HEX(b) HEX(b0) HEX(b1) HEX(b20) HEX(b255) HEX(pk) +00 00 0000000000000000000000000000000000000000 000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000 00 +61 62 616263646566676869206B6C6D6E6F7071727374 4372656174696E6720616E2061727469636C6520666F7220746865204B6E6F776C65646765626173652069732073696D696C617220746F2061736B696E67207175657374696F6E732E2046697273742C206E6176696761746520746F207468652063617465676F727920776865726520796F75206665656C207468652061727469636C652073686F756C642062652E204F6E63652074686572652C20646F75626C6520636865636B207468617420616E2061727469636C6520646F65736E277420616C726561647920657869737420776869636820776F756C6420776F726B2E00000000000000000000000000000000000000000000000000000000000000 61 +INSERT INTO t1 VALUES ('abc', 'a', 'abc', REPEAT('a',21), REPEAT('x',256),'b'); +Warnings: +Warning 1265 Data truncated for column 'b' at row 1 +Warning 1265 Data truncated for column 'b0' at row 1 +Warning 1265 Data truncated for column 'b1' at row 1 +Warning 
1265 Data truncated for column 'b20' at row 1 +Warning 1265 Data truncated for column 'b255' at row 1 +INSERT INTO t1 SELECT b255, b255, b255, b255, CONCAT('a',b255,b255), 'c' FROM t1; +ERROR 23000: Duplicate entry 'c' for key 'PRIMARY' +SELECT HEX(b), HEX(b0), HEX(b1), HEX(b20), HEX(b255), HEX(pk) FROM t1 ORDER BY pk; +HEX(b) HEX(b0) HEX(b1) HEX(b20) HEX(b255) HEX(pk) +00 00 0000000000000000000000000000000000000000 000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000 00 +61 61 6161616161616161616161616161616161616161 787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878 62 +61 62 616263646566676869206B6C6D6E6F7071727374 
4372656174696E6720616E2061727469636C6520666F7220746865204B6E6F776C65646765626173652069732073696D696C617220746F2061736B696E67207175657374696F6E732E2046697273742C206E6176696761746520746F207468652063617465676F727920776865726520796F75206665656C207468652061727469636C652073686F756C642062652E204F6E63652074686572652C20646F75626C6520636865636B207468617420616E2061727469636C6520646F65736E277420616C726561647920657869737420776869636820776F756C6420776F726B2E00000000000000000000000000000000000000000000000000000000000000 61 +ALTER TABLE t1 ADD COLUMN b257 BINARY(257) NOT NULL; +ERROR 42000: Column length too big for column 'b257' (max = 255); use BLOB or TEXT instead +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +b binary(1) NO NULL +b0 binary(0) NO NULL +b1 binary(1) NO NULL +b20 binary(20) NO NULL +b255 binary(255) NO NULL +pk binary(1) NO PRI NULL +DROP TABLE t1; +DROP TABLE IF EXISTS t1; +#---------------------------------- +# BINARY NOT NULL columns without a default +#---------------------------------- +CREATE TABLE t1 (pk INT AUTO_INCREMENT PRIMARY KEY, c BINARY NOT NULL) ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +pk int(11) NO PRI NULL auto_increment +c binary(1) NO NULL +INSERT INTO t1 (c) VALUES (NULL); +ERROR 23000: Column 'c' cannot be null +INSERT INTO t1 (c) VALUES (0); +SELECT HEX(c) FROM t1; +HEX(c) +30 +DROP TABLE t1; +#---------------------------------- +# BINARY NOT NULL columns with a default +#---------------------------------- +CREATE TABLE t1 ( +pk INT AUTO_INCREMENT PRIMARY KEY, +c BINARY NOT NULL DEFAULT 0 +) ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +pk int(11) NO PRI NULL auto_increment +c binary(1) NO 0 +ALTER TABLE t1 ADD COLUMN err BINARY NOT NULL DEFAULT NULL; +ERROR 42000: Invalid default value for 'err' +INSERT INTO t1 (c) VALUES (NULL); +ERROR 23000: Column 'c' cannot be null +INSERT INTO t1 (c) VALUES (0); +INSERT INTO t1 () VALUES (); +SELECT pk, HEX(c) FROM t1 ORDER BY pk; 
+pk HEX(c) +1 30 +2 30 +DROP TABLE t1; +######################## +# VARBINARY columns +######################## +DROP TABLE IF EXISTS t1, t2; +CREATE TABLE t1 ( +v0 VARBINARY(0) NOT NULL, +v1 VARBINARY(1) NOT NULL, +v64 VARBINARY(64) NOT NULL, +v65000 VARBINARY(65000) NOT NULL, +PRIMARY KEY (v64) +) ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +v0 varbinary(0) NO NULL +v1 varbinary(1) NO NULL +v64 varbinary(64) NO PRI NULL +v65000 varbinary(65000) NO NULL +CREATE TABLE t2 (v VARBINARY(65532) NOT NULL, PRIMARY KEY(v(255))) ENGINE=rocksdb; +SHOW COLUMNS IN t2; +Field Type Null Key Default Extra +v varbinary(65532) NO PRI NULL +INSERT INTO t1 (v0,v1,v64,v65000) VALUES ('','','',''); +INSERT INTO t1 (v0,v1,v64,v65000) VALUES ('','y','Once there, double check that an article doesn\'t already exist','Here is a list of recommended books on MariaDB and MySQL. We\'ve provided links to Amazon.com here for convenience, but they can be found at many other bookstores, both online and off. + + If you want to have your favorite MySQL / MariaDB book listed here, please leave a comment. + For developers who want to code on MariaDB or MySQL + + * Understanding MySQL Internals by Sasha Pachev, former MySQL developer at MySQL AB. + o This is the only book we know about that describes the internals of MariaDB / MySQL. A must have for anyone who wants to understand and develop on MariaDB! + o Not all topics are covered and some parts are slightly outdated, but still the best book on this topic. + * MySQL 5.1 Plugin Development by Sergei Golubchik and Andrew Hutchings + o A must read for anyone wanting to write a plugin for MariaDB, written by the Sergei who designed the plugin interface for MySQL and MariaDB! + + For MariaDB / MySQL end users + + * MariaDB Crash Course by Ben Forta + o First MariaDB book! + o For people who want to learn SQL and the basics of MariaDB. + o Now shipping. Purchase at Amazon.com or your favorite bookseller. 
+ + * SQL-99 Complete, Really by Peter Gulutzan & Trudy Pelzer. + o Everything you wanted to know about the SQL 99 standard. Excellent reference book! + o Free to read in the Knowledgebase! + + * MySQL (4th Edition) by Paul DuBois + o The \'default\' book to read if you wont to learn to use MySQL / MariaDB. + + * MySQL Cookbook by Paul DuBois + o A lot of examples of how to use MySQL. As with all of Paul\'s books, it\'s worth its weight in gold and even enjoyable reading for such a \'dry\' subject. + + * High Performance MySQL, Second Edition, By Baron Schwartz, Peter Zaitsev, Vadim Tkachenko, Jeremy D. Zawodny, Arjen Lentz, Derek J. Balling, et al. + o \"High Performance MySQL is the definitive guide to building fast, reliable systems with MySQL. Written by noted experts with years of real-world experience building very large systems, this book covers every aspect of MySQL performance in detail, and focuses on robustness, security, and data integrity. Learn advanced techniques in depth so you can bring out MySQL\'s full power.\" (From the book description at O\'Reilly) + + * MySQL Admin Cookbook + o A quick step-by-step guide for MySQL users and database administrators to tackle real-world challenges with MySQL configuration and administration + + * MySQL 5.0 Certification Study Guide, By Paul DuBois, Stefan Hinz, Carsten Pedersen + o This is the official guide to cover the passing of the two MySQL Certification examinations. It is valid till version 5.0 of the server, so while it misses all the features available in MySQL 5.1 and greater (including MariaDB 5.1 and greater), it provides a good basic understanding of MySQL for the end-user. 
'); +SELECT HEX(v0), HEX(v1), HEX(v64), HEX(v65000) FROM t1; +HEX(v0) HEX(v1) HEX(v64) HEX(v65000) + + 79 4F6E63652074686572652C20646F75626C6520636865636B207468617420616E2061727469636C6520646F65736E277420616C7265616479206578697374 486572652069732061206C697374206F66207265636F6D6D656E64656420626F6F6B73206F6E204D61726961444220616E64204D7953514C2E2057652776652070726F7669646564206C696E6B7320746F20416D617A6F6E2E636F6D206865726520666F7220636F6E76656E69656E63652C2062757420746865792063616E20626520666F756E64206174206D616E79206F7468657220626F6F6B73746F7265732C20626F7468206F6E6C696E6520616E64206F66662E0A0A2020496620796F752077616E7420746F206861766520796F7572206661766F72697465204D7953514C202F204D61726961444220626F6F6B206C697374656420686572652C20706C65617365206C65617665206120636F6D6D656E742E0A2020466F7220646576656C6F706572732077686F2077616E7420746F20636F6465206F6E204D617269614442206F72204D7953514C0A0A2020202020202A20556E6465727374616E64696E67204D7953514C20496E7465726E616C73206279205361736861205061636865762C20666F726D6572204D7953514C20646576656C6F706572206174204D7953514C2041422E0A2020202020202020202020206F205468697320697320746865206F6E6C7920626F6F6B207765206B6E6F772061626F75742074686174206465736372696265732074686520696E7465726E616C73206F66204D617269614442202F204D7953514C2E2041206D757374206861766520666F7220616E796F6E652077686F2077616E747320746F20756E6465727374616E6420616E6420646576656C6F70206F6E204D617269614442210A2020202020202020202020206F204E6F7420616C6C20746F706963732061726520636F766572656420616E6420736F6D652070617274732061726520736C696768746C79206F757464617465642C20627574207374696C6C20746865206265737420626F6F6B206F6E207468697320746F7069632E200A2020202020202A204D7953514C20352E3120506C7567696E20446576656C6F706D656E742062792053657267656920476F6C75626368696B20616E6420416E64726577204875746368696E67730A2020202020202020202020206F2041206D757374207265616420666F7220616E796F6E652077616E74696E6720746F207772697465206120706C7567696E20666F72204D6172696144422C207772697474656E2062792074686520536
5726765692077686F2064657369676E65642074686520706C7567696E20696E7465726661636520666F72204D7953514C20616E64204D61726961444221200A0A2020466F72204D617269614442202F204D7953514C20656E642075736572730A0A2020202020202A204D61726961444220437261736820436F757273652062792042656E20466F7274610A2020202020202020202020206F204669727374204D61726961444220626F6F6B210A2020202020202020202020206F20466F722070656F706C652077686F2077616E7420746F206C6561726E2053514C20616E642074686520626173696373206F66204D6172696144422E0A2020202020202020202020206F204E6F77207368697070696E672E20507572636861736520617420416D617A6F6E2E636F6D206F7220796F7572206661766F7269746520626F6F6B73656C6C65722E200A0A2020202020202A2053514C2D393920436F6D706C6574652C205265616C6C792062792050657465722047756C75747A616E20262054727564792050656C7A65722E0A2020202020202020202020206F2045766572797468696E6720796F752077616E74656420746F206B6E6F772061626F7574207468652053514C203939207374616E646172642E20457863656C6C656E74207265666572656E636520626F6F6B210A2020202020202020202020206F204672656520746F207265616420696E20746865204B6E6F776C656467656261736521200A0A2020202020202A204D7953514C20283474682045646974696F6E29206279205061756C204475426F69730A2020202020202020202020206F20546865202764656661756C742720626F6F6B20746F207265616420696620796F7520776F6E7420746F206C6561726E20746F20757365204D7953514C202F204D6172696144422E200A0A2020202020202A204D7953514C20436F6F6B626F6F6B206279205061756C204475426F69730A2020202020202020202020206F2041206C6F74206F66206578616D706C6573206F6620686F7720746F20757365204D7953514C2E204173207769746820616C6C206F66205061756C277320626F6F6B732C206974277320776F727468206974732077656967687420696E20676F6C6420616E64206576656E20656E6A6F7961626C652072656164696E6720666F7220737563682061202764727927207375626A6563742E200A0A2020202020202A204869676820506572666F726D616E6365204D7953514C2C205365636F6E642045646974696F6E2C204279204261726F6E20536368776172747A2C205065746572205A6169747365762C20566164696D20546B616368656E6B6F2C204A6572656D7920442E205A61776F646E792C2041726
A656E204C656E747A2C20446572656B204A2E2042616C6C696E672C20657420616C2E0A2020202020202020202020206F20224869676820506572666F726D616E6365204D7953514C2069732074686520646566696E697469766520677569646520746F206275696C64696E6720666173742C2072656C6961626C652073797374656D732077697468204D7953514C2E205772697474656E206279206E6F74656420657870657274732077697468207965617273206F66207265616C2D776F726C6420657870657269656E6365206275696C64696E672076657279206C617267652073797374656D732C207468697320626F6F6B20636F7665727320657665727920617370656374206F66204D7953514C20706572666F726D616E636520696E2064657461696C2C20616E6420666F6375736573206F6E20726F627573746E6573732C2073656375726974792C20616E64206461746120696E746567726974792E204C6561726E20616476616E63656420746563686E697175657320696E20646570746820736F20796F752063616E206272696E67206F7574204D7953514C27732066756C6C20706F7765722E22202846726F6D2074686520626F6F6B206465736372697074696F6E206174204F275265696C6C7929200A0A2020202020202A204D7953514C2041646D696E20436F6F6B626F6F6B0A2020202020202020202020206F204120717569636B20737465702D62792D7374657020677569646520666F72204D7953514C20757365727320616E642064617461626173652061646D696E6973747261746F727320746F207461636B6C65207265616C2D776F726C64206368616C6C656E6765732077697468204D7953514C20636F6E66696775726174696F6E20616E642061646D696E697374726174696F6E200A0A2020202020202A204D7953514C20352E302043657274696669636174696F6E2053747564792047756964652C204279205061756C204475426F69732C2053746566616E2048696E7A2C204361727374656E20506564657273656E0A2020202020202020202020206F205468697320697320746865206F6666696369616C20677569646520746F20636F766572207468652070617373696E67206F66207468652074776F204D7953514C2043657274696669636174696F6E206578616D696E6174696F6E732E2049742069732076616C69642074696C6C2076657273696F6E20352E30206F6620746865207365727665722C20736F207768696C65206974206D697373657320616C6C2074686520666561747572657320617661696C61626C6520696E204D7953514C20352E3120616E6420677265617465722028696E636C7564696E67204D61726961444220352E312
0616E642067726561746572292C2069742070726F7669646573206120676F6F6420626173696320756E6465727374616E64696E67206F66204D7953514C20666F722074686520656E642D757365722E20 +INSERT INTO t1 (v0,v1,v64,v65000) VALUES ('y', 'yy', REPEAT('c',65), REPEAT('abcdefghi ',6501)); +Warnings: +Warning 1265 Data truncated for column 'v0' at row 1 +Warning 1265 Data truncated for column 'v1' at row 1 +Warning 1265 Data truncated for column 'v64' at row 1 +Warning 1265 Data truncated for column 'v65000' at row 1 +INSERT INTO t1 (v0,v1,v64,v65000) SELECT v65000, v65000, CONCAT('a',v65000), CONCAT(v65000,v1) FROM t1; +Warnings: +Warning 1265 Data truncated for column 'v0' at row 5 +Warning 1265 Data truncated for column 'v1' at row 5 +Warning 1265 Data truncated for column 'v64' at row 5 +Warning 1265 Data truncated for column 'v0' at row 6 +Warning 1265 Data truncated for column 'v1' at row 6 +Warning 1265 Data truncated for column 'v64' at row 6 +Warning 1265 Data truncated for column 'v65000' at row 6 +SELECT HEX(v0), HEX(v1), HEX(v64), LENGTH(HEX(v65000)) FROM t1; +HEX(v0) HEX(v1) HEX(v64) LENGTH(HEX(v65000)) + 0 + 61 0 + 48 61486572652069732061206C697374206F66207265636F6D6D656E64656420626F6F6B73206F6E204D61726961444220616E64204D7953514C2E205765277665 5932 + 61 61616263646566676869206162636465666768692061626364656667686920616263646566676869206162636465666768692061626364656667686920616263 130000 + 79 4F6E63652074686572652C20646F75626C6520636865636B207468617420616E2061727469636C6520646F65736E277420616C7265616479206578697374 5930 + 79 63636363636363636363636363636363636363636363636363636363636363636363636363636363636363636363636363636363636363636363636363636363 130000 +ALTER TABLE t1 ADD COLUMN v65536 VARBINARY(65536) NOT NULL; +Warnings: +Note 1246 Converting column 'v65536' from VARBINARY to BLOB +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +v0 varbinary(0) NO NULL +v1 varbinary(1) NO NULL +v64 varbinary(64) NO PRI NULL +v65000 varbinary(65000) NO NULL +v65536 mediumblob NO NULL 
+DROP TABLE t1, t2; +DROP TABLE IF EXISTS t1; +#---------------------------------- +# VARBINARY(64) NOT NULL columns without a default +#---------------------------------- +CREATE TABLE t1 (pk INT AUTO_INCREMENT PRIMARY KEY, c VARBINARY(64) NOT NULL) ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +pk int(11) NO PRI NULL auto_increment +c varbinary(64) NO NULL +INSERT INTO t1 (c) VALUES (NULL); +ERROR 23000: Column 'c' cannot be null +INSERT INTO t1 (c) VALUES ('test'); +SELECT HEX(c) FROM t1; +HEX(c) +74657374 +DROP TABLE t1; +#---------------------------------- +# VARBINARY(64) NOT NULL columns with a default +#---------------------------------- +CREATE TABLE t1 ( +pk INT AUTO_INCREMENT PRIMARY KEY, +c VARBINARY(64) NOT NULL DEFAULT 'test' +) ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +pk int(11) NO PRI NULL auto_increment +c varbinary(64) NO test +ALTER TABLE t1 ADD COLUMN err VARBINARY(64) NOT NULL DEFAULT NULL; +ERROR 42000: Invalid default value for 'err' +INSERT INTO t1 (c) VALUES (NULL); +ERROR 23000: Column 'c' cannot be null +INSERT INTO t1 (c) VALUES ('test'); +INSERT INTO t1 () VALUES (); +SELECT pk, HEX(c) FROM t1 ORDER BY pk; +pk HEX(c) +1 74657374 +2 74657374 +DROP TABLE t1; +######################## +# BIT columns +######################## +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 ( +a BIT NOT NULL, +b BIT(20) NOT NULL, +c BIT(64) NOT NULL, +d BIT(1) NOT NULL, +PRIMARY KEY (c) +) ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +a bit(1) NO NULL +b bit(20) NO NULL +c bit(64) NO PRI NULL +d bit(1) NO NULL +ALTER TABLE t1 DROP COLUMN d; +ALTER TABLE t1 ADD COLUMN d BIT(0) NOT NULL; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +a bit(1) NO NULL +b bit(20) NO NULL +c bit(64) NO PRI NULL +d bit(1) NO NULL +INSERT INTO t1 (a,b,c,d) VALUES (0,POW(2,20)-1,b'1111111111111111111111111111111111111111111111111111111111111111',1); +SELECT BIN(a), HEX(b), c+0 FROM t1 WHERE 
d>0; +BIN(a) HEX(b) c+0 +0 FFFFF 18446744073709551615 +INSERT INTO t1 (a,b,c,d) VALUES (1,0,-2,0); +SELECT a+0, b+0, c+0 FROM t1 WHERE d<100; +a+0 b+0 c+0 +0 1048575 18446744073709551615 +1 0 18446744073709551614 +INSERT INTO t1 (a,b,c,d) VALUES (b'1', 'f', 0xFF, 0x0); +SELECT a+0, b+0, c+0 FROM t1 WHERE d IN (0, 2); +a+0 b+0 c+0 +1 0 18446744073709551614 +1 102 255 +DELETE FROM t1; +INSERT INTO t1 (a,b,c,d) VALUES (0x10,0,0,1); +Warnings: +Warning 1264 Out of range value for column 'a' at row 1 +SELECT a+0,b+0,c+0,d+0 FROM t1; +a+0 b+0 c+0 d+0 +1 0 0 1 +INSERT INTO t1 (a,b,c,d) VALUES (0x01,0,0x10000000000000000,0); +Warnings: +Warning 1264 Out of range value for column 'c' at row 1 +SELECT a+0,b+0,c+0,d+0 FROM t1; +a+0 b+0 c+0 d+0 +1 0 0 1 +1 0 18446744073709551615 0 +DROP TABLE t1; +CREATE TABLE t1 (pk INT PRIMARY KEY, a BIT(65) NOT NULL) ENGINE=rocksdb; +ERROR 42000: Display width out of range for 'a' (max = 64) +DROP TABLE IF EXISTS t1; +#---------------------------------- +# BIT NOT NULL columns without a default +#---------------------------------- +CREATE TABLE t1 (pk INT AUTO_INCREMENT PRIMARY KEY, c BIT NOT NULL) ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +pk int(11) NO PRI NULL auto_increment +c bit(1) NO NULL +INSERT INTO t1 (c) VALUES (NULL); +ERROR 23000: Column 'c' cannot be null +INSERT INTO t1 (c) VALUES (1); +SELECT HEX(c) FROM t1; +HEX(c) +1 +DROP TABLE t1; +#---------------------------------- +# BIT NOT NULL columns with a default +#---------------------------------- +CREATE TABLE t1 ( +pk INT AUTO_INCREMENT PRIMARY KEY, +c BIT NOT NULL DEFAULT 1 +) ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +pk int(11) NO PRI NULL auto_increment +c bit(1) NO b'1' +ALTER TABLE t1 ADD COLUMN err BIT NOT NULL DEFAULT NULL; +ERROR 42000: Invalid default value for 'err' +INSERT INTO t1 (c) VALUES (NULL); +ERROR 23000: Column 'c' cannot be null +INSERT INTO t1 (c) VALUES (1); +INSERT INTO t1 () VALUES (); 
+SELECT pk, HEX(c) FROM t1 ORDER BY pk; +pk HEX(c) +1 1 +2 1 +DROP TABLE t1; +######################## +# BLOB columns +######################## +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 ( +pk INT AUTO_INCREMENT PRIMARY KEY, +b BLOB NOT NULL, +b0 BLOB(0) NOT NULL, +b1 BLOB(1) NOT NULL, +b300 BLOB(300) NOT NULL, +bm BLOB(65535) NOT NULL, +b70k BLOB(70000) NOT NULL, +b17m BLOB(17000000) NOT NULL, +t TINYBLOB NOT NULL, +m MEDIUMBLOB NOT NULL, +l LONGBLOB NOT NULL +) ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +pk int(11) NO PRI NULL auto_increment +b blob NO NULL +b0 blob NO NULL +b1 tinyblob NO NULL +b300 blob NO NULL +bm blob NO NULL +b70k mediumblob NO NULL +b17m longblob NO NULL +t tinyblob NO NULL +m mediumblob NO NULL +l longblob NO NULL +INSERT INTO t1 (b,b0,b1,b300,bm,b70k,b17m,t,m,l) VALUES +('','','','','','','','','',''), +('a','b','c','d','e','f','g','h','i','j'), +('test1','test2','test3','test4','test5','test6','test7','test8','test9','test10'), +( REPEAT('a',65535), REPEAT('b',65535), REPEAT('c',255), REPEAT('d',65535), REPEAT('e',65535), REPEAT('f',1048576), HEX(REPEAT('g',1048576)), REPEAT('h',255), REPEAT('i',1048576), HEX(REPEAT('j',1048576)) ); +SELECT LENGTH(b), LENGTH(b0), LENGTH(b1), LENGTH(b300), LENGTH(bm), LENGTH(b70k), LENGTH(b17m), LENGTH(t), LENGTH(m), LENGTH(l) FROM t1; +LENGTH(b) LENGTH(b0) LENGTH(b1) LENGTH(b300) LENGTH(bm) LENGTH(b70k) LENGTH(b17m) LENGTH(t) LENGTH(m) LENGTH(l) +0 0 0 0 0 0 0 0 0 0 +1 1 1 1 1 1 1 1 1 1 +5 5 5 5 5 5 5 5 5 6 +65535 65535 255 65535 65535 1048576 2097152 255 1048576 2097152 +INSERT INTO t1 (b,b0,b1,b300,bm,b70k,b17m,t,m,l) VALUES +( REPEAT('a',65536), REPEAT('b',65536), REPEAT('c',256), REPEAT('d',65536), REPEAT('e',65536), REPEAT('f',1048576), REPEAT('g',1048576), REPEAT('h',256), REPEAT('i',1048576), REPEAT('j',1048576) ); +Warnings: +Warning 1265 Data truncated for column 'b' at row 1 +Warning 1265 Data truncated for column 'b0' at row 1 +Warning 1265 Data truncated for 
column 'b1' at row 1 +Warning 1265 Data truncated for column 'b300' at row 1 +Warning 1265 Data truncated for column 'bm' at row 1 +Warning 1265 Data truncated for column 't' at row 1 +SELECT LENGTH(b), LENGTH(b0), LENGTH(b1), LENGTH(b300), LENGTH(bm), LENGTH(b70k), LENGTH(b17m), LENGTH(t), LENGTH(m), LENGTH(l) FROM t1; +LENGTH(b) LENGTH(b0) LENGTH(b1) LENGTH(b300) LENGTH(bm) LENGTH(b70k) LENGTH(b17m) LENGTH(t) LENGTH(m) LENGTH(l) +0 0 0 0 0 0 0 0 0 0 +1 1 1 1 1 1 1 1 1 1 +5 5 5 5 5 5 5 5 5 6 +65535 65535 255 65535 65535 1048576 1048576 255 1048576 1048576 +65535 65535 255 65535 65535 1048576 2097152 255 1048576 2097152 +ALTER TABLE t1 ADD COLUMN bbb BLOB(4294967296); +ERROR 42000: Display width out of range for 'bbb' (max = 4294967295) +DROP TABLE t1; +DROP TABLE IF EXISTS t1; +#---------------------------------- +# BLOB NOT NULL columns without a default +#---------------------------------- +CREATE TABLE t1 (pk INT AUTO_INCREMENT PRIMARY KEY, c BLOB NOT NULL) ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +pk int(11) NO PRI NULL auto_increment +c blob NO NULL +INSERT INTO t1 (c) VALUES (NULL); +ERROR 23000: Column 'c' cannot be null +INSERT INTO t1 (c) VALUES (''); +SELECT HEX(c) FROM t1; +HEX(c) + +DROP TABLE t1; +#---------------------------------- +# BLOB NOT NULL columns with a default +#---------------------------------- +CREATE TABLE t1 ( +pk INT AUTO_INCREMENT PRIMARY KEY, +c BLOB NOT NULL DEFAULT '' +) ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +pk int(11) NO PRI NULL auto_increment +c blob NO '' +ALTER TABLE t1 ADD COLUMN err BLOB NOT NULL DEFAULT NULL; +ERROR 42000: Invalid default value for 'err' +INSERT INTO t1 (c) VALUES (NULL); +ERROR 23000: Column 'c' cannot be null +INSERT INTO t1 (c) VALUES (''); +INSERT INTO t1 () VALUES (); +SELECT pk, HEX(c) FROM t1 ORDER BY pk; +pk HEX(c) +1 +2 +DROP TABLE t1; +DROP TABLE IF EXISTS t1; +#---------------------------------- +# TINYBLOB NOT NULL columns 
without a default +#---------------------------------- +CREATE TABLE t1 (pk INT AUTO_INCREMENT PRIMARY KEY, c TINYBLOB NOT NULL) ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +pk int(11) NO PRI NULL auto_increment +c tinyblob NO NULL +INSERT INTO t1 (c) VALUES (NULL); +ERROR 23000: Column 'c' cannot be null +INSERT INTO t1 (c) VALUES (''); +SELECT HEX(c) FROM t1; +HEX(c) + +DROP TABLE t1; +#---------------------------------- +# TINYBLOB NOT NULL columns with a default +#---------------------------------- +CREATE TABLE t1 ( +pk INT AUTO_INCREMENT PRIMARY KEY, +c TINYBLOB NOT NULL DEFAULT '' +) ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +pk int(11) NO PRI NULL auto_increment +c tinyblob NO '' +ALTER TABLE t1 ADD COLUMN err TINYBLOB NOT NULL DEFAULT NULL; +ERROR 42000: Invalid default value for 'err' +INSERT INTO t1 (c) VALUES (NULL); +ERROR 23000: Column 'c' cannot be null +INSERT INTO t1 (c) VALUES (''); +INSERT INTO t1 () VALUES (); +SELECT pk, HEX(c) FROM t1 ORDER BY pk; +pk HEX(c) +1 +2 +DROP TABLE t1; +DROP TABLE IF EXISTS t1; +#---------------------------------- +# MEDIUMBLOB NOT NULL columns without a default +#---------------------------------- +CREATE TABLE t1 (pk INT AUTO_INCREMENT PRIMARY KEY, c MEDIUMBLOB NOT NULL) ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +pk int(11) NO PRI NULL auto_increment +c mediumblob NO NULL +INSERT INTO t1 (c) VALUES (NULL); +ERROR 23000: Column 'c' cannot be null +INSERT INTO t1 (c) VALUES (''); +SELECT HEX(c) FROM t1; +HEX(c) + +DROP TABLE t1; +#---------------------------------- +# MEDIUMBLOB NOT NULL columns with a default +#---------------------------------- +CREATE TABLE t1 ( +pk INT AUTO_INCREMENT PRIMARY KEY, +c MEDIUMBLOB NOT NULL DEFAULT '' +) ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +pk int(11) NO PRI NULL auto_increment +c mediumblob NO '' +ALTER TABLE t1 ADD COLUMN err MEDIUMBLOB NOT NULL DEFAULT NULL; 
+ERROR 42000: Invalid default value for 'err' +INSERT INTO t1 (c) VALUES (NULL); +ERROR 23000: Column 'c' cannot be null +INSERT INTO t1 (c) VALUES (''); +INSERT INTO t1 () VALUES (); +SELECT pk, HEX(c) FROM t1 ORDER BY pk; +pk HEX(c) +1 +2 +DROP TABLE t1; +DROP TABLE IF EXISTS t1; +#---------------------------------- +# LONGBLOB NOT NULL columns without a default +#---------------------------------- +CREATE TABLE t1 (pk INT AUTO_INCREMENT PRIMARY KEY, c LONGBLOB NOT NULL) ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +pk int(11) NO PRI NULL auto_increment +c longblob NO NULL +INSERT INTO t1 (c) VALUES (NULL); +ERROR 23000: Column 'c' cannot be null +INSERT INTO t1 (c) VALUES (''); +SELECT HEX(c) FROM t1; +HEX(c) + +DROP TABLE t1; +#---------------------------------- +# LONGBLOB NOT NULL columns with a default +#---------------------------------- +CREATE TABLE t1 ( +pk INT AUTO_INCREMENT PRIMARY KEY, +c LONGBLOB NOT NULL DEFAULT '' +) ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +pk int(11) NO PRI NULL auto_increment +c longblob NO '' +ALTER TABLE t1 ADD COLUMN err LONGBLOB NOT NULL DEFAULT NULL; +ERROR 42000: Invalid default value for 'err' +INSERT INTO t1 (c) VALUES (NULL); +ERROR 23000: Column 'c' cannot be null +INSERT INTO t1 (c) VALUES (''); +INSERT INTO t1 () VALUES (); +SELECT pk, HEX(c) FROM t1 ORDER BY pk; +pk HEX(c) +1 +2 +DROP TABLE t1; +######################## +# BOOL columns +######################## +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 ( +pk INT AUTO_INCREMENT PRIMARY KEY, +b1 BOOL NOT NULL, +b2 BOOLEAN NOT NULL +) ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +pk int(11) NO PRI NULL auto_increment +b1 tinyint(1) NO NULL +b2 tinyint(1) NO NULL +INSERT INTO t1 (b1,b2) VALUES (1,TRUE); +SELECT b1,b2 FROM t1; +b1 b2 +1 1 +INSERT INTO t1 (b1,b2) VALUES (FALSE,0); +SELECT b1,b2 FROM t1; +b1 b2 +0 0 +1 1 +INSERT INTO t1 (b1,b2) VALUES (2,3); +SELECT b1,b2 FROM t1; +b1 b2 
+0 0 +1 1 +2 3 +INSERT INTO t1 (b1,b2) VALUES (-1,-2); +SELECT b1,b2 FROM t1; +b1 b2 +-1 -2 +0 0 +1 1 +2 3 +SELECT IF(b1,'true','false') AS a, IF(b2,'true','false') AS b FROM t1; +a b +false false +true true +true true +true true +SELECT b1,b2 FROM t1 WHERE b1 = TRUE; +b1 b2 +1 1 +SELECT b1,b2 FROM t1 WHERE b2 = FALSE; +b1 b2 +0 0 +INSERT INTO t1 (b1,b2) VALUES ('a','b'); +Warnings: +Warning 1366 Incorrect integer value: 'a' for column 'b1' at row 1 +Warning 1366 Incorrect integer value: 'b' for column 'b2' at row 1 +SELECT b1,b2 FROM t1; +b1 b2 +-1 -2 +0 0 +0 0 +1 1 +2 3 +INSERT INTO t1 (b1,b2) VALUES (128,-129); +Warnings: +Warning 1264 Out of range value for column 'b1' at row 1 +Warning 1264 Out of range value for column 'b2' at row 1 +SELECT b1,b2 FROM t1; +b1 b2 +-1 -2 +0 0 +0 0 +1 1 +127 -128 +2 3 +ALTER TABLE t1 ADD COLUMN b3 BOOLEAN UNSIGNED NOT NULL; +ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'UNSIGNED NOT NULL' at line 1 +ALTER TABLE ADD COLUMN b3 BOOL ZEROFILL NOT NULL; +ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'ADD COLUMN b3 BOOL ZEROFILL NOT NULL' at line 1 +DROP TABLE t1; +DROP TABLE IF EXISTS t1; +#---------------------------------- +# BOOL NOT NULL columns without a default +#---------------------------------- +CREATE TABLE t1 (pk INT AUTO_INCREMENT PRIMARY KEY, c BOOL NOT NULL) ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +pk int(11) NO PRI NULL auto_increment +c tinyint(1) NO NULL +INSERT INTO t1 (c) VALUES (NULL); +ERROR 23000: Column 'c' cannot be null +INSERT INTO t1 (c) VALUES ('0'); +SELECT HEX(c) FROM t1; +HEX(c) +0 +DROP TABLE t1; +#---------------------------------- +# BOOL NOT NULL columns with a default +#---------------------------------- +CREATE TABLE t1 ( +pk INT AUTO_INCREMENT PRIMARY KEY, +c 
BOOL NOT NULL DEFAULT '0' +) ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +pk int(11) NO PRI NULL auto_increment +c tinyint(1) NO 0 +ALTER TABLE t1 ADD COLUMN err BOOL NOT NULL DEFAULT NULL; +ERROR 42000: Invalid default value for 'err' +INSERT INTO t1 (c) VALUES (NULL); +ERROR 23000: Column 'c' cannot be null +INSERT INTO t1 (c) VALUES ('0'); +INSERT INTO t1 () VALUES (); +SELECT pk, HEX(c) FROM t1 ORDER BY pk; +pk HEX(c) +1 0 +2 0 +DROP TABLE t1; +######################## +# CHAR columns +######################## +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 ( +c CHAR NOT NULL, +c0 CHAR(0) NOT NULL, +c1 CHAR(1) NOT NULL, +c20 CHAR(20) NOT NULL, +c255 CHAR(255) NOT NULL, +PRIMARY KEY (c255) +) ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +c char(1) NO NULL +c0 char(0) NO NULL +c1 char(1) NO NULL +c20 char(20) NO NULL +c255 char(255) NO PRI NULL +INSERT INTO t1 (c,c0,c1,c20,c255) VALUES ('','','','',''); +INSERT INTO t1 (c,c0,c1,c20,c255) VALUES ('a','','b','abcdefghi klmnopqrst', 'Creating an article for the Knowledgebase is similar to asking questions. First, navigate to the category where you feel the article should be. Once there, double check that an article doesn\'t already exist which would work.'); +SELECT c,c0,c1,c20,c255 FROM t1; +c c0 c1 c20 c255 + +a b abcdefghi klmnopqrst Creating an article for the Knowledgebase is similar to asking questions. First, navigate to the category where you feel the article should be. Once there, double check that an article doesn't already exist which would work. 
+INSERT INTO t1 (c,c0,c1,c20,c255) VALUES ('abc', 'a', 'abc', REPEAT('a',21), REPEAT('x',256)); +Warnings: +Warning 1265 Data truncated for column 'c' at row 1 +Warning 1265 Data truncated for column 'c0' at row 1 +Warning 1265 Data truncated for column 'c1' at row 1 +Warning 1265 Data truncated for column 'c20' at row 1 +Warning 1265 Data truncated for column 'c255' at row 1 +INSERT INTO t1 (c,c0,c1,c20,c255) SELECT c255, c255, c255, c255, CONCAT('a',c255,c1) FROM t1; +Warnings: +Warning 1265 Data truncated for column 'c' at row 5 +Warning 1265 Data truncated for column 'c0' at row 5 +Warning 1265 Data truncated for column 'c1' at row 5 +Warning 1265 Data truncated for column 'c20' at row 5 +Warning 1265 Data truncated for column 'c' at row 6 +Warning 1265 Data truncated for column 'c0' at row 6 +Warning 1265 Data truncated for column 'c1' at row 6 +Warning 1265 Data truncated for column 'c20' at row 6 +Warning 1265 Data truncated for column 'c255' at row 6 +SELECT c,c0,c1,c20,c255 FROM t1; +c c0 c1 c20 c255 + + a +C C Creating an article aCreating an article for the Knowledgebase is similar to asking questions. First, navigate to the category where you feel the article should be. Once there, double check that an article doesn't already exist which would work.b +a a aaaaaaaaaaaaaaaaaaaa xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx +a b abcdefghi klmnopqrst Creating an article for the Knowledgebase is similar to asking questions. First, navigate to the category where you feel the article should be. Once there, double check that an article doesn't already exist which would work. 
+x x xxxxxxxxxxxxxxxxxxxx axxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx +SELECT DISTINCT c20, REPEAT('a',LENGTH(c20)), COUNT(*) FROM t1 GROUP BY c1, c20; +c20 REPEAT('a',LENGTH(c20)) COUNT(*) + 2 +Creating an article aaaaaaaaaaaaaaaaaaa 1 +aaaaaaaaaaaaaaaaaaaa aaaaaaaaaaaaaaaaaaaa 1 +abcdefghi klmnopqrst aaaaaaaaaaaaaaaaaaaa 1 +xxxxxxxxxxxxxxxxxxxx aaaaaaaaaaaaaaaaaaaa 1 +ALTER TABLE t1 ADD COLUMN c257 CHAR(257) NOT NULL; +ERROR 42000: Column length too big for column 'c257' (max = 255); use BLOB or TEXT instead +DROP TABLE t1; +DROP TABLE IF EXISTS t1; +#---------------------------------- +# CHAR NOT NULL columns without a default +#---------------------------------- +CREATE TABLE t1 (pk INT AUTO_INCREMENT PRIMARY KEY, c CHAR NOT NULL) ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +pk int(11) NO PRI NULL auto_increment +c char(1) NO NULL +INSERT INTO t1 (c) VALUES (NULL); +ERROR 23000: Column 'c' cannot be null +INSERT INTO t1 (c) VALUES ('_'); +SELECT HEX(c) FROM t1; +HEX(c) +5F +DROP TABLE t1; +#---------------------------------- +# CHAR NOT NULL columns with a default +#---------------------------------- +CREATE TABLE t1 ( +pk INT AUTO_INCREMENT PRIMARY KEY, +c CHAR NOT NULL DEFAULT '_' +) ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +pk int(11) NO PRI NULL auto_increment +c char(1) NO _ +ALTER TABLE t1 ADD COLUMN err CHAR NOT NULL DEFAULT NULL; +ERROR 42000: Invalid default value for 'err' +INSERT INTO t1 (c) VALUES (NULL); +ERROR 23000: Column 'c' cannot be null +INSERT INTO t1 (c) VALUES ('_'); +INSERT INTO t1 () VALUES (); +SELECT pk, HEX(c) FROM t1 ORDER BY pk; +pk HEX(c) +1 5F +2 5F +DROP TABLE t1; +######################## +# VARCHAR columns +######################## +DROP TABLE IF EXISTS t1, t2; 
+CREATE TABLE t1 ( +v0 VARCHAR(0) NOT NULL, +v1 VARCHAR(1) NOT NULL, +v64 VARCHAR(64) NOT NULL, +v65000 VARCHAR(65000) NOT NULL, +PRIMARY KEY (v64) +) ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +v0 varchar(0) NO NULL +v1 varchar(1) NO NULL +v64 varchar(64) NO PRI NULL +v65000 varchar(65000) NO NULL +CREATE TABLE t2 (v VARCHAR(65532), PRIMARY KEY (v(255))) ENGINE=rocksdb; +SHOW COLUMNS IN t2; +Field Type Null Key Default Extra +v varchar(65532) NO PRI NULL +INSERT INTO t1 (v0,v1,v64,v65000) VALUES ('','','',''); +INSERT INTO t1 (v0,v1,v64,v65000) VALUES ('','y','Once there, double check that an article doesn\'t already exist','Here is a list of recommended books on MariaDB and MySQL. We\'ve provided links to Amazon.com here for convenience, but they can be found at many other bookstores, both online and off. + + If you want to have your favorite MySQL / MariaDB book listed here, please leave a comment. + For developers who want to code on MariaDB or MySQL + + * Understanding MySQL Internals by Sasha Pachev, former MySQL developer at MySQL AB. + o This is the only book we know about that describes the internals of MariaDB / MySQL. A must have for anyone who wants to understand and develop on MariaDB! + o Not all topics are covered and some parts are slightly outdated, but still the best book on this topic. + * MySQL 5.1 Plugin Development by Sergei Golubchik and Andrew Hutchings + o A must read for anyone wanting to write a plugin for MariaDB, written by the Sergei who designed the plugin interface for MySQL and MariaDB! + + For MariaDB / MySQL end users + + * MariaDB Crash Course by Ben Forta + o First MariaDB book! + o For people who want to learn SQL and the basics of MariaDB. + o Now shipping. Purchase at Amazon.com or your favorite bookseller. + + * SQL-99 Complete, Really by Peter Gulutzan & Trudy Pelzer. + o Everything you wanted to know about the SQL 99 standard. Excellent reference book! + o Free to read in the Knowledgebase! 
+ + * MySQL (4th Edition) by Paul DuBois + o The \'default\' book to read if you wont to learn to use MySQL / MariaDB. + + * MySQL Cookbook by Paul DuBois + o A lot of examples of how to use MySQL. As with all of Paul\'s books, it\'s worth its weight in gold and even enjoyable reading for such a \'dry\' subject. + + * High Performance MySQL, Second Edition, By Baron Schwartz, Peter Zaitsev, Vadim Tkachenko, Jeremy D. Zawodny, Arjen Lentz, Derek J. Balling, et al. + o \"High Performance MySQL is the definitive guide to building fast, reliable systems with MySQL. Written by noted experts with years of real-world experience building very large systems, this book covers every aspect of MySQL performance in detail, and focuses on robustness, security, and data integrity. Learn advanced techniques in depth so you can bring out MySQL\'s full power.\" (From the book description at O\'Reilly) + + * MySQL Admin Cookbook + o A quick step-by-step guide for MySQL users and database administrators to tackle real-world challenges with MySQL configuration and administration + + * MySQL 5.0 Certification Study Guide, By Paul DuBois, Stefan Hinz, Carsten Pedersen + o This is the official guide to cover the passing of the two MySQL Certification examinations. It is valid till version 5.0 of the server, so while it misses all the features available in MySQL 5.1 and greater (including MariaDB 5.1 and greater), it provides a good basic understanding of MySQL for the end-user. '); +SELECT v0,v1,v64,v65000 FROM t1; +v0 v1 v64 v65000 + + + + + + + + + + + + y Once there, double check that an article doesn't already exist Here is a list of recommended books on MariaDB and MySQL. We've provided links to Amazon.com here for convenience, but they can be found at many other bookstores, both online and off. + o "High Performance MySQL is the definitive guide to building fast, reliable systems with MySQL. 
Written by noted experts with years of real-world experience building very large systems, this book covers every aspect of MySQL performance in detail, and focuses on robustness, security, and data integrity. Learn advanced techniques in depth so you can bring out MySQL's full power." (From the book description at O'Reilly) + o A lot of examples of how to use MySQL. As with all of Paul's books, it's worth its weight in gold and even enjoyable reading for such a 'dry' subject. + o A must read for anyone wanting to write a plugin for MariaDB, written by the Sergei who designed the plugin interface for MySQL and MariaDB! + o A quick step-by-step guide for MySQL users and database administrators to tackle real-world challenges with MySQL configuration and administration + o Everything you wanted to know about the SQL 99 standard. Excellent reference book! + o First MariaDB book! + o For people who want to learn SQL and the basics of MariaDB. + o Free to read in the Knowledgebase! + o Not all topics are covered and some parts are slightly outdated, but still the best book on this topic. + o Now shipping. Purchase at Amazon.com or your favorite bookseller. + o The 'default' book to read if you wont to learn to use MySQL / MariaDB. + o This is the official guide to cover the passing of the two MySQL Certification examinations. It is valid till version 5.0 of the server, so while it misses all the features available in MySQL 5.1 and greater (including MariaDB 5.1 and greater), it provides a good basic understanding of MySQL for the end-user. + o This is the only book we know about that describes the internals of MariaDB / MySQL. A must have for anyone who wants to understand and develop on MariaDB! + * High Performance MySQL, Second Edition, By Baron Schwartz, Peter Zaitsev, Vadim Tkachenko, Jeremy D. Zawodny, Arjen Lentz, Derek J. Balling, et al. 
+ * MariaDB Crash Course by Ben Forta + * MySQL (4th Edition) by Paul DuBois + * MySQL 5.0 Certification Study Guide, By Paul DuBois, Stefan Hinz, Carsten Pedersen + * MySQL 5.1 Plugin Development by Sergei Golubchik and Andrew Hutchings + * MySQL Admin Cookbook + * MySQL Cookbook by Paul DuBois + * SQL-99 Complete, Really by Peter Gulutzan & Trudy Pelzer. + * Understanding MySQL Internals by Sasha Pachev, former MySQL developer at MySQL AB. + For MariaDB / MySQL end users + For developers who want to code on MariaDB or MySQL + If you want to have your favorite MySQL / MariaDB book listed here, please leave a comment. +INSERT INTO t1 (v0,v1,v64,v65000) VALUES ('y', 'yy', REPEAT('c',65), REPEAT('abcdefghi ',6501)); +Warnings: +Warning 1265 Data truncated for column 'v0' at row 1 +Warning 1265 Data truncated for column 'v1' at row 1 +Warning 1265 Data truncated for column 'v64' at row 1 +Warning 1265 Data truncated for column 'v65000' at row 1 +INSERT INTO t1 (v0,v1,v64,v65000) SELECT v65000, v65000, CONCAT('a',v65000), CONCAT(v65000,v1) FROM t1; +Warnings: +Warning 1265 Data truncated for column 'v0' at row 5 +Warning 1265 Data truncated for column 'v1' at row 5 +Warning 1265 Data truncated for column 'v64' at row 5 +Warning 1265 Data truncated for column 'v65000' at row 5 +Warning 1265 Data truncated for column 'v0' at row 6 +Warning 1265 Data truncated for column 'v1' at row 6 +Warning 1265 Data truncated for column 'v64' at row 6 +SELECT v0, v1, v64, LENGTH(v65000) FROM t1; +v0 v1 v64 LENGTH(v65000) + 0 + a 0 + H aHere is a list of recommended books on MariaDB and MySQL. 
We've 2966 + a aabcdefghi abcdefghi abcdefghi abcdefghi abcdefghi abcdefghi abc 65000 + y Once there, double check that an article doesn't already exist 2965 + y cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc 65000 +ALTER TABLE t1 ADD COLUMN v65536 VARCHAR(65536) NOT NULL; +Warnings: +Note 1246 Converting column 'v65536' from VARCHAR to TEXT +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +v0 varchar(0) NO NULL +v1 varchar(1) NO NULL +v64 varchar(64) NO PRI NULL +v65000 varchar(65000) NO NULL +v65536 mediumtext NO NULL +DROP TABLE t1, t2; +DROP TABLE IF EXISTS t1; +#---------------------------------- +# VARCHAR(64) NOT NULL columns without a default +#---------------------------------- +CREATE TABLE t1 (pk INT AUTO_INCREMENT PRIMARY KEY, c VARCHAR(64) NOT NULL) ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +pk int(11) NO PRI NULL auto_increment +c varchar(64) NO NULL +INSERT INTO t1 (c) VALUES (NULL); +ERROR 23000: Column 'c' cannot be null +INSERT INTO t1 (c) VALUES ('test default'); +SELECT HEX(c) FROM t1; +HEX(c) +746573742064656661756C74 +DROP TABLE t1; +#---------------------------------- +# VARCHAR(64) NOT NULL columns with a default +#---------------------------------- +CREATE TABLE t1 ( +pk INT AUTO_INCREMENT PRIMARY KEY, +c VARCHAR(64) NOT NULL DEFAULT 'test default' +) ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +pk int(11) NO PRI NULL auto_increment +c varchar(64) NO test default +ALTER TABLE t1 ADD COLUMN err VARCHAR(64) NOT NULL DEFAULT NULL; +ERROR 42000: Invalid default value for 'err' +INSERT INTO t1 (c) VALUES (NULL); +ERROR 23000: Column 'c' cannot be null +INSERT INTO t1 (c) VALUES ('test default'); +INSERT INTO t1 () VALUES (); +SELECT pk, HEX(c) FROM t1 ORDER BY pk; +pk HEX(c) +1 746573742064656661756C74 +2 746573742064656661756C74 +DROP TABLE t1; +######################## +# date and time columns +######################## +set 
@col_opt_not_nullsave_time_zone=@@time_zone; +set time_zone='UTC'; +DROP TABLE IF EXISTS t1; +set @save_time_zone=@@time_zone; +set time_zone='UTC'; +CREATE TABLE t1 ( +d DATE NOT NULL, +dt DATETIME NOT NULL, +ts TIMESTAMP NOT NULL, +t TIME NOT NULL, +y YEAR NOT NULL, +y4 YEAR(4) NOT NULL, +y2 YEAR(2) NOT NULL, +pk DATETIME PRIMARY KEY +) ENGINE=rocksdb; +Warnings: +Note 1287 'YEAR(2)' is deprecated and will be removed in a future release. Please use YEAR(4) instead +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +d date NO NULL +dt datetime NO NULL +ts timestamp NO NULL +t time NO NULL +y year(4) NO NULL +y4 year(4) NO NULL +y2 year(2) NO NULL +pk datetime NO PRI NULL +SET @tm = '2012-04-09 05:27:00'; +INSERT INTO t1 (d,dt,ts,t,y,y4,y2,pk) VALUES +('1000-01-01', '1000-01-01 00:00:00', FROM_UNIXTIME(1), '-838:59:59', '1901', '1901', '00','2012-12-12 12:12:12'), +('9999-12-31', '9999-12-31 23:59:59', FROM_UNIXTIME(2147483647), '838:59:59', '2155', '2155', '99','2012-12-12 12:12:13'), +('0000-00-00', '0000-00-00 00:00:00', '0000-00-00 00:00:00', '00:00:00', '0', '0', '0','2012-12-12 12:12:14'), +(DATE(@tm),@tm,TIMESTAMP(@tm),TIME(@tm),YEAR(@tm),YEAR(@tm),YEAR(@tm),'2012-12-12 12:12:15'); +SELECT d,dt,ts,t,y,y4,y2 FROM t1; +d dt ts t y y4 y2 +0000-00-00 0000-00-00 00:00:00 0000-00-00 00:00:00 00:00:00 2000 2000 00 +1000-01-01 1000-01-01 00:00:00 1970-01-01 00:00:01 -838:59:59 1901 1901 00 +2012-04-09 2012-04-09 05:27:00 2012-04-09 05:27:00 05:27:00 2012 2012 12 +9999-12-31 9999-12-31 23:59:59 2038-01-19 03:14:07 838:59:59 2155 2155 99 +INSERT INTO t1 (d,dt,ts,t,y,y4,y2,pk) VALUES +('999-13-32', '999-11-31 00:00:00', '0', '-839:00:00', '1900', '1900', '-1','2012-12-12 12:12:16'); +Warnings: +Warning 1265 Data truncated for column 'd' at row 1 +Warning 1265 Data truncated for column 'dt' at row 1 +Warning 1265 Data truncated for column 'ts' at row 1 +Warning 1264 Out of range value for column 't' at row 1 +Warning 1264 Out of range value for column 'y' at row 1 
+Warning 1264 Out of range value for column 'y4' at row 1 +Warning 1264 Out of range value for column 'y2' at row 1 +SELECT d,dt,ts,t,y,y4,y2 FROM t1; +d dt ts t y y4 y2 +1000-01-01 1000-01-01 00:00:00 1970-01-01 00:00:01 -838:59:59 1901 1901 00 +9999-12-31 9999-12-31 23:59:59 2038-01-19 03:14:07 838:59:59 2155 2155 99 +0000-00-00 0000-00-00 00:00:00 0000-00-00 00:00:00 00:00:00 2000 2000 00 +2012-04-09 2012-04-09 05:27:00 2012-04-09 05:27:00 05:27:00 2012 2012 12 +0000-00-00 0000-00-00 00:00:00 0000-00-00 00:00:00 -838:59:59 0000 0000 00 +set time_zone=@save_time_zone; +DROP TABLE t1; +SET TIMESTAMP=UNIX_TIMESTAMP('2013-12-12 12:12:12'); +DROP TABLE IF EXISTS t1; +#---------------------------------- +# DATE NOT NULL columns without a default +#---------------------------------- +CREATE TABLE t1 (pk INT AUTO_INCREMENT PRIMARY KEY, c DATE NOT NULL) ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +pk int(11) NO PRI NULL auto_increment +c date NO NULL +INSERT INTO t1 (c) VALUES (NULL); +ERROR 23000: Column 'c' cannot be null +INSERT INTO t1 (c) VALUES ('2012-12-21'); +SELECT HEX(c) FROM t1; +HEX(c) +323031322D31322D3231 +DROP TABLE t1; +#---------------------------------- +# DATE NOT NULL columns with a default +#---------------------------------- +CREATE TABLE t1 ( +pk INT AUTO_INCREMENT PRIMARY KEY, +c DATE NOT NULL DEFAULT '2012-12-21' +) ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +pk int(11) NO PRI NULL auto_increment +c date NO 2012-12-21 +ALTER TABLE t1 ADD COLUMN err DATE NOT NULL DEFAULT NULL; +ERROR 42000: Invalid default value for 'err' +INSERT INTO t1 (c) VALUES (NULL); +ERROR 23000: Column 'c' cannot be null +INSERT INTO t1 (c) VALUES ('2012-12-21'); +INSERT INTO t1 () VALUES (); +SELECT pk, HEX(c) FROM t1 ORDER BY pk; +pk HEX(c) +1 323031322D31322D3231 +2 323031322D31322D3231 +DROP TABLE t1; +DROP TABLE IF EXISTS t1; +#---------------------------------- +# DATETIME NOT NULL columns without a default 
+#---------------------------------- +CREATE TABLE t1 (pk INT AUTO_INCREMENT PRIMARY KEY, c DATETIME NOT NULL) ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +pk int(11) NO PRI NULL auto_increment +c datetime NO NULL +INSERT INTO t1 (c) VALUES (NULL); +ERROR 23000: Column 'c' cannot be null +INSERT INTO t1 (c) VALUES ('2012-12-21 12:21:12'); +SELECT HEX(c) FROM t1; +HEX(c) +323031322D31322D32312031323A32313A3132 +DROP TABLE t1; +#---------------------------------- +# DATETIME NOT NULL columns with a default +#---------------------------------- +CREATE TABLE t1 ( +pk INT AUTO_INCREMENT PRIMARY KEY, +c DATETIME NOT NULL DEFAULT '2012-12-21 12:21:12' +) ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +pk int(11) NO PRI NULL auto_increment +c datetime NO 2012-12-21 12:21:12 +ALTER TABLE t1 ADD COLUMN err DATETIME NOT NULL DEFAULT NULL; +ERROR 42000: Invalid default value for 'err' +INSERT INTO t1 (c) VALUES (NULL); +ERROR 23000: Column 'c' cannot be null +INSERT INTO t1 (c) VALUES ('2012-12-21 12:21:12'); +INSERT INTO t1 () VALUES (); +SELECT pk, HEX(c) FROM t1 ORDER BY pk; +pk HEX(c) +1 323031322D31322D32312031323A32313A3132 +2 323031322D31322D32312031323A32313A3132 +DROP TABLE t1; +DROP TABLE IF EXISTS t1; +#---------------------------------- +# TIMESTAMP NOT NULL column without a default +#---------------------------------- +CREATE TABLE t1 (pk INT AUTO_INCREMENT PRIMARY KEY, c TIMESTAMP NOT NULL) ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +pk int(11) NO PRI NULL auto_increment +c timestamp NO NULL +INSERT INTO t1 (c) VALUES (NULL); +INSERT INTO t1 (c) VALUES ('2012-12-21 12:21:12'); +SELECT HEX(c) FROM t1; +HEX(c) +323031332D31322D31322031323A31323A3132 +323031322D31322D32312031323A32313A3132 +DROP TABLE t1; +#---------------------------------- +# TIMESTAMP NOT NULL columns with a default +#---------------------------------- +CREATE TABLE t1 ( +pk INT AUTO_INCREMENT PRIMARY KEY, +c 
TIMESTAMP NOT NULL DEFAULT '2012-12-21 12:21:12' +) ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +pk int(11) NO PRI NULL auto_increment +c timestamp NO 2012-12-21 12:21:12 +ALTER TABLE t1 ADD COLUMN err TIMESTAMP NOT NULL DEFAULT NULL; +ERROR 42000: Invalid default value for 'err' +set @save_ts=@@timestamp; +set timestamp=1478923914; +INSERT INTO t1 (c) VALUES (NULL); +set timestamp=@save_ts; +INSERT INTO t1 (c) VALUES ('2012-12-21 12:21:12'); +INSERT INTO t1 () VALUES (); +SELECT pk, HEX(c) FROM t1 ORDER BY pk; +pk HEX(c) +1 323031362D31312D31322030343A31313A3534 +2 323031322D31322D32312031323A32313A3132 +3 323031322D31322D32312031323A32313A3132 +DROP TABLE t1; +DROP TABLE IF EXISTS t1; +#---------------------------------- +# TIME NOT NULL columns without a default +#---------------------------------- +CREATE TABLE t1 (pk INT AUTO_INCREMENT PRIMARY KEY, c TIME NOT NULL) ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +pk int(11) NO PRI NULL auto_increment +c time NO NULL +INSERT INTO t1 (c) VALUES (NULL); +ERROR 23000: Column 'c' cannot be null +INSERT INTO t1 (c) VALUES ('12:21:12'); +SELECT HEX(c) FROM t1; +HEX(c) +31323A32313A3132 +DROP TABLE t1; +#---------------------------------- +# TIME NOT NULL columns with a default +#---------------------------------- +CREATE TABLE t1 ( +pk INT AUTO_INCREMENT PRIMARY KEY, +c TIME NOT NULL DEFAULT '12:21:12' +) ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +pk int(11) NO PRI NULL auto_increment +c time NO 12:21:12 +ALTER TABLE t1 ADD COLUMN err TIME NOT NULL DEFAULT NULL; +ERROR 42000: Invalid default value for 'err' +INSERT INTO t1 (c) VALUES (NULL); +ERROR 23000: Column 'c' cannot be null +INSERT INTO t1 (c) VALUES ('12:21:12'); +INSERT INTO t1 () VALUES (); +SELECT pk, HEX(c) FROM t1 ORDER BY pk; +pk HEX(c) +1 31323A32313A3132 +2 31323A32313A3132 +DROP TABLE t1; +DROP TABLE IF EXISTS t1; +#---------------------------------- +# YEAR NOT NULL 
columns without a default +#---------------------------------- +CREATE TABLE t1 (pk INT AUTO_INCREMENT PRIMARY KEY, c YEAR NOT NULL) ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +pk int(11) NO PRI NULL auto_increment +c year(4) NO NULL +INSERT INTO t1 (c) VALUES (NULL); +ERROR 23000: Column 'c' cannot be null +INSERT INTO t1 (c) VALUES ('2012'); +SELECT HEX(c) FROM t1; +HEX(c) +7DC +DROP TABLE t1; +#---------------------------------- +# YEAR NOT NULL columns with a default +#---------------------------------- +CREATE TABLE t1 ( +pk INT AUTO_INCREMENT PRIMARY KEY, +c YEAR NOT NULL DEFAULT '2012' +) ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +pk int(11) NO PRI NULL auto_increment +c year(4) NO 2012 +ALTER TABLE t1 ADD COLUMN err YEAR NOT NULL DEFAULT NULL; +ERROR 42000: Invalid default value for 'err' +INSERT INTO t1 (c) VALUES (NULL); +ERROR 23000: Column 'c' cannot be null +INSERT INTO t1 (c) VALUES ('2012'); +INSERT INTO t1 () VALUES (); +SELECT pk, HEX(c) FROM t1 ORDER BY pk; +pk HEX(c) +1 7DC +2 7DC +DROP TABLE t1; +DROP TABLE IF EXISTS t1; +#---------------------------------- +# YEAR(2) NOT NULL columns without a default +#---------------------------------- +CREATE TABLE t1 (pk INT AUTO_INCREMENT PRIMARY KEY, c YEAR(2) NOT NULL) ENGINE=rocksdb; +Warnings: +Note 1287 'YEAR(2)' is deprecated and will be removed in a future release. 
Please use YEAR(4) instead +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +pk int(11) NO PRI NULL auto_increment +c year(2) NO NULL +INSERT INTO t1 (c) VALUES (NULL); +ERROR 23000: Column 'c' cannot be null +INSERT INTO t1 (c) VALUES ('12'); +SELECT HEX(c) FROM t1; +HEX(c) +C +DROP TABLE t1; +#---------------------------------- +# YEAR(2) NOT NULL columns with a default +#---------------------------------- +CREATE TABLE t1 ( +pk INT AUTO_INCREMENT PRIMARY KEY, +c YEAR(2) NOT NULL DEFAULT '12' +) ENGINE=rocksdb; +Warnings: +Note 1287 'YEAR(2)' is deprecated and will be removed in a future release. Please use YEAR(4) instead +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +pk int(11) NO PRI NULL auto_increment +c year(2) NO 12 +ALTER TABLE t1 ADD COLUMN err YEAR(2) NOT NULL DEFAULT NULL; +ERROR 42000: Invalid default value for 'err' +INSERT INTO t1 (c) VALUES (NULL); +ERROR 23000: Column 'c' cannot be null +INSERT INTO t1 (c) VALUES ('12'); +INSERT INTO t1 () VALUES (); +SELECT pk, HEX(c) FROM t1 ORDER BY pk; +pk HEX(c) +1 C +2 C +DROP TABLE t1; +set time_zone= @col_opt_not_nullsave_time_zone; +######################## +# ENUM columns +######################## +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 ( +a ENUM('') NOT NULL, +b ENUM('test1','test2','test3','test4','test5') NOT NULL, +c ENUM('1','2','3','4','5','6','7','8','9','a','b','c','d','e','f','g','h','i','j','k','l','m','n','o','p','q','r','s','t','u','v','w','x','y','z',' 
','11','12','13','14','15','16','17','18','19','1a','1b','1c','1d','1e','1f','1g','1h','1i','1j','1k','1l','1m','1n','1o','1p','1q','1r','1s','1t','1u','1v','1w','1x','1y','1z','20','21','22','23','24','25','26','27','28','29','2a','2b','2c','2d','2e','2f','2g','2h','2i','2j','2k','2l','2m','2n','2o','2p','2q','2r','2s','2t','2u','2v','2w','2x','2y','2z','30','31','32','33','34','35','36','37','38','39','3a','3b','3c','3d','3e','3f','3g','3h','3i','3j','3k','3l','3m','3n','3o','3p','3q','3r','3s','3t','3u','3v','3w','3x','3y','3z','40','41','42','43','44','45','46','47','48','49','4a','4b','4c','4d','4e','4f','4g','4h','4i','4j','4k','4l','4m','4n','4o','4p','4q','4r','4s','4t','4u','4v','4w','4x','4y','4z','50','51','52','53','54','55','56','57','58','59','5a','5b','5c','5d','5e','5f','5g','5h','5i','5j','5k','5l','5m','5n','5o','5p','5q','5r','5s','5t','5u','5v','5w','5x','5y','5z','60','61','62','63','64','65','66','67','68','69','6a','6b','6c','6d','6e','6f','6g','6h','6i','6j','6k','6l','6m','6n','6o','6p','6q','6r','6s','6t','6u','6v','6w','6x','6y','6z','70','71','72','73','74','75') NOT NULL, +PRIMARY KEY (b) +) ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +a enum('') NO NULL +b enum('test1','test2','test3','test4','test5') NO PRI NULL +c 
enum('1','2','3','4','5','6','7','8','9','a','b','c','d','e','f','g','h','i','j','k','l','m','n','o','p','q','r','s','t','u','v','w','x','y','z','','11','12','13','14','15','16','17','18','19','1a','1b','1c','1d','1e','1f','1g','1h','1i','1j','1k','1l','1m','1n','1o','1p','1q','1r','1s','1t','1u','1v','1w','1x','1y','1z','20','21','22','23','24','25','26','27','28','29','2a','2b','2c','2d','2e','2f','2g','2h','2i','2j','2k','2l','2m','2n','2o','2p','2q','2r','2s','2t','2u','2v','2w','2x','2y','2z','30','31','32','33','34','35','36','37','38','39','3a','3b','3c','3d','3e','3f','3g','3h','3i','3j','3k','3l','3m','3n','3o','3p','3q','3r','3s','3t','3u','3v','3w','3x','3y','3z','40','41','42','43','44','45','46','47','48','49','4a','4b','4c','4d','4e','4f','4g','4h','4i','4j','4k','4l','4m','4n','4o','4p','4q','4r','4s','4t','4u','4v','4w','4x','4y','4z','50','51','52','53','54','55','56','57','58','59','5a','5b','5c','5d','5e','5f','5g','5h','5i','5j','5k','5l','5m','5n','5o','5p','5q','5r','5s','5t','5u','5v','5w','5x','5y','5z','60','61','62','63','64','65','66','67','68','69','6a','6b','6c','6d','6e','6f','6g','6h','6i','6j','6k','6l','6m','6n','6o','6p','6q','6r','6s','6t','6u','6v','6w','6x','6y','6z','70','71','72','73','74','75') NO NULL +INSERT INTO t1 (a,b,c) VALUES ('','test2','4'),('',5,2); +SELECT a,b,c FROM t1; +a b c + test2 4 + test5 2 +INSERT INTO t1 (a,b,c) VALUES (0,'test6',-1); +Warnings: +Warning 1265 Data truncated for column 'a' at row 1 +Warning 1265 Data truncated for column 'b' at row 1 +Warning 1265 Data truncated for column 'c' at row 1 +SELECT a,b,c FROM t1; +a b c + + test2 4 + test5 2 +ALTER TABLE t1 ADD COLUMN e ENUM('a','A') NOT NULL; +Warnings: +Note 1291 Column 'e' has duplicated value 'a' in ENUM +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +a enum('') NO NULL +b enum('test1','test2','test3','test4','test5') NO PRI NULL +c 
enum('1','2','3','4','5','6','7','8','9','a','b','c','d','e','f','g','h','i','j','k','l','m','n','o','p','q','r','s','t','u','v','w','x','y','z','','11','12','13','14','15','16','17','18','19','1a','1b','1c','1d','1e','1f','1g','1h','1i','1j','1k','1l','1m','1n','1o','1p','1q','1r','1s','1t','1u','1v','1w','1x','1y','1z','20','21','22','23','24','25','26','27','28','29','2a','2b','2c','2d','2e','2f','2g','2h','2i','2j','2k','2l','2m','2n','2o','2p','2q','2r','2s','2t','2u','2v','2w','2x','2y','2z','30','31','32','33','34','35','36','37','38','39','3a','3b','3c','3d','3e','3f','3g','3h','3i','3j','3k','3l','3m','3n','3o','3p','3q','3r','3s','3t','3u','3v','3w','3x','3y','3z','40','41','42','43','44','45','46','47','48','49','4a','4b','4c','4d','4e','4f','4g','4h','4i','4j','4k','4l','4m','4n','4o','4p','4q','4r','4s','4t','4u','4v','4w','4x','4y','4z','50','51','52','53','54','55','56','57','58','59','5a','5b','5c','5d','5e','5f','5g','5h','5i','5j','5k','5l','5m','5n','5o','5p','5q','5r','5s','5t','5u','5v','5w','5x','5y','5z','60','61','62','63','64','65','66','67','68','69','6a','6b','6c','6d','6e','6f','6g','6h','6i','6j','6k','6l','6m','6n','6o','6p','6q','6r','6s','6t','6u','6v','6w','6x','6y','6z','70','71','72','73','74','75') NO NULL +e enum('a','A') NO NULL +INSERT INTO t1 (a,b,c,e) VALUES ('','test3','75','A'); +SELECT a,b,c,e FROM t1; +a b c e + a + test2 4 a + test3 75 a + test5 2 a +SELECT a,b,c,e FROM t1 WHERE b='test2' OR a != ''; +a b c e + test2 4 a +DROP TABLE t1; +DROP TABLE IF EXISTS t1; +#---------------------------------- +# ENUM('test1','test2','test3') NOT NULL columns without a default +#---------------------------------- +CREATE TABLE t1 (pk INT AUTO_INCREMENT PRIMARY KEY, c ENUM('test1','test2','test3') NOT NULL) ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +pk int(11) NO PRI NULL auto_increment +c enum('test1','test2','test3') NO NULL +INSERT INTO t1 (c) VALUES (NULL); +ERROR 23000: Column 'c' cannot be null 
+INSERT INTO t1 (c) VALUES ('test2'); +SELECT HEX(c) FROM t1; +HEX(c) +7465737432 +DROP TABLE t1; +#---------------------------------- +# ENUM('test1','test2','test3') NOT NULL columns with a default +#---------------------------------- +CREATE TABLE t1 ( +pk INT AUTO_INCREMENT PRIMARY KEY, +c ENUM('test1','test2','test3') NOT NULL DEFAULT 'test2' +) ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +pk int(11) NO PRI NULL auto_increment +c enum('test1','test2','test3') NO test2 +ALTER TABLE t1 ADD COLUMN err ENUM('test1','test2','test3') NOT NULL DEFAULT NULL; +ERROR 42000: Invalid default value for 'err' +INSERT INTO t1 (c) VALUES (NULL); +ERROR 23000: Column 'c' cannot be null +INSERT INTO t1 (c) VALUES ('test2'); +INSERT INTO t1 () VALUES (); +SELECT pk, HEX(c) FROM t1 ORDER BY pk; +pk HEX(c) +1 7465737432 +2 7465737432 +DROP TABLE t1; +######################## +# Fixed point columns (NUMERIC, DECIMAL) +######################## +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 ( +d DECIMAL NOT NULL, +d0 DECIMAL(0) NOT NULL, +d1_1 DECIMAL(1,1) NOT NULL, +d10_2 DECIMAL(10,2) NOT NULL, +d60_10 DECIMAL(60,10) NOT NULL, +n NUMERIC NOT NULL, +n0_0 NUMERIC(0,0) NOT NULL, +n1 NUMERIC(1) NOT NULL, +n20_4 NUMERIC(20,4) NOT NULL, +n65_4 NUMERIC(65,4) NOT NULL, +pk NUMERIC NOT NULL PRIMARY KEY +) ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +d decimal(10,0) NO NULL +d0 decimal(10,0) NO NULL +d1_1 decimal(1,1) NO NULL +d10_2 decimal(10,2) NO NULL +d60_10 decimal(60,10) NO NULL +n decimal(10,0) NO NULL +n0_0 decimal(10,0) NO NULL +n1 decimal(1,0) NO NULL +n20_4 decimal(20,4) NO NULL +n65_4 decimal(65,4) NO NULL +pk decimal(10,0) NO PRI NULL +INSERT INTO t1 (d,d0,d1_1,d10_2,d60_10,n,n0_0,n1,n20_4,n65_4,pk) VALUES (100,123456,0.3,40000.25,123456789123456789.10001,1024,7000.0,8.0,999999.9,9223372036854775807,1); +INSERT INTO t1 (d,d0,d1_1,d10_2,d60_10,n,n0_0,n1,n20_4,n65_4,pk) VALUES (0,0,0,0,0,0,0,0,0,0,2); +INSERT INTO t1 
(d,d0,d1_1,d10_2,d60_10,n,n0_0,n1,n20_4,n65_4,pk) VALUES (9999999999.0,9999999999.0,0.9,99999999.99,99999999999999999999999999999999999999999999999999.9999999999,9999999999.0,9999999999.0,9.0,9999999999999999.9999,9999999999999999999999999999999999999999999999999999999999999.9999,3); +SELECT d,d0,d1_1,d10_2,d60_10,n,n0_0,n1,n20_4,n65_4 FROM t1; +d d0 d1_1 d10_2 d60_10 n n0_0 n1 n20_4 n65_4 +0 0 0.0 0.00 0.0000000000 0 0 0 0.0000 0.0000 +100 123456 0.3 40000.25 123456789123456789.1000100000 1024 7000 8 999999.9000 9223372036854775807.0000 +9999999999 9999999999 0.9 99999999.99 99999999999999999999999999999999999999999999999999.9999999999 9999999999 9999999999 9 9999999999999999.9999 9999999999999999999999999999999999999999999999999999999999999.9999 +INSERT INTO t1 (d,d0,d1_1,d10_2,d60_10,n,n0_0,n1,n20_4,n65_4,pk) VALUES (-100,-123456,-0.3,-40000.25,-123456789123456789.10001,-1024,-7000.0,-8.0,-999999.9,-9223372036854775807,4); +INSERT INTO t1 (d,d0,d1_1,d10_2,d60_10,n,n0_0,n1,n20_4,n65_4,pk) VALUES (-9999999999.0,-9999999999.0,-0.9,-99999999.99,-99999999999999999999999999999999999999999999999999.9999999999,-9999999999.0,-9999999999.0,-9.0,-9999999999999999.9999,-9999999999999999999999999999999999999999999999999999999999999.9999,5); +SELECT d,d0,d1_1,d10_2,d60_10,n,n0_0,n1,n20_4,n65_4 FROM t1; +d d0 d1_1 d10_2 d60_10 n n0_0 n1 n20_4 n65_4 +-100 -123456 -0.3 -40000.25 -123456789123456789.1000100000 -1024 -7000 -8 -999999.9000 -9223372036854775807.0000 +-9999999999 -9999999999 -0.9 -99999999.99 -99999999999999999999999999999999999999999999999999.9999999999 -9999999999 -9999999999 -9 -9999999999999999.9999 -9999999999999999999999999999999999999999999999999999999999999.9999 +0 0 0.0 0.00 0.0000000000 0 0 0 0.0000 0.0000 +100 123456 0.3 40000.25 123456789123456789.1000100000 1024 7000 8 999999.9000 9223372036854775807.0000 +9999999999 9999999999 0.9 99999999.99 99999999999999999999999999999999999999999999999999.9999999999 9999999999 9999999999 9 9999999999999999.9999 
9999999999999999999999999999999999999999999999999999999999999.9999 +SELECT d,d0,d1_1,d10_2,d60_10,n,n0_0,n1,n20_4,n65_4 FROM t1 WHERE n20_4 = 9999999999999999.9999 OR d < 100; +d d0 d1_1 d10_2 d60_10 n n0_0 n1 n20_4 n65_4 +-100 -123456 -0.3 -40000.25 -123456789123456789.1000100000 -1024 -7000 -8 -999999.9000 -9223372036854775807.0000 +-9999999999 -9999999999 -0.9 -99999999.99 -99999999999999999999999999999999999999999999999999.9999999999 -9999999999 -9999999999 -9 -9999999999999999.9999 -9999999999999999999999999999999999999999999999999999999999999.9999 +0 0 0.0 0.00 0.0000000000 0 0 0 0.0000 0.0000 +9999999999 9999999999 0.9 99999999.99 99999999999999999999999999999999999999999999999999.9999999999 9999999999 9999999999 9 9999999999999999.9999 9999999999999999999999999999999999999999999999999999999999999.9999 +INSERT INTO t1 (d,d0,d1_1,d10_2,d60_10,n,n0_0,n1,n20_4,n65_4,pk) VALUES ( +9999999999999999999999999999999999999999999999999999999999999.9999, +9999999999999999999999999999999999999999999999999999999999999.9999, +9999999999999999999999999999999999999999999999999999999999999.9999, +9999999999999999999999999999999999999999999999999999999999999.9999, +9999999999999999999999999999999999999999999999999999999999999.9999, +9999999999999999999999999999999999999999999999999999999999999.9999, +9999999999999999999999999999999999999999999999999999999999999.9999, +9999999999999999999999999999999999999999999999999999999999999.9999, +9999999999999999999999999999999999999999999999999999999999999.9999, +9999999999999999999999999999999999999999999999999999999999999.9999, +6 +); +Warnings: +Warning 1264 Out of range value for column 'd' at row 1 +Warning 1264 Out of range value for column 'd0' at row 1 +Warning 1264 Out of range value for column 'd1_1' at row 1 +Warning 1264 Out of range value for column 'd10_2' at row 1 +Warning 1264 Out of range value for column 'd60_10' at row 1 +Warning 1264 Out of range value for column 'n' at row 1 +Warning 1264 Out of range value for 
column 'n0_0' at row 1 +Warning 1264 Out of range value for column 'n1' at row 1 +Warning 1264 Out of range value for column 'n20_4' at row 1 +SELECT d,d0,d1_1,d10_2,d60_10,n,n0_0,n1,n20_4,n65_4 FROM t1; +d d0 d1_1 d10_2 d60_10 n n0_0 n1 n20_4 n65_4 +-100 -123456 -0.3 -40000.25 -123456789123456789.1000100000 -1024 -7000 -8 -999999.9000 -9223372036854775807.0000 +-9999999999 -9999999999 -0.9 -99999999.99 -99999999999999999999999999999999999999999999999999.9999999999 -9999999999 -9999999999 -9 -9999999999999999.9999 -9999999999999999999999999999999999999999999999999999999999999.9999 +0 0 0.0 0.00 0.0000000000 0 0 0 0.0000 0.0000 +100 123456 0.3 40000.25 123456789123456789.1000100000 1024 7000 8 999999.9000 9223372036854775807.0000 +9999999999 9999999999 0.9 99999999.99 99999999999999999999999999999999999999999999999999.9999999999 9999999999 9999999999 9 9999999999999999.9999 9999999999999999999999999999999999999999999999999999999999999.9999 +9999999999 9999999999 0.9 99999999.99 99999999999999999999999999999999999999999999999999.9999999999 9999999999 9999999999 9 9999999999999999.9999 9999999999999999999999999999999999999999999999999999999999999.9999 +INSERT INTO t1 (d,d0,d1_1,d10_2,d60_10,n,n0_0,n1,n20_4,n65_4,pk) VALUES (10000000000.0,10000000000.0,1.1,100000000.99,100000000000000000000000000000000000000000000000000.0,10000000000.0,10000000000.0,10.0,10000000000000000.9999,10000000000000000000000000000000000000000000000000000000000000.9999,7); +Warnings: +Warning 1264 Out of range value for column 'd' at row 1 +Warning 1264 Out of range value for column 'd0' at row 1 +Warning 1264 Out of range value for column 'd1_1' at row 1 +Warning 1264 Out of range value for column 'd10_2' at row 1 +Warning 1264 Out of range value for column 'd60_10' at row 1 +Warning 1264 Out of range value for column 'n' at row 1 +Warning 1264 Out of range value for column 'n0_0' at row 1 +Warning 1264 Out of range value for column 'n1' at row 1 +Warning 1264 Out of range value for column 
'n20_4' at row 1 +Warning 1264 Out of range value for column 'n65_4' at row 1 +SELECT d,d0,d1_1,d10_2,d60_10,n,n0_0,n1,n20_4,n65_4 FROM t1; +d d0 d1_1 d10_2 d60_10 n n0_0 n1 n20_4 n65_4 +-100 -123456 -0.3 -40000.25 -123456789123456789.1000100000 -1024 -7000 -8 -999999.9000 -9223372036854775807.0000 +-9999999999 -9999999999 -0.9 -99999999.99 -99999999999999999999999999999999999999999999999999.9999999999 -9999999999 -9999999999 -9 -9999999999999999.9999 -9999999999999999999999999999999999999999999999999999999999999.9999 +0 0 0.0 0.00 0.0000000000 0 0 0 0.0000 0.0000 +100 123456 0.3 40000.25 123456789123456789.1000100000 1024 7000 8 999999.9000 9223372036854775807.0000 +9999999999 9999999999 0.9 99999999.99 99999999999999999999999999999999999999999999999999.9999999999 9999999999 9999999999 9 9999999999999999.9999 9999999999999999999999999999999999999999999999999999999999999.9999 +9999999999 9999999999 0.9 99999999.99 99999999999999999999999999999999999999999999999999.9999999999 9999999999 9999999999 9 9999999999999999.9999 9999999999999999999999999999999999999999999999999999999999999.9999 +9999999999 9999999999 0.9 99999999.99 99999999999999999999999999999999999999999999999999.9999999999 9999999999 9999999999 9 9999999999999999.9999 9999999999999999999999999999999999999999999999999999999999999.9999 +INSERT INTO t1 (d,d0,d1_1,d10_2,d60_10,n,n0_0,n1,n20_4,n65_4,pk) VALUES (9999999999.1,9999999999.1,1.9,99999999.001,99999999999999999999999999999999999999999999999999.99999999991,9999999999.1,9999999999.1,9.1,9999999999999999.00001,9999999999999999999999999999999999999999999999999999999999999.11111,8); +Warnings: +Note 1265 Data truncated for column 'd' at row 1 +Note 1265 Data truncated for column 'd0' at row 1 +Warning 1264 Out of range value for column 'd1_1' at row 1 +Note 1265 Data truncated for column 'd10_2' at row 1 +Note 1265 Data truncated for column 'd60_10' at row 1 +Note 1265 Data truncated for column 'n' at row 1 +Note 1265 Data truncated for column 'n0_0' at 
row 1 +Note 1265 Data truncated for column 'n1' at row 1 +Note 1265 Data truncated for column 'n20_4' at row 1 +Note 1265 Data truncated for column 'n65_4' at row 1 +SELECT d,d0,d1_1,d10_2,d60_10,n,n0_0,n1,n20_4,n65_4 FROM t1; +d d0 d1_1 d10_2 d60_10 n n0_0 n1 n20_4 n65_4 +-100 -123456 -0.3 -40000.25 -123456789123456789.1000100000 -1024 -7000 -8 -999999.9000 -9223372036854775807.0000 +-9999999999 -9999999999 -0.9 -99999999.99 -99999999999999999999999999999999999999999999999999.9999999999 -9999999999 -9999999999 -9 -9999999999999999.9999 -9999999999999999999999999999999999999999999999999999999999999.9999 +0 0 0.0 0.00 0.0000000000 0 0 0 0.0000 0.0000 +100 123456 0.3 40000.25 123456789123456789.1000100000 1024 7000 8 999999.9000 9223372036854775807.0000 +9999999999 9999999999 0.9 99999999.00 99999999999999999999999999999999999999999999999999.9999999999 9999999999 9999999999 9 9999999999999999.0000 9999999999999999999999999999999999999999999999999999999999999.1111 +9999999999 9999999999 0.9 99999999.99 99999999999999999999999999999999999999999999999999.9999999999 9999999999 9999999999 9 9999999999999999.9999 9999999999999999999999999999999999999999999999999999999999999.9999 +9999999999 9999999999 0.9 99999999.99 99999999999999999999999999999999999999999999999999.9999999999 9999999999 9999999999 9 9999999999999999.9999 9999999999999999999999999999999999999999999999999999999999999.9999 +9999999999 9999999999 0.9 99999999.99 99999999999999999999999999999999999999999999999999.9999999999 9999999999 9999999999 9 9999999999999999.9999 9999999999999999999999999999999999999999999999999999999999999.9999 +ALTER TABLE t1 ADD COLUMN n66 NUMERIC(66) NOT NULL; +ERROR 42000: Too big precision 66 specified for 'n66'. Maximum is 65 +ALTER TABLE t1 ADD COLUMN n66_6 DECIMAL(66,6) NOT NULL; +ERROR 42000: Too big precision 66 specified for 'n66_6'. Maximum is 65 +ALTER TABLE t1 ADD COLUMN n66_66 DECIMAL(66,66) NOT NULL; +ERROR 42000: Too big scale 66 specified for 'n66_66'. 
Maximum is 38 +DROP TABLE t1; +DROP TABLE IF EXISTS t1; +#---------------------------------- +# DECIMAL NOT NULL columns without a default +#---------------------------------- +CREATE TABLE t1 (pk INT AUTO_INCREMENT PRIMARY KEY, c DECIMAL NOT NULL) ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +pk int(11) NO PRI NULL auto_increment +c decimal(10,0) NO NULL +INSERT INTO t1 (c) VALUES (NULL); +ERROR 23000: Column 'c' cannot be null +INSERT INTO t1 (c) VALUES (1.1); +Warnings: +Note 1265 Data truncated for column 'c' at row 1 +SELECT HEX(c) FROM t1; +HEX(c) +1 +DROP TABLE t1; +#---------------------------------- +# DECIMAL NOT NULL columns with a default +#---------------------------------- +CREATE TABLE t1 ( +pk INT AUTO_INCREMENT PRIMARY KEY, +c DECIMAL NOT NULL DEFAULT 1.1 +) ENGINE=rocksdb; +Warnings: +Note 1265 Data truncated for column 'c' at row 1 +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +pk int(11) NO PRI NULL auto_increment +c decimal(10,0) NO 1 +ALTER TABLE t1 ADD COLUMN err DECIMAL NOT NULL DEFAULT NULL; +ERROR 42000: Invalid default value for 'err' +INSERT INTO t1 (c) VALUES (NULL); +ERROR 23000: Column 'c' cannot be null +INSERT INTO t1 (c) VALUES (1.1); +Warnings: +Note 1265 Data truncated for column 'c' at row 1 +INSERT INTO t1 () VALUES (); +SELECT pk, HEX(c) FROM t1 ORDER BY pk; +pk HEX(c) +1 1 +2 1 +DROP TABLE t1; +DROP TABLE IF EXISTS t1; +#---------------------------------- +# NUMERIC NOT NULL columns without a default +#---------------------------------- +CREATE TABLE t1 (pk INT AUTO_INCREMENT PRIMARY KEY, c NUMERIC NOT NULL) ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +pk int(11) NO PRI NULL auto_increment +c decimal(10,0) NO NULL +INSERT INTO t1 (c) VALUES (NULL); +ERROR 23000: Column 'c' cannot be null +INSERT INTO t1 (c) VALUES (0); +SELECT HEX(c) FROM t1; +HEX(c) +0 +DROP TABLE t1; +#---------------------------------- +# NUMERIC NOT NULL columns with a default 
+#---------------------------------- +CREATE TABLE t1 ( +pk INT AUTO_INCREMENT PRIMARY KEY, +c NUMERIC NOT NULL DEFAULT 0 +) ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +pk int(11) NO PRI NULL auto_increment +c decimal(10,0) NO 0 +ALTER TABLE t1 ADD COLUMN err NUMERIC NOT NULL DEFAULT NULL; +ERROR 42000: Invalid default value for 'err' +INSERT INTO t1 (c) VALUES (NULL); +ERROR 23000: Column 'c' cannot be null +INSERT INTO t1 (c) VALUES (0); +INSERT INTO t1 () VALUES (); +SELECT pk, HEX(c) FROM t1 ORDER BY pk; +pk HEX(c) +1 0 +2 0 +DROP TABLE t1; +######################## +# Floating point columns (FLOAT, DOUBLE) +######################## +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 ( +f FLOAT NOT NULL, +f0 FLOAT(0) NOT NULL, +r1_1 REAL(1,1) NOT NULL, +f23_0 FLOAT(23) NOT NULL, +f20_3 FLOAT(20,3) NOT NULL, +d DOUBLE NOT NULL, +d1_0 DOUBLE(1,0) NOT NULL, +d10_10 DOUBLE PRECISION (10,10) NOT NULL, +d53 DOUBLE(53,0) NOT NULL, +d53_10 DOUBLE(53,10) NOT NULL, +pk DOUBLE NOT NULL PRIMARY KEY +) ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +f float NO NULL +f0 float NO NULL +r1_1 double(1,1) NO NULL +f23_0 float NO NULL +f20_3 float(20,3) NO NULL +d double NO NULL +d1_0 double(1,0) NO NULL +d10_10 double(10,10) NO NULL +d53 double(53,0) NO NULL +d53_10 double(53,10) NO NULL +pk double NO PRI NULL +INSERT INTO t1 (f,f0,r1_1,f23_0,f20_3,d,d1_0,d10_10,d53,d53_10,pk) VALUES (12345.12345,12345.12345,0.9,123456789.123,56789.987,11111111.111,8.0,0.0123456789,1234566789123456789,99999999999999999.99999999,1); +SELECT f,f0,r1_1,f23_0,f20_3,d,d1_0,d10_10,d53,d53_10 FROM t1; +f 12345.1 +d 11111111.111 +d10_10 0.0123456789 +d1_0 8 +d53 1234566789123456800 +d53_10 100000000000000000.0000000000 +f0 12345.1 +f20_3 56789.988 +f23_0 123457000 +r1_1 0.9 +INSERT INTO t1 (f,f0,r1_1,f23_0,f20_3,d,d1_0,d10_10,d53,d53_10,pk) VALUES (0,0,0,0,0,0,0,0,0,0,2); +INSERT INTO t1 (f,f0,r1_1,f23_0,f20_3,d,d1_0,d10_10,d53,d53_10,pk) VALUES ( 
+99999999999999999999999999999999999999, +99999999999999999999999999999999999999.9999999999999999, +0.9, +99999999999999999999999999999999999999.9, +99999999999999999.999, +999999999999999999999999999999999999999999999999999999999999999999999999999999999, +9, +0.9999999999, +1999999999999999999999999999999999999999999999999999999, +19999999999999999999999999999999999999999999.9999999999, +3 +); +Warnings: +Warning 1264 Out of range value for column 'd53' at row 1 +Warning 1264 Out of range value for column 'd53_10' at row 1 +SELECT f,f0,r1_1,f23_0,f20_3,d,d1_0,d10_10,d53,d53_10 FROM t1; +f 12345.1 +d 0 +d 11111111.111 +d 1e81 +d10_10 0.0000000000 +d10_10 0.0123456789 +d10_10 0.9999999999 +d1_0 0 +d1_0 8 +d1_0 9 +d53 0 +d53 100000000000000000000000000000000000000000000000000000 +d53 1234566789123456800 +d53_10 0.0000000000 +d53_10 100000000000000000.0000000000 +d53_10 10000000000000000000000000000000000000000000.0000000000 +f 0 +f 1e38 +f0 0 +f0 12345.1 +f0 1e38 +f20_3 0.000 +f20_3 56789.988 +f20_3 99999998430674940.000 +f23_0 0 +f23_0 123457000 +f23_0 1e38 +r1_1 0.0 +r1_1 0.9 +r1_1 0.9 +INSERT INTO t1 (f,f0,r1_1,f23_0,f20_3,d,d1_0,d10_10,d53,d53_10,pk) VALUES (-999999999999999999999999,-99999999999.999999999999,-0.9,-999.99999999999999999999,-99999999999999999.999,-999999999999999999999999999999999999999999999999999999999999-0.999,-9,-.9999999999,-999999999999999999999999999999.99999999999999999999999,-9999999999999999999999999999999999999999999.9999999999,4); +SELECT f,f0,r1_1,f23_0,f20_3,d,d1_0,d10_10,d53,d53_10 FROM t1; +f 12345.1 +d -1e60 +d 0 +d 11111111.111 +d 1e81 +d10_10 -0.9999999999 +d10_10 0.0000000000 +d10_10 0.0123456789 +d10_10 0.9999999999 +d1_0 -9 +d1_0 0 +d1_0 8 +d1_0 9 +d53 -1000000000000000000000000000000 +d53 0 +d53 100000000000000000000000000000000000000000000000000000 +d53 1234566789123456800 +d53_10 -10000000000000000000000000000000000000000000.0000000000 +d53_10 0.0000000000 +d53_10 100000000000000000.0000000000 +d53_10 
10000000000000000000000000000000000000000000.0000000000 +f -1e24 +f 0 +f 1e38 +f0 -100000000000 +f0 0 +f0 12345.1 +f0 1e38 +f20_3 -99999998430674940.000 +f20_3 0.000 +f20_3 56789.988 +f20_3 99999998430674940.000 +f23_0 -1000 +f23_0 0 +f23_0 123457000 +f23_0 1e38 +r1_1 -0.9 +r1_1 0.0 +r1_1 0.9 +r1_1 0.9 +SELECT MAX(f), MAX(f0), MAX(r1_1), MAX(f23_0), MAX(f20_3), MAX(d), MAX(d1_0), MAX(d10_10), MAX(d53), MAX(d53_10) FROM t1; +MAX(f) 9.999999680285692e37 +MAX(d) 1e81 +MAX(d10_10) 0.9999999999 +MAX(d1_0) 9 +MAX(d53) 100000000000000000000000000000000000000000000000000000 +MAX(d53_10) 10000000000000000000000000000000000000000000.0000000000 +MAX(f0) 9.999999680285692e37 +MAX(f20_3) 99999998430674940.000 +MAX(f23_0) 9.999999680285692e37 +MAX(r1_1) 0.9 +INSERT INTO t1 (f,f0,r1_1,f23_0,f20_3,d,d1_0,d10_10,d53,d53_10,pk) VALUES ( +9999999999999999999999999999999999999999999999999999999999999.9999, +9999999999999999999999999999999999999999999999999999999999999.9999, +9999999999999999999999999999999999999999999999999999999999999.9999, +9999999999999999999999999999999999999999999999999999999999999.9999, +9999999999999999999999999999999999999999999999999999999999999.9999, +9999999999999999999999999999999999999999999999999999999999999.9999, +9999999999999999999999999999999999999999999999999999999999999.9999, +9999999999999999999999999999999999999999999999999999999999999.9999, +9999999999999999999999999999999999999999999999999999999999999.9999, +9999999999999999999999999999999999999999999999999999999999999.9999, +5 +); +Warnings: +Warning 1264 Out of range value for column 'f' at row 1 +Warning 1264 Out of range value for column 'f0' at row 1 +Warning 1264 Out of range value for column 'r1_1' at row 1 +Warning 1264 Out of range value for column 'f23_0' at row 1 +Warning 1264 Out of range value for column 'f20_3' at row 1 +Warning 1264 Out of range value for column 'd1_0' at row 1 +Warning 1264 Out of range value for column 'd10_10' at row 1 +Warning 1264 Out of range value for 
column 'd53' at row 1 +Warning 1264 Out of range value for column 'd53_10' at row 1 +SELECT f,f0,r1_1,f23_0,f20_3,d,d1_0,d10_10,d53,d53_10 FROM t1; +f 12345.1 +d -1e60 +d 0 +d 11111111.111 +d 1e61 +d 1e81 +d10_10 -0.9999999999 +d10_10 0.0000000000 +d10_10 0.0123456789 +d10_10 0.9999999999 +d10_10 0.9999999999 +d1_0 -9 +d1_0 0 +d1_0 8 +d1_0 9 +d1_0 9 +d53 -1000000000000000000000000000000 +d53 0 +d53 100000000000000000000000000000000000000000000000000000 +d53 100000000000000000000000000000000000000000000000000000 +d53 1234566789123456800 +d53_10 -10000000000000000000000000000000000000000000.0000000000 +d53_10 0.0000000000 +d53_10 100000000000000000.0000000000 +d53_10 10000000000000000000000000000000000000000000.0000000000 +d53_10 10000000000000000000000000000000000000000000.0000000000 +f -1e24 +f 0 +f 1e38 +f 3.40282e38 +f0 -100000000000 +f0 0 +f0 12345.1 +f0 1e38 +f0 3.40282e38 +f20_3 -99999998430674940.000 +f20_3 0.000 +f20_3 56789.988 +f20_3 99999998430674940.000 +f20_3 99999998430674940.000 +f23_0 -1000 +f23_0 0 +f23_0 123457000 +f23_0 1e38 +f23_0 3.40282e38 +r1_1 -0.9 +r1_1 0.0 +r1_1 0.9 +r1_1 0.9 +r1_1 0.9 +INSERT INTO t1 (f,f0,r1_1,f23_0,f20_3,d,d1_0,d10_10,d53,d53_10,pk) VALUES ( +999999999999999999999999999999999999999, +999999999999999999999999999999999999999.9999999999999999, +1.9, +999999999999999999999999999999999999999.9, +999999999999999999.999, +9999999999999999999999999999999999999999999999999999999999999999999999999999999999, +99, +1.9999999999, +1999999999999999999999999999999999999999999999999999999, +19999999999999999999999999999999999999999999.9999999999, +6 +); +Warnings: +Warning 1916 Got overflow when converting '' to DECIMAL. 
Value truncated +Warning 1264 Out of range value for column 'f' at row 1 +Warning 1264 Out of range value for column 'f0' at row 1 +Warning 1264 Out of range value for column 'r1_1' at row 1 +Warning 1264 Out of range value for column 'f23_0' at row 1 +Warning 1264 Out of range value for column 'f20_3' at row 1 +Warning 1264 Out of range value for column 'd1_0' at row 1 +Warning 1264 Out of range value for column 'd10_10' at row 1 +Warning 1264 Out of range value for column 'd53' at row 1 +Warning 1264 Out of range value for column 'd53_10' at row 1 +SELECT f,f0,r1_1,f23_0,f20_3,d,d1_0,d10_10,d53,d53_10 FROM t1; +f 12345.1 +d -1e60 +d 0 +d 11111111.111 +d 1e61 +d 1e65 +d 1e81 +d10_10 -0.9999999999 +d10_10 0.0000000000 +d10_10 0.0123456789 +d10_10 0.9999999999 +d10_10 0.9999999999 +d10_10 0.9999999999 +d1_0 -9 +d1_0 0 +d1_0 8 +d1_0 9 +d1_0 9 +d1_0 9 +d53 -1000000000000000000000000000000 +d53 0 +d53 100000000000000000000000000000000000000000000000000000 +d53 100000000000000000000000000000000000000000000000000000 +d53 100000000000000000000000000000000000000000000000000000 +d53 1234566789123456800 +d53_10 -10000000000000000000000000000000000000000000.0000000000 +d53_10 0.0000000000 +d53_10 100000000000000000.0000000000 +d53_10 10000000000000000000000000000000000000000000.0000000000 +d53_10 10000000000000000000000000000000000000000000.0000000000 +d53_10 10000000000000000000000000000000000000000000.0000000000 +f -1e24 +f 0 +f 1e38 +f 3.40282e38 +f 3.40282e38 +f0 -100000000000 +f0 0 +f0 12345.1 +f0 1e38 +f0 3.40282e38 +f0 3.40282e38 +f20_3 -99999998430674940.000 +f20_3 0.000 +f20_3 56789.988 +f20_3 99999998430674940.000 +f20_3 99999998430674940.000 +f20_3 99999998430674940.000 +f23_0 -1000 +f23_0 0 +f23_0 123457000 +f23_0 1e38 +f23_0 3.40282e38 +f23_0 3.40282e38 +r1_1 -0.9 +r1_1 0.0 +r1_1 0.9 +r1_1 0.9 +r1_1 0.9 +r1_1 0.9 +ALTER TABLE t1 ADD COLUMN d0_0 DOUBLE(0,0) NOT NULL; +ALTER TABLE t1 ADD COLUMN n66_6 DECIMAL(256,1) NOT NULL; +ERROR 42000: Too big precision 256 
specified for 'n66_6'. Maximum is 65 +ALTER TABLE t1 ADD COLUMN n66_66 DECIMAL(40,35) NOT NULL; +DROP TABLE t1; +DROP TABLE IF EXISTS t1; +#---------------------------------- +# FLOAT NOT NULL columns without a default +#---------------------------------- +CREATE TABLE t1 (pk INT AUTO_INCREMENT PRIMARY KEY, c FLOAT NOT NULL) ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +pk int(11) NO PRI NULL auto_increment +c float NO NULL +INSERT INTO t1 (c) VALUES (NULL); +ERROR 23000: Column 'c' cannot be null +INSERT INTO t1 (c) VALUES (1.1); +SELECT HEX(c) FROM t1; +HEX(c) +1 +DROP TABLE t1; +#---------------------------------- +# FLOAT NOT NULL columns with a default +#---------------------------------- +CREATE TABLE t1 ( +pk INT AUTO_INCREMENT PRIMARY KEY, +c FLOAT NOT NULL DEFAULT 1.1 +) ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +pk int(11) NO PRI NULL auto_increment +c float NO 1.1 +ALTER TABLE t1 ADD COLUMN err FLOAT NOT NULL DEFAULT NULL; +ERROR 42000: Invalid default value for 'err' +INSERT INTO t1 (c) VALUES (NULL); +ERROR 23000: Column 'c' cannot be null +INSERT INTO t1 (c) VALUES (1.1); +INSERT INTO t1 () VALUES (); +SELECT pk, HEX(c) FROM t1 ORDER BY pk; +pk HEX(c) +1 1 +2 1 +DROP TABLE t1; +DROP TABLE IF EXISTS t1; +#---------------------------------- +# DOUBLE NOT NULL columns without a default +#---------------------------------- +CREATE TABLE t1 (pk INT AUTO_INCREMENT PRIMARY KEY, c DOUBLE NOT NULL) ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +pk int(11) NO PRI NULL auto_increment +c double NO NULL +INSERT INTO t1 (c) VALUES (NULL); +ERROR 23000: Column 'c' cannot be null +INSERT INTO t1 (c) VALUES (0); +SELECT HEX(c) FROM t1; +HEX(c) +0 +DROP TABLE t1; +#---------------------------------- +# DOUBLE NOT NULL columns with a default +#---------------------------------- +CREATE TABLE t1 ( +pk INT AUTO_INCREMENT PRIMARY KEY, +c DOUBLE NOT NULL DEFAULT 0 +) ENGINE=rocksdb; +SHOW 
COLUMNS IN t1; +Field Type Null Key Default Extra +pk int(11) NO PRI NULL auto_increment +c double NO 0 +ALTER TABLE t1 ADD COLUMN err DOUBLE NOT NULL DEFAULT NULL; +ERROR 42000: Invalid default value for 'err' +INSERT INTO t1 (c) VALUES (NULL); +ERROR 23000: Column 'c' cannot be null +INSERT INTO t1 (c) VALUES (0); +INSERT INTO t1 () VALUES (); +SELECT pk, HEX(c) FROM t1 ORDER BY pk; +pk HEX(c) +1 0 +2 0 +DROP TABLE t1; +######################## +# INT columns +######################## +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 ( +i INT NOT NULL, +i0 INT(0) NOT NULL, +i1 INT(1) NOT NULL, +i20 INT(20) NOT NULL, +t TINYINT NOT NULL, +t0 TINYINT(0) NOT NULL, +t1 TINYINT(1) NOT NULL, +t20 TINYINT(20) NOT NULL, +s SMALLINT NOT NULL, +s0 SMALLINT(0) NOT NULL, +s1 SMALLINT(1) NOT NULL, +s20 SMALLINT(20) NOT NULL, +m MEDIUMINT NOT NULL, +m0 MEDIUMINT(0) NOT NULL, +m1 MEDIUMINT(1) NOT NULL, +m20 MEDIUMINT(20) NOT NULL, +b BIGINT NOT NULL, +b0 BIGINT(0) NOT NULL, +b1 BIGINT(1) NOT NULL, +b20 BIGINT(20) NOT NULL, +pk INT AUTO_INCREMENT PRIMARY KEY +) ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +i int(11) NO NULL +i0 int(11) NO NULL +i1 int(1) NO NULL +i20 int(20) NO NULL +t tinyint(4) NO NULL +t0 tinyint(4) NO NULL +t1 tinyint(1) NO NULL +t20 tinyint(20) NO NULL +s smallint(6) NO NULL +s0 smallint(6) NO NULL +s1 smallint(1) NO NULL +s20 smallint(20) NO NULL +m mediumint(9) NO NULL +m0 mediumint(9) NO NULL +m1 mediumint(1) NO NULL +m20 mediumint(20) NO NULL +b bigint(20) NO NULL +b0 bigint(20) NO NULL +b1 bigint(1) NO NULL +b20 bigint(20) NO NULL +pk int(11) NO PRI NULL auto_increment +INSERT INTO t1 (i,i0,i1,i20,t,t0,t1,t20,s,s0,s1,s20,m,m0,m1,m20,b,b0,b1,b20) VALUES (1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20); +INSERT INTO t1 (i,i0,i1,i20,t,t0,t1,t20,s,s0,s1,s20,m,m0,m1,m20,b,b0,b1,b20) VALUES (0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0); +INSERT INTO t1 (i,i0,i1,i20,t,t0,t1,t20,s,s0,s1,s20,m,m0,m1,m20,b,b0,b1,b20) VALUES 
(2147483647,2147483647,2147483647,2147483647,127,127,127,127,32767,32767,32767,32767,8388607,8388607,8388607,8388607,9223372036854775807,9223372036854775807,9223372036854775807,9223372036854775807); +SELECT i,i0,i1,i20,t,t0,t1,t20,s,s0,s1,s20,m,m0,m1,m20,b,b0,b1,b20 FROM t1; +i i0 i1 i20 t t0 t1 t20 s s0 s1 s20 m m0 m1 m20 b b0 b1 b20 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 +2147483647 2147483647 2147483647 2147483647 127 127 127 127 32767 32767 32767 32767 8388607 8388607 8388607 8388607 9223372036854775807 9223372036854775807 9223372036854775807 9223372036854775807 +INSERT INTO t1 (i,i0,i1,i20,t,t0,t1,t20,s,s0,s1,s20,m,m0,m1,m20,b,b0,b1,b20) VALUES (-2147483648,-2147483648,-2147483648,-2147483648,-128,-128,-128,-128,-32768,-32768,-32768,-32768,-8388608,-8388608,-8388608,-8388608,-9223372036854775808,-9223372036854775808,-9223372036854775808,-9223372036854775808); +INSERT INTO t1 (i,i0,i1,i20,t,t0,t1,t20,s,s0,s1,s20,m,m0,m1,m20,b,b0,b1,b20) VALUES (4294967295,4294967295,4294967295,4294967295,255,255,255,255,65535,65535,65535,65535,16777215,16777215,16777215,16777215,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615); +Warnings: +Warning 1264 Out of range value for column 'i' at row 1 +Warning 1264 Out of range value for column 'i0' at row 1 +Warning 1264 Out of range value for column 'i1' at row 1 +Warning 1264 Out of range value for column 'i20' at row 1 +Warning 1264 Out of range value for column 't' at row 1 +Warning 1264 Out of range value for column 't0' at row 1 +Warning 1264 Out of range value for column 't1' at row 1 +Warning 1264 Out of range value for column 't20' at row 1 +Warning 1264 Out of range value for column 's' at row 1 +Warning 1264 Out of range value for column 's0' at row 1 +Warning 1264 Out of range value for column 's1' at row 1 +Warning 1264 Out of range value for column 's20' at row 1 +Warning 1264 Out of range value for column 'm' at row 1 +Warning 
1264 Out of range value for column 'm0' at row 1 +Warning 1264 Out of range value for column 'm1' at row 1 +Warning 1264 Out of range value for column 'm20' at row 1 +Warning 1264 Out of range value for column 'b' at row 1 +Warning 1264 Out of range value for column 'b0' at row 1 +Warning 1264 Out of range value for column 'b1' at row 1 +Warning 1264 Out of range value for column 'b20' at row 1 +SELECT i,i0,i1,i20,t,t0,t1,t20,s,s0,s1,s20,m,m0,m1,m20,b,b0,b1,b20 FROM t1; +i i0 i1 i20 t t0 t1 t20 s s0 s1 s20 m m0 m1 m20 b b0 b1 b20 +-2147483648 -2147483648 -2147483648 -2147483648 -128 -128 -128 -128 -32768 -32768 -32768 -32768 -8388608 -8388608 -8388608 -8388608 -9223372036854775808 -9223372036854775808 -9223372036854775808 -9223372036854775808 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 +2147483647 2147483647 2147483647 2147483647 127 127 127 127 32767 32767 32767 32767 8388607 8388607 8388607 8388607 9223372036854775807 9223372036854775807 9223372036854775807 9223372036854775807 +2147483647 2147483647 2147483647 2147483647 127 127 127 127 32767 32767 32767 32767 8388607 8388607 8388607 8388607 9223372036854775807 9223372036854775807 9223372036854775807 9223372036854775807 +INSERT INTO t1 (i,i0,i1,i20,t,t0,t1,t20,s,s0,s1,s20,m,m0,m1,m20,b,b0,b1,b20) VALUES (-2147483649,-2147483649,-2147483649,-2147483649,-129,-129,-129,-129,-32769,-32769,-32769,-32769,-8388609,-8388609,-8388609,-8388609,-9223372036854775809,-9223372036854775809,-9223372036854775809,-9223372036854775809); +Warnings: +Warning 1264 Out of range value for column 'i' at row 1 +Warning 1264 Out of range value for column 'i0' at row 1 +Warning 1264 Out of range value for column 'i1' at row 1 +Warning 1264 Out of range value for column 'i20' at row 1 +Warning 1264 Out of range value for column 't' at row 1 +Warning 1264 Out of range value for column 't0' at row 1 +Warning 1264 Out of range value for column 't1' at row 1 +Warning 1264 Out of range value for 
column 't20' at row 1 +Warning 1264 Out of range value for column 's' at row 1 +Warning 1264 Out of range value for column 's0' at row 1 +Warning 1264 Out of range value for column 's1' at row 1 +Warning 1264 Out of range value for column 's20' at row 1 +Warning 1264 Out of range value for column 'm' at row 1 +Warning 1264 Out of range value for column 'm0' at row 1 +Warning 1264 Out of range value for column 'm1' at row 1 +Warning 1264 Out of range value for column 'm20' at row 1 +Warning 1264 Out of range value for column 'b' at row 1 +Warning 1264 Out of range value for column 'b0' at row 1 +Warning 1264 Out of range value for column 'b1' at row 1 +Warning 1264 Out of range value for column 'b20' at row 1 +INSERT INTO t1 (i,i0,i1,i20,t,t0,t1,t20,s,s0,s1,s20,m,m0,m1,m20,b,b0,b1,b20) VALUES (4294967296,4294967296,4294967296,4294967296,256,256,256,256,65536,65536,65536,65536,16777216,16777216,16777216,16777216,18446744073709551616,18446744073709551616,18446744073709551616,18446744073709551616); +Warnings: +Warning 1264 Out of range value for column 'i' at row 1 +Warning 1264 Out of range value for column 'i0' at row 1 +Warning 1264 Out of range value for column 'i1' at row 1 +Warning 1264 Out of range value for column 'i20' at row 1 +Warning 1264 Out of range value for column 't' at row 1 +Warning 1264 Out of range value for column 't0' at row 1 +Warning 1264 Out of range value for column 't1' at row 1 +Warning 1264 Out of range value for column 't20' at row 1 +Warning 1264 Out of range value for column 's' at row 1 +Warning 1264 Out of range value for column 's0' at row 1 +Warning 1264 Out of range value for column 's1' at row 1 +Warning 1264 Out of range value for column 's20' at row 1 +Warning 1264 Out of range value for column 'm' at row 1 +Warning 1264 Out of range value for column 'm0' at row 1 +Warning 1264 Out of range value for column 'm1' at row 1 +Warning 1264 Out of range value for column 'm20' at row 1 +Warning 1264 Out of range value for column 'b' at 
row 1 +Warning 1264 Out of range value for column 'b0' at row 1 +Warning 1264 Out of range value for column 'b1' at row 1 +Warning 1264 Out of range value for column 'b20' at row 1 +INSERT INTO t1 (i,i0,i1,i20,t,t0,t1,t20,s,s0,s1,s20,m,m0,m1,m20,b,b0,b1,b20) SELECT b,b,b,b,b,b,b,b,b,b,b,b,b,b,b,b,b,b,b,b FROM t1 WHERE b IN (-9223372036854775808,9223372036854775807,18446744073709551615); +Warnings: +Warning 1264 Out of range value for column 'i' at row 8 +Warning 1264 Out of range value for column 'i0' at row 8 +Warning 1264 Out of range value for column 'i1' at row 8 +Warning 1264 Out of range value for column 'i20' at row 8 +Warning 1264 Out of range value for column 't' at row 8 +Warning 1264 Out of range value for column 't0' at row 8 +Warning 1264 Out of range value for column 't1' at row 8 +Warning 1264 Out of range value for column 't20' at row 8 +Warning 1264 Out of range value for column 's' at row 8 +Warning 1264 Out of range value for column 's0' at row 8 +Warning 1264 Out of range value for column 's1' at row 8 +Warning 1264 Out of range value for column 's20' at row 8 +Warning 1264 Out of range value for column 'm' at row 8 +Warning 1264 Out of range value for column 'm0' at row 8 +Warning 1264 Out of range value for column 'm1' at row 8 +Warning 1264 Out of range value for column 'm20' at row 8 +Warning 1264 Out of range value for column 'i' at row 9 +Warning 1264 Out of range value for column 'i0' at row 9 +Warning 1264 Out of range value for column 'i1' at row 9 +Warning 1264 Out of range value for column 'i20' at row 9 +Warning 1264 Out of range value for column 't' at row 9 +Warning 1264 Out of range value for column 't0' at row 9 +Warning 1264 Out of range value for column 't1' at row 9 +Warning 1264 Out of range value for column 't20' at row 9 +Warning 1264 Out of range value for column 's' at row 9 +Warning 1264 Out of range value for column 's0' at row 9 +Warning 1264 Out of range value for column 's1' at row 9 +Warning 1264 Out of range value 
for column 's20' at row 9 +Warning 1264 Out of range value for column 'm' at row 9 +Warning 1264 Out of range value for column 'm0' at row 9 +Warning 1264 Out of range value for column 'm1' at row 9 +Warning 1264 Out of range value for column 'm20' at row 9 +Warning 1264 Out of range value for column 'i' at row 10 +Warning 1264 Out of range value for column 'i0' at row 10 +Warning 1264 Out of range value for column 'i1' at row 10 +Warning 1264 Out of range value for column 'i20' at row 10 +Warning 1264 Out of range value for column 't' at row 10 +Warning 1264 Out of range value for column 't0' at row 10 +Warning 1264 Out of range value for column 't1' at row 10 +Warning 1264 Out of range value for column 't20' at row 10 +Warning 1264 Out of range value for column 's' at row 10 +Warning 1264 Out of range value for column 's0' at row 10 +Warning 1264 Out of range value for column 's1' at row 10 +Warning 1264 Out of range value for column 's20' at row 10 +Warning 1264 Out of range value for column 'm' at row 10 +Warning 1264 Out of range value for column 'm0' at row 10 +Warning 1264 Out of range value for column 'm1' at row 10 +Warning 1264 Out of range value for column 'm20' at row 10 +Warning 1264 Out of range value for column 'i' at row 11 +Warning 1264 Out of range value for column 'i0' at row 11 +Warning 1264 Out of range value for column 'i1' at row 11 +Warning 1264 Out of range value for column 'i20' at row 11 +Warning 1264 Out of range value for column 't' at row 11 +Warning 1264 Out of range value for column 't0' at row 11 +Warning 1264 Out of range value for column 't1' at row 11 +Warning 1264 Out of range value for column 't20' at row 11 +Warning 1264 Out of range value for column 's' at row 11 +Warning 1264 Out of range value for column 's0' at row 11 +Warning 1264 Out of range value for column 's1' at row 11 +Warning 1264 Out of range value for column 's20' at row 11 +Warning 1264 Out of range value for column 'm' at row 11 +Warning 1264 Out of range 
value for column 'm0' at row 11 +Warning 1264 Out of range value for column 'm1' at row 11 +Warning 1264 Out of range value for column 'm20' at row 11 +SELECT i,i0,i1,i20,t,t0,t1,t20,s,s0,s1,s20,m,m0,m1,m20,b,b0,b1,b20 FROM t1; +i i0 i1 i20 t t0 t1 t20 s s0 s1 s20 m m0 m1 m20 b b0 b1 b20 +-2147483648 -2147483648 -2147483648 -2147483648 -128 -128 -128 -128 -32768 -32768 -32768 -32768 -8388608 -8388608 -8388608 -8388608 -9223372036854775808 -9223372036854775808 -9223372036854775808 -9223372036854775808 +-2147483648 -2147483648 -2147483648 -2147483648 -128 -128 -128 -128 -32768 -32768 -32768 -32768 -8388608 -8388608 -8388608 -8388608 -9223372036854775808 -9223372036854775808 -9223372036854775808 -9223372036854775808 +-2147483648 -2147483648 -2147483648 -2147483648 -128 -128 -128 -128 -32768 -32768 -32768 -32768 -8388608 -8388608 -8388608 -8388608 -9223372036854775808 -9223372036854775808 -9223372036854775808 -9223372036854775808 +-2147483648 -2147483648 -2147483648 -2147483648 -128 -128 -128 -128 -32768 -32768 -32768 -32768 -8388608 -8388608 -8388608 -8388608 -9223372036854775808 -9223372036854775808 -9223372036854775808 -9223372036854775808 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 +2147483647 2147483647 2147483647 2147483647 127 127 127 127 32767 32767 32767 32767 8388607 8388607 8388607 8388607 9223372036854775807 9223372036854775807 9223372036854775807 9223372036854775807 +2147483647 2147483647 2147483647 2147483647 127 127 127 127 32767 32767 32767 32767 8388607 8388607 8388607 8388607 9223372036854775807 9223372036854775807 9223372036854775807 9223372036854775807 +2147483647 2147483647 2147483647 2147483647 127 127 127 127 32767 32767 32767 32767 8388607 8388607 8388607 8388607 9223372036854775807 9223372036854775807 9223372036854775807 9223372036854775807 +2147483647 2147483647 2147483647 2147483647 127 127 127 127 32767 32767 32767 32767 8388607 8388607 8388607 8388607 9223372036854775807 9223372036854775807 
9223372036854775807 9223372036854775807 +2147483647 2147483647 2147483647 2147483647 127 127 127 127 32767 32767 32767 32767 8388607 8388607 8388607 8388607 9223372036854775807 9223372036854775807 9223372036854775807 9223372036854775807 +2147483647 2147483647 2147483647 2147483647 127 127 127 127 32767 32767 32767 32767 8388607 8388607 8388607 8388607 9223372036854775807 9223372036854775807 9223372036854775807 9223372036854775807 +ALTER TABLE t1 ADD COLUMN i257 INT(257) NOT NULL; +ERROR 42000: Display width out of range for 'i257' (max = 255) +DROP TABLE t1; +DROP TABLE IF EXISTS t1; +#---------------------------------- +# INT NOT NULL columns without a default +#---------------------------------- +CREATE TABLE t1 (pk INT AUTO_INCREMENT PRIMARY KEY, c INT NOT NULL) ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +pk int(11) NO PRI NULL auto_increment +c int(11) NO NULL +INSERT INTO t1 (c) VALUES (NULL); +ERROR 23000: Column 'c' cannot be null +INSERT INTO t1 (c) VALUES (2147483647); +SELECT HEX(c) FROM t1; +HEX(c) +7FFFFFFF +DROP TABLE t1; +#---------------------------------- +# INT NOT NULL columns with a default +#---------------------------------- +CREATE TABLE t1 ( +pk INT AUTO_INCREMENT PRIMARY KEY, +c INT NOT NULL DEFAULT 2147483647 +) ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +pk int(11) NO PRI NULL auto_increment +c int(11) NO 2147483647 +ALTER TABLE t1 ADD COLUMN err INT NOT NULL DEFAULT NULL; +ERROR 42000: Invalid default value for 'err' +INSERT INTO t1 (c) VALUES (NULL); +ERROR 23000: Column 'c' cannot be null +INSERT INTO t1 (c) VALUES (2147483647); +INSERT INTO t1 () VALUES (); +SELECT pk, HEX(c) FROM t1 ORDER BY pk; +pk HEX(c) +1 7FFFFFFF +2 7FFFFFFF +DROP TABLE t1; +DROP TABLE IF EXISTS t1; +#---------------------------------- +# TINYINT NOT NULL columns without a default +#---------------------------------- +CREATE TABLE t1 (pk INT AUTO_INCREMENT PRIMARY KEY, c TINYINT NOT NULL) ENGINE=rocksdb; 
+SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +pk int(11) NO PRI NULL auto_increment +c tinyint(4) NO NULL +INSERT INTO t1 (c) VALUES (NULL); +ERROR 23000: Column 'c' cannot be null +INSERT INTO t1 (c) VALUES (127); +SELECT HEX(c) FROM t1; +HEX(c) +7F +DROP TABLE t1; +#---------------------------------- +# TINYINT NOT NULL columns with a default +#---------------------------------- +CREATE TABLE t1 ( +pk INT AUTO_INCREMENT PRIMARY KEY, +c TINYINT NOT NULL DEFAULT 127 +) ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +pk int(11) NO PRI NULL auto_increment +c tinyint(4) NO 127 +ALTER TABLE t1 ADD COLUMN err TINYINT NOT NULL DEFAULT NULL; +ERROR 42000: Invalid default value for 'err' +INSERT INTO t1 (c) VALUES (NULL); +ERROR 23000: Column 'c' cannot be null +INSERT INTO t1 (c) VALUES (127); +INSERT INTO t1 () VALUES (); +SELECT pk, HEX(c) FROM t1 ORDER BY pk; +pk HEX(c) +1 7F +2 7F +DROP TABLE t1; +DROP TABLE IF EXISTS t1; +#---------------------------------- +# SMALLINT NOT NULL columns without a default +#---------------------------------- +CREATE TABLE t1 (pk INT AUTO_INCREMENT PRIMARY KEY, c SMALLINT NOT NULL) ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +pk int(11) NO PRI NULL auto_increment +c smallint(6) NO NULL +INSERT INTO t1 (c) VALUES (NULL); +ERROR 23000: Column 'c' cannot be null +INSERT INTO t1 (c) VALUES (0); +SELECT HEX(c) FROM t1; +HEX(c) +0 +DROP TABLE t1; +#---------------------------------- +# SMALLINT NOT NULL columns with a default +#---------------------------------- +CREATE TABLE t1 ( +pk INT AUTO_INCREMENT PRIMARY KEY, +c SMALLINT NOT NULL DEFAULT 0 +) ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +pk int(11) NO PRI NULL auto_increment +c smallint(6) NO 0 +ALTER TABLE t1 ADD COLUMN err SMALLINT NOT NULL DEFAULT NULL; +ERROR 42000: Invalid default value for 'err' +INSERT INTO t1 (c) VALUES (NULL); +ERROR 23000: Column 'c' cannot be null +INSERT INTO t1 (c) 
VALUES (0); +INSERT INTO t1 () VALUES (); +SELECT pk, HEX(c) FROM t1 ORDER BY pk; +pk HEX(c) +1 0 +2 0 +DROP TABLE t1; +DROP TABLE IF EXISTS t1; +#---------------------------------- +# MEDIUMINT NOT NULL columns without a default +#---------------------------------- +CREATE TABLE t1 (pk INT AUTO_INCREMENT PRIMARY KEY, c MEDIUMINT NOT NULL) ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +pk int(11) NO PRI NULL auto_increment +c mediumint(9) NO NULL +INSERT INTO t1 (c) VALUES (NULL); +ERROR 23000: Column 'c' cannot be null +INSERT INTO t1 (c) VALUES (1); +SELECT HEX(c) FROM t1; +HEX(c) +1 +DROP TABLE t1; +#---------------------------------- +# MEDIUMINT NOT NULL columns with a default +#---------------------------------- +CREATE TABLE t1 ( +pk INT AUTO_INCREMENT PRIMARY KEY, +c MEDIUMINT NOT NULL DEFAULT 1 +) ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +pk int(11) NO PRI NULL auto_increment +c mediumint(9) NO 1 +ALTER TABLE t1 ADD COLUMN err MEDIUMINT NOT NULL DEFAULT NULL; +ERROR 42000: Invalid default value for 'err' +INSERT INTO t1 (c) VALUES (NULL); +ERROR 23000: Column 'c' cannot be null +INSERT INTO t1 (c) VALUES (1); +INSERT INTO t1 () VALUES (); +SELECT pk, HEX(c) FROM t1 ORDER BY pk; +pk HEX(c) +1 1 +2 1 +DROP TABLE t1; +DROP TABLE IF EXISTS t1; +#---------------------------------- +# BIGINT NOT NULL columns without a default +#---------------------------------- +CREATE TABLE t1 (pk INT AUTO_INCREMENT PRIMARY KEY, c BIGINT NOT NULL) ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +pk int(11) NO PRI NULL auto_increment +c bigint(20) NO NULL +INSERT INTO t1 (c) VALUES (NULL); +ERROR 23000: Column 'c' cannot be null +INSERT INTO t1 (c) VALUES (9223372036854775807); +SELECT HEX(c) FROM t1; +HEX(c) +7FFFFFFFFFFFFFFF +DROP TABLE t1; +#---------------------------------- +# BIGINT NOT NULL columns with a default +#---------------------------------- +CREATE TABLE t1 ( +pk INT AUTO_INCREMENT 
PRIMARY KEY, +c BIGINT NOT NULL DEFAULT 9223372036854775807 +) ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +pk int(11) NO PRI NULL auto_increment +c bigint(20) NO 9223372036854775807 +ALTER TABLE t1 ADD COLUMN err BIGINT NOT NULL DEFAULT NULL; +ERROR 42000: Invalid default value for 'err' +INSERT INTO t1 (c) VALUES (NULL); +ERROR 23000: Column 'c' cannot be null +INSERT INTO t1 (c) VALUES (9223372036854775807); +INSERT INTO t1 () VALUES (); +SELECT pk, HEX(c) FROM t1 ORDER BY pk; +pk HEX(c) +1 7FFFFFFFFFFFFFFF +2 7FFFFFFFFFFFFFFF +DROP TABLE t1; +######################## +# SET columns +######################## +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 ( +a SET('') NOT NULL, +b SET('test1','test2','test3','test4','test5') NOT NULL, +c SET('01','02','03','04','05','06','07','08','09','10','11','12','13','14','15','16','17','18','19','20','21','22','23','24','25','26','27','28','29','30','31','32','33','34','35','36','37','38','39','40','41','42','43','44','45','46','47','48','49','50''51','52','53','54','55','56','57','58','59','60','61','62','63','64') NOT NULL, +PRIMARY KEY (c) +) ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +a set('') NO NULL +b set('test1','test2','test3','test4','test5') NO NULL +c set('01','02','03','04','05','06','07','08','09','10','11','12','13','14','15','16','17','18','19','20','21','22','23','24','25','26','27','28','29','30','31','32','33','34','35','36','37','38','39','40','41','42','43','44','45','46','47','48','49','50''51','52','53','54','55','56','57','58','59','60','61','62','63','64') NO PRI NULL +INSERT INTO t1 (a,b,c) VALUES +('','test2,test3','01,34,44,,23'), +('',5,2), +(',','test4,test2',''); +Warnings: +Warning 1265 Data truncated for column 'c' at row 1 +SELECT a,b,c FROM t1; +a b c + test1,test3 02 + test2,test3 01,23,34,44 + test2,test4 +INSERT INTO t1 (a,b,c) VALUES (0,'test6',-1); +Warnings: +Warning 1265 Data truncated for column 'b' at row 1 +Warning 1265 Data 
truncated for column 'c' at row 1 +SELECT a,b,c FROM t1; +a b c + 01,02,03,04,05,06,07,08,09,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50'51,52,53,54,55,56,57,58,59,60,61,62,63,64 + test1,test3 02 + test2,test3 01,23,34,44 + test2,test4 +ALTER TABLE t1 ADD COLUMN e SET('a','A') NOT NULL; +Warnings: +Note 1291 Column 'e' has duplicated value 'a' in SET +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +a set('') NO NULL +b set('test1','test2','test3','test4','test5') NO NULL +c set('01','02','03','04','05','06','07','08','09','10','11','12','13','14','15','16','17','18','19','20','21','22','23','24','25','26','27','28','29','30','31','32','33','34','35','36','37','38','39','40','41','42','43','44','45','46','47','48','49','50''51','52','53','54','55','56','57','58','59','60','61','62','63','64') NO PRI NULL +e set('a','A') NO NULL +ALTER TABLE t1 ADD COLUMN f SET('1','2','3','4','5','6','7','8','9','a','b','c','d','e','f','g','h','i','j','k','l','m','n','o','p','q','r','s','t','u','v','w','x','y','z',' ','11','12','13','14','15','16','17','18','19','1a','1b','1c','1d','1e','1f','1g','1h','1i','1j','1k','1l','1m','1n','1o','1p','1q','1r','1s','1t','1u','1v','1w','1x','1y','1z','20','21','22','23','24','25','26','27','28','29','2a','2b','2c','2d','2e','2f','2g','2h','2i','2j','2k','2l','2m','2n','2o','2p','2q','2r','2s','2t','2u','2v','2w','2x','2y','2z','30','31','32','33','34','35','36','37','38','39','3a','3b','3c','3d','3e','3f','3g','3h','3i') NOT NULL; +ERROR HY000: Too many strings for column f and SET +SELECT a,b,c,e FROM t1 WHERE FIND_IN_SET('test2',b)>0 OR a != ''; +a b c e + test2,test3 01,23,34,44 + test2,test4 +DROP TABLE t1; +DROP TABLE IF EXISTS t1; +#---------------------------------- +# SET('test1','test2','test3') NOT NULL columns without a default +#---------------------------------- +CREATE TABLE t1 (pk INT AUTO_INCREMENT PRIMARY KEY, c SET('test1','test2','test3') 
NOT NULL) ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +pk int(11) NO PRI NULL auto_increment +c set('test1','test2','test3') NO NULL +INSERT INTO t1 (c) VALUES (NULL); +ERROR 23000: Column 'c' cannot be null +INSERT INTO t1 (c) VALUES ('test2,test3'); +SELECT HEX(c) FROM t1; +HEX(c) +74657374322C7465737433 +DROP TABLE t1; +#---------------------------------- +# SET('test1','test2','test3') NOT NULL columns with a default +#---------------------------------- +CREATE TABLE t1 ( +pk INT AUTO_INCREMENT PRIMARY KEY, +c SET('test1','test2','test3') NOT NULL DEFAULT 'test2,test3' +) ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +pk int(11) NO PRI NULL auto_increment +c set('test1','test2','test3') NO test2,test3 +ALTER TABLE t1 ADD COLUMN err SET('test1','test2','test3') NOT NULL DEFAULT NULL; +ERROR 42000: Invalid default value for 'err' +INSERT INTO t1 (c) VALUES (NULL); +ERROR 23000: Column 'c' cannot be null +INSERT INTO t1 (c) VALUES ('test2,test3'); +INSERT INTO t1 () VALUES (); +SELECT pk, HEX(c) FROM t1 ORDER BY pk; +pk HEX(c) +1 74657374322C7465737433 +2 74657374322C7465737433 +DROP TABLE t1; +######################## +# TEXT columns +######################## +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 ( +pk INT AUTO_INCREMENT PRIMARY KEY, +t TEXT NOT NULL, +t0 TEXT(0) NOT NULL, +t1 TEXT(1) NOT NULL, +t300 TEXT(300) NOT NULL, +tm TEXT(65535) NOT NULL, +t70k TEXT(70000) NOT NULL, +t17m TEXT(17000000) NOT NULL, +tt TINYTEXT NOT NULL, +m MEDIUMTEXT NOT NULL, +l LONGTEXT NOT NULL +) ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +pk int(11) NO PRI NULL auto_increment +t text NO NULL +t0 text NO NULL +t1 tinytext NO NULL +t300 text NO NULL +tm text NO NULL +t70k mediumtext NO NULL +t17m longtext NO NULL +tt tinytext NO NULL +m mediumtext NO NULL +l longtext NO NULL +INSERT INTO t1 (t,t0,t1,t300,tm,t70k,t17m,tt,m,l) VALUES +('','','','','','','','','',''), 
+('a','b','c','d','e','f','g','h','i','j'), +('test1','test2','test3','test4','test5','test6','test7','test8','test9','test10'), +( REPEAT('a',65535), REPEAT('b',65535), REPEAT('c',255), REPEAT('d',65535), REPEAT('e',65535), REPEAT('f',1048576), REPEAT('g',1048576), REPEAT('h',255), REPEAT('i',1048576), REPEAT('j',1048576) ); +SELECT LENGTH(t), LENGTH(t0), LENGTH(t1), LENGTH(t300), LENGTH(tm), LENGTH(t70k), LENGTH(t17m), LENGTH(tt), LENGTH(m), LENGTH(l) FROM t1; +LENGTH(t) LENGTH(t0) LENGTH(t1) LENGTH(t300) LENGTH(tm) LENGTH(t70k) LENGTH(t17m) LENGTH(tt) LENGTH(m) LENGTH(l) +0 0 0 0 0 0 0 0 0 0 +1 1 1 1 1 1 1 1 1 1 +5 5 5 5 5 5 5 5 5 6 +65535 65535 255 65535 65535 1048576 1048576 255 1048576 1048576 +INSERT INTO t1 (t,t0,t1,t300,tm,t70k,t17m,tt,m,l) VALUES +( REPEAT('a',65536), REPEAT('b',65536), REPEAT('c',256), REPEAT('d',65536), REPEAT('e',65536), REPEAT('f',1048576), REPEAT('g',1048576), REPEAT('h',256), REPEAT('i',1048576), REPEAT('j',1048576) ); +Warnings: +Warning 1265 Data truncated for column 't' at row 1 +Warning 1265 Data truncated for column 't0' at row 1 +Warning 1265 Data truncated for column 't1' at row 1 +Warning 1265 Data truncated for column 't300' at row 1 +Warning 1265 Data truncated for column 'tm' at row 1 +Warning 1265 Data truncated for column 'tt' at row 1 +SELECT LENGTH(t), LENGTH(t0), LENGTH(t1), LENGTH(t300), LENGTH(tm), LENGTH(t70k), LENGTH(t17m), LENGTH(tt), LENGTH(m), LENGTH(l) FROM t1; +LENGTH(t) LENGTH(t0) LENGTH(t1) LENGTH(t300) LENGTH(tm) LENGTH(t70k) LENGTH(t17m) LENGTH(tt) LENGTH(m) LENGTH(l) +0 0 0 0 0 0 0 0 0 0 +1 1 1 1 1 1 1 1 1 1 +5 5 5 5 5 5 5 5 5 6 +65535 65535 255 65535 65535 1048576 1048576 255 1048576 1048576 +65535 65535 255 65535 65535 1048576 1048576 255 1048576 1048576 +ALTER TABLE t1 ADD COLUMN ttt TEXT(4294967296) NOT NULL; +ERROR 42000: Display width out of range for 'ttt' (max = 4294967295) +DROP TABLE t1; +DROP TABLE IF EXISTS t1; +#---------------------------------- +# TEXT NOT NULL columns without a default 
+#---------------------------------- +CREATE TABLE t1 (pk INT AUTO_INCREMENT PRIMARY KEY, c TEXT NOT NULL) ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +pk int(11) NO PRI NULL auto_increment +c text NO NULL +INSERT INTO t1 (c) VALUES (NULL); +ERROR 23000: Column 'c' cannot be null +INSERT INTO t1 (c) VALUES (''); +SELECT HEX(c) FROM t1; +HEX(c) + +DROP TABLE t1; +#---------------------------------- +# TEXT NOT NULL columns with a default +#---------------------------------- +CREATE TABLE t1 ( +pk INT AUTO_INCREMENT PRIMARY KEY, +c TEXT NOT NULL DEFAULT '' +) ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +pk int(11) NO PRI NULL auto_increment +c text NO '' +ALTER TABLE t1 ADD COLUMN err TEXT NOT NULL DEFAULT NULL; +ERROR 42000: Invalid default value for 'err' +INSERT INTO t1 (c) VALUES (NULL); +ERROR 23000: Column 'c' cannot be null +INSERT INTO t1 (c) VALUES (''); +INSERT INTO t1 () VALUES (); +SELECT pk, HEX(c) FROM t1 ORDER BY pk; +pk HEX(c) +1 +2 +DROP TABLE t1; +DROP TABLE IF EXISTS t1; +#---------------------------------- +# TINYTEXT NOT NULL columns without a default +#---------------------------------- +CREATE TABLE t1 (pk INT AUTO_INCREMENT PRIMARY KEY, c TINYTEXT NOT NULL) ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +pk int(11) NO PRI NULL auto_increment +c tinytext NO NULL +INSERT INTO t1 (c) VALUES (NULL); +ERROR 23000: Column 'c' cannot be null +INSERT INTO t1 (c) VALUES (''); +SELECT HEX(c) FROM t1; +HEX(c) + +DROP TABLE t1; +#---------------------------------- +# TINYTEXT NOT NULL columns with a default +#---------------------------------- +CREATE TABLE t1 ( +pk INT AUTO_INCREMENT PRIMARY KEY, +c TINYTEXT NOT NULL DEFAULT '' +) ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +pk int(11) NO PRI NULL auto_increment +c tinytext NO '' +ALTER TABLE t1 ADD COLUMN err TINYTEXT NOT NULL DEFAULT NULL; +ERROR 42000: Invalid default value for 'err' +INSERT 
INTO t1 (c) VALUES (NULL); +ERROR 23000: Column 'c' cannot be null +INSERT INTO t1 (c) VALUES (''); +INSERT INTO t1 () VALUES (); +SELECT pk, HEX(c) FROM t1 ORDER BY pk; +pk HEX(c) +1 +2 +DROP TABLE t1; +DROP TABLE IF EXISTS t1; +#---------------------------------- +# MEDIUMTEXT NOT NULL columns without a default +#---------------------------------- +CREATE TABLE t1 (pk INT AUTO_INCREMENT PRIMARY KEY, c MEDIUMTEXT NOT NULL) ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +pk int(11) NO PRI NULL auto_increment +c mediumtext NO NULL +INSERT INTO t1 (c) VALUES (NULL); +ERROR 23000: Column 'c' cannot be null +INSERT INTO t1 (c) VALUES (''); +SELECT HEX(c) FROM t1; +HEX(c) + +DROP TABLE t1; +#---------------------------------- +# MEDIUMTEXT NOT NULL columns with a default +#---------------------------------- +CREATE TABLE t1 ( +pk INT AUTO_INCREMENT PRIMARY KEY, +c MEDIUMTEXT NOT NULL DEFAULT '' +) ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +pk int(11) NO PRI NULL auto_increment +c mediumtext NO '' +ALTER TABLE t1 ADD COLUMN err MEDIUMTEXT NOT NULL DEFAULT NULL; +ERROR 42000: Invalid default value for 'err' +INSERT INTO t1 (c) VALUES (NULL); +ERROR 23000: Column 'c' cannot be null +INSERT INTO t1 (c) VALUES (''); +INSERT INTO t1 () VALUES (); +SELECT pk, HEX(c) FROM t1 ORDER BY pk; +pk HEX(c) +1 +2 +DROP TABLE t1; +DROP TABLE IF EXISTS t1; +#---------------------------------- +# LONGTEXT NOT NULL columns without a default +#---------------------------------- +CREATE TABLE t1 (pk INT AUTO_INCREMENT PRIMARY KEY, c LONGTEXT NOT NULL) ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +pk int(11) NO PRI NULL auto_increment +c longtext NO NULL +INSERT INTO t1 (c) VALUES (NULL); +ERROR 23000: Column 'c' cannot be null +INSERT INTO t1 (c) VALUES (''); +SELECT HEX(c) FROM t1; +HEX(c) + +DROP TABLE t1; +#---------------------------------- +# LONGTEXT NOT NULL columns with a default 
+#---------------------------------- +CREATE TABLE t1 ( +pk INT AUTO_INCREMENT PRIMARY KEY, +c LONGTEXT NOT NULL DEFAULT '' +) ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +pk int(11) NO PRI NULL auto_increment +c longtext NO '' +ALTER TABLE t1 ADD COLUMN err LONGTEXT NOT NULL DEFAULT NULL; +ERROR 42000: Invalid default value for 'err' +INSERT INTO t1 (c) VALUES (NULL); +ERROR 23000: Column 'c' cannot be null +INSERT INTO t1 (c) VALUES (''); +INSERT INTO t1 () VALUES (); +SELECT pk, HEX(c) FROM t1 ORDER BY pk; +pk HEX(c) +1 +2 +DROP TABLE t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/col_opt_null.result b/storage/rocksdb/mysql-test/rocksdb/r/col_opt_null.result new file mode 100644 index 0000000000000..fb0e2af43626d --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/col_opt_null.result @@ -0,0 +1,2258 @@ +######################## +# BINARY columns +######################## +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 ( +b BINARY NULL, +b0 BINARY(0) NULL, +b1 BINARY(1) NULL, +b20 BINARY(20) NULL, +b255 BINARY(255) NULL, +pk BINARY PRIMARY KEY +) ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +b binary(1) YES NULL +b0 binary(0) YES NULL +b1 binary(1) YES NULL +b20 binary(20) YES NULL +b255 binary(255) YES NULL +pk binary(1) NO PRI NULL +INSERT INTO t1 VALUES ('','','','','',''); +INSERT INTO t1 VALUES ('a','','b','abcdefghi klmnopqrst', 'Creating an article for the Knowledgebase is similar to asking questions. First, navigate to the category where you feel the article should be. 
Once there, double check that an article doesn\'t already exist which would work.','a'); +SELECT HEX(b), HEX(b0), HEX(b1), HEX(b20), HEX(b255), HEX(pk) FROM t1 ORDER BY pk; +HEX(b) HEX(b0) HEX(b1) HEX(b20) HEX(b255) HEX(pk) +00 00 0000000000000000000000000000000000000000 000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000 00 +61 62 616263646566676869206B6C6D6E6F7071727374 4372656174696E6720616E2061727469636C6520666F7220746865204B6E6F776C65646765626173652069732073696D696C617220746F2061736B696E67207175657374696F6E732E2046697273742C206E6176696761746520746F207468652063617465676F727920776865726520796F75206665656C207468652061727469636C652073686F756C642062652E204F6E63652074686572652C20646F75626C6520636865636B207468617420616E2061727469636C6520646F65736E277420616C726561647920657869737420776869636820776F756C6420776F726B2E00000000000000000000000000000000000000000000000000000000000000 61 +INSERT INTO t1 VALUES ('abc', 'a', 'abc', REPEAT('a',21), REPEAT('x',256),'b'); +Warnings: +Warning 1265 Data truncated for column 'b' at row 1 +Warning 1265 Data truncated for column 'b0' at row 1 +Warning 1265 Data truncated for column 'b1' at row 1 +Warning 1265 Data truncated for column 'b20' at row 1 +Warning 1265 Data truncated for column 'b255' at row 1 +INSERT INTO t1 SELECT b255, b255, b255, b255, CONCAT('a',b255,b255), 'c' FROM t1; +ERROR 23000: Duplicate entry 'c' for key 'PRIMARY' +SELECT HEX(b), HEX(b0), HEX(b1), HEX(b20), HEX(b255), HEX(pk) FROM t1 ORDER BY pk; +HEX(b) HEX(b0) HEX(b1) HEX(b20) HEX(b255) HEX(pk) +00 00 
0000000000000000000000000000000000000000 000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000 00 +61 61 6161616161616161616161616161616161616161 787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878 62 +61 62 616263646566676869206B6C6D6E6F7071727374 4372656174696E6720616E2061727469636C6520666F7220746865204B6E6F776C65646765626173652069732073696D696C617220746F2061736B696E67207175657374696F6E732E2046697273742C206E6176696761746520746F207468652063617465676F727920776865726520796F75206665656C207468652061727469636C652073686F756C642062652E204F6E63652074686572652C20646F75626C6520636865636B207468617420616E2061727469636C6520646F65736E277420616C726561647920657869737420776869636820776F756C6420776F726B2E00000000000000000000000000000000000000000000000000000000000000 61 +ALTER TABLE t1 ADD COLUMN b257 BINARY(257) NULL; +ERROR 42000: Column length too big for column 'b257' (max = 255); use BLOB or TEXT instead +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +b binary(1) YES NULL +b0 binary(0) YES NULL +b1 binary(1) YES NULL +b20 binary(20) YES NULL +b255 binary(255) YES NULL 
+pk binary(1) NO PRI NULL +DROP TABLE t1; +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 ( +c BINARY NULL, +c1 BINARY NULL DEFAULT NULL, +c2 BINARY NULL DEFAULT 0, +pk INT AUTO_INCREMENT PRIMARY KEY +) ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +c binary(1) YES NULL +c1 binary(1) YES NULL +c2 binary(1) YES 0 +pk int(11) NO PRI NULL auto_increment +INSERT INTO t1 (c,c1,c2) VALUES (NULL,NULL,NULL); +INSERT INTO t1 (c,c1,c2) VALUES (0,0,0); +INSERT INTO t1 () VALUES (); +SELECT pk, HEX(c), HEX(c1), HEX(c2) FROM t1 ORDER BY pk; +pk HEX(c) HEX(c1) HEX(c2) +1 NULL NULL NULL +2 30 30 30 +3 NULL NULL 30 +SELECT pk, HEX(c2) FROM t1 ORDER BY pk; +pk HEX(c2) +1 NULL +2 30 +3 30 +DROP TABLE t1; +######################## +# VARBINARY columns +######################## +DROP TABLE IF EXISTS t1, t2; +CREATE TABLE t1 ( +v0 VARBINARY(0) NULL, +v1 VARBINARY(1) NULL, +v64 VARBINARY(64) NULL, +v65000 VARBINARY(65000) NULL, +PRIMARY KEY (v64) +) ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +v0 varbinary(0) YES NULL +v1 varbinary(1) YES NULL +v64 varbinary(64) NO PRI NULL +v65000 varbinary(65000) YES NULL +CREATE TABLE t2 (v VARBINARY(65532) NULL, PRIMARY KEY(v(255))) ENGINE=rocksdb; +SHOW COLUMNS IN t2; +Field Type Null Key Default Extra +v varbinary(65532) NO PRI NULL +INSERT INTO t1 (v0,v1,v64,v65000) VALUES ('','','',''); +INSERT INTO t1 (v0,v1,v64,v65000) VALUES ('','y','Once there, double check that an article doesn\'t already exist','Here is a list of recommended books on MariaDB and MySQL. We\'ve provided links to Amazon.com here for convenience, but they can be found at many other bookstores, both online and off. + + If you want to have your favorite MySQL / MariaDB book listed here, please leave a comment. + For developers who want to code on MariaDB or MySQL + + * Understanding MySQL Internals by Sasha Pachev, former MySQL developer at MySQL AB. 
+ o This is the only book we know about that describes the internals of MariaDB / MySQL. A must have for anyone who wants to understand and develop on MariaDB! + o Not all topics are covered and some parts are slightly outdated, but still the best book on this topic. + * MySQL 5.1 Plugin Development by Sergei Golubchik and Andrew Hutchings + o A must read for anyone wanting to write a plugin for MariaDB, written by the Sergei who designed the plugin interface for MySQL and MariaDB! + + For MariaDB / MySQL end users + + * MariaDB Crash Course by Ben Forta + o First MariaDB book! + o For people who want to learn SQL and the basics of MariaDB. + o Now shipping. Purchase at Amazon.com or your favorite bookseller. + + * SQL-99 Complete, Really by Peter Gulutzan & Trudy Pelzer. + o Everything you wanted to know about the SQL 99 standard. Excellent reference book! + o Free to read in the Knowledgebase! + + * MySQL (4th Edition) by Paul DuBois + o The \'default\' book to read if you wont to learn to use MySQL / MariaDB. + + * MySQL Cookbook by Paul DuBois + o A lot of examples of how to use MySQL. As with all of Paul\'s books, it\'s worth its weight in gold and even enjoyable reading for such a \'dry\' subject. + + * High Performance MySQL, Second Edition, By Baron Schwartz, Peter Zaitsev, Vadim Tkachenko, Jeremy D. Zawodny, Arjen Lentz, Derek J. Balling, et al. + o \"High Performance MySQL is the definitive guide to building fast, reliable systems with MySQL. Written by noted experts with years of real-world experience building very large systems, this book covers every aspect of MySQL performance in detail, and focuses on robustness, security, and data integrity. 
Learn advanced techniques in depth so you can bring out MySQL\'s full power.\" (From the book description at O\'Reilly) + + * MySQL Admin Cookbook + o A quick step-by-step guide for MySQL users and database administrators to tackle real-world challenges with MySQL configuration and administration + + * MySQL 5.0 Certification Study Guide, By Paul DuBois, Stefan Hinz, Carsten Pedersen + o This is the official guide to cover the passing of the two MySQL Certification examinations. It is valid till version 5.0 of the server, so while it misses all the features available in MySQL 5.1 and greater (including MariaDB 5.1 and greater), it provides a good basic understanding of MySQL for the end-user. '); +SELECT HEX(v0), HEX(v1), HEX(v64), HEX(v65000) FROM t1; +HEX(v0) HEX(v1) HEX(v64) HEX(v65000) + + 79 4F6E63652074686572652C20646F75626C6520636865636B207468617420616E2061727469636C6520646F65736E277420616C7265616479206578697374 486572652069732061206C697374206F66207265636F6D6D656E64656420626F6F6B73206F6E204D61726961444220616E64204D7953514C2E2057652776652070726F7669646564206C696E6B7320746F20416D617A6F6E2E636F6D206865726520666F7220636F6E76656E69656E63652C2062757420746865792063616E20626520666F756E64206174206D616E79206F7468657220626F6F6B73746F7265732C20626F7468206F6E6C696E6520616E64206F66662E0A0A2020496620796F752077616E7420746F206861766520796F7572206661766F72697465204D7953514C202F204D61726961444220626F6F6B206C697374656420686572652C20706C65617365206C65617665206120636F6D6D656E742E0A2020466F7220646576656C6F706572732077686F2077616E7420746F20636F6465206F6E204D617269614442206F72204D7953514C0A0A2020202020202A20556E6465727374616E64696E67204D7953514C20496E7465726E616C73206279205361736861205061636865762C20666F726D6572204D7953514C20646576656C6F706572206174204D7953514C2041422E0A2020202020202020202020206F205468697320697320746865206F6E6C7920626F6F6B207765206B6E6F772061626F75742074686174206465736372696265732074686520696E7465726E616C73206F66204D617269614442202F204D7953514C2E2041206D7573742068617
66520666F7220616E796F6E652077686F2077616E747320746F20756E6465727374616E6420616E6420646576656C6F70206F6E204D617269614442210A2020202020202020202020206F204E6F7420616C6C20746F706963732061726520636F766572656420616E6420736F6D652070617274732061726520736C696768746C79206F757464617465642C20627574207374696C6C20746865206265737420626F6F6B206F6E207468697320746F7069632E200A2020202020202A204D7953514C20352E3120506C7567696E20446576656C6F706D656E742062792053657267656920476F6C75626368696B20616E6420416E64726577204875746368696E67730A2020202020202020202020206F2041206D757374207265616420666F7220616E796F6E652077616E74696E6720746F207772697465206120706C7567696E20666F72204D6172696144422C207772697474656E20627920746865205365726765692077686F2064657369676E65642074686520706C7567696E20696E7465726661636520666F72204D7953514C20616E64204D61726961444221200A0A2020466F72204D617269614442202F204D7953514C20656E642075736572730A0A2020202020202A204D61726961444220437261736820436F757273652062792042656E20466F7274610A2020202020202020202020206F204669727374204D61726961444220626F6F6B210A2020202020202020202020206F20466F722070656F706C652077686F2077616E7420746F206C6561726E2053514C20616E642074686520626173696373206F66204D6172696144422E0A2020202020202020202020206F204E6F77207368697070696E672E20507572636861736520617420416D617A6F6E2E636F6D206F7220796F7572206661766F7269746520626F6F6B73656C6C65722E200A0A2020202020202A2053514C2D393920436F6D706C6574652C205265616C6C792062792050657465722047756C75747A616E20262054727564792050656C7A65722E0A2020202020202020202020206F2045766572797468696E6720796F752077616E74656420746F206B6E6F772061626F7574207468652053514C203939207374616E646172642E20457863656C6C656E74207265666572656E636520626F6F6B210A2020202020202020202020206F204672656520746F207265616420696E20746865204B6E6F776C656467656261736521200A0A2020202020202A204D7953514C20283474682045646974696F6E29206279205061756C204475426F69730A2020202020202020202020206F20546865202764656661756C742720626F6F6B20746F207265616420696620796F7520776F6E7420746F206C6561726E207
46F20757365204D7953514C202F204D6172696144422E200A0A2020202020202A204D7953514C20436F6F6B626F6F6B206279205061756C204475426F69730A2020202020202020202020206F2041206C6F74206F66206578616D706C6573206F6620686F7720746F20757365204D7953514C2E204173207769746820616C6C206F66205061756C277320626F6F6B732C206974277320776F727468206974732077656967687420696E20676F6C6420616E64206576656E20656E6A6F7961626C652072656164696E6720666F7220737563682061202764727927207375626A6563742E200A0A2020202020202A204869676820506572666F726D616E6365204D7953514C2C205365636F6E642045646974696F6E2C204279204261726F6E20536368776172747A2C205065746572205A6169747365762C20566164696D20546B616368656E6B6F2C204A6572656D7920442E205A61776F646E792C2041726A656E204C656E747A2C20446572656B204A2E2042616C6C696E672C20657420616C2E0A2020202020202020202020206F20224869676820506572666F726D616E6365204D7953514C2069732074686520646566696E697469766520677569646520746F206275696C64696E6720666173742C2072656C6961626C652073797374656D732077697468204D7953514C2E205772697474656E206279206E6F74656420657870657274732077697468207965617273206F66207265616C2D776F726C6420657870657269656E6365206275696C64696E672076657279206C617267652073797374656D732C207468697320626F6F6B20636F7665727320657665727920617370656374206F66204D7953514C20706572666F726D616E636520696E2064657461696C2C20616E6420666F6375736573206F6E20726F627573746E6573732C2073656375726974792C20616E64206461746120696E746567726974792E204C6561726E20616476616E63656420746563686E697175657320696E20646570746820736F20796F752063616E206272696E67206F7574204D7953514C27732066756C6C20706F7765722E22202846726F6D2074686520626F6F6B206465736372697074696F6E206174204F275265696C6C7929200A0A2020202020202A204D7953514C2041646D696E20436F6F6B626F6F6B0A2020202020202020202020206F204120717569636B20737465702D62792D7374657020677569646520666F72204D7953514C20757365727320616E642064617461626173652061646D696E6973747261746F727320746F207461636B6C65207265616C2D776F726C64206368616C6C656E6765732077697468204D7953514C20636F6E66696775726174696F6E20616E6420616
46D696E697374726174696F6E200A0A2020202020202A204D7953514C20352E302043657274696669636174696F6E2053747564792047756964652C204279205061756C204475426F69732C2053746566616E2048696E7A2C204361727374656E20506564657273656E0A2020202020202020202020206F205468697320697320746865206F6666696369616C20677569646520746F20636F766572207468652070617373696E67206F66207468652074776F204D7953514C2043657274696669636174696F6E206578616D696E6174696F6E732E2049742069732076616C69642074696C6C2076657273696F6E20352E30206F6620746865207365727665722C20736F207768696C65206974206D697373657320616C6C2074686520666561747572657320617661696C61626C6520696E204D7953514C20352E3120616E6420677265617465722028696E636C7564696E67204D61726961444220352E3120616E642067726561746572292C2069742070726F7669646573206120676F6F6420626173696320756E6465727374616E64696E67206F66204D7953514C20666F722074686520656E642D757365722E20 +INSERT INTO t1 (v0,v1,v64,v65000) VALUES ('y', 'yy', REPEAT('c',65), REPEAT('abcdefghi ',6501)); +Warnings: +Warning 1265 Data truncated for column 'v0' at row 1 +Warning 1265 Data truncated for column 'v1' at row 1 +Warning 1265 Data truncated for column 'v64' at row 1 +Warning 1265 Data truncated for column 'v65000' at row 1 +INSERT INTO t1 (v0,v1,v64,v65000) SELECT v65000, v65000, CONCAT('a',v65000), CONCAT(v65000,v1) FROM t1; +Warnings: +Warning 1265 Data truncated for column 'v0' at row 5 +Warning 1265 Data truncated for column 'v1' at row 5 +Warning 1265 Data truncated for column 'v64' at row 5 +Warning 1265 Data truncated for column 'v0' at row 6 +Warning 1265 Data truncated for column 'v1' at row 6 +Warning 1265 Data truncated for column 'v64' at row 6 +Warning 1265 Data truncated for column 'v65000' at row 6 +SELECT HEX(v0), HEX(v1), HEX(v64), LENGTH(HEX(v65000)) FROM t1; +HEX(v0) HEX(v1) HEX(v64) LENGTH(HEX(v65000)) + 0 + 61 0 + 48 61486572652069732061206C697374206F66207265636F6D6D656E64656420626F6F6B73206F6E204D61726961444220616E64204D7953514C2E205765277665 5932 + 61 
61616263646566676869206162636465666768692061626364656667686920616263646566676869206162636465666768692061626364656667686920616263 130000 + 79 4F6E63652074686572652C20646F75626C6520636865636B207468617420616E2061727469636C6520646F65736E277420616C7265616479206578697374 5930 + 79 63636363636363636363636363636363636363636363636363636363636363636363636363636363636363636363636363636363636363636363636363636363 130000 +ALTER TABLE t1 ADD COLUMN v65536 VARBINARY(65536) NULL; +Warnings: +Note 1246 Converting column 'v65536' from VARBINARY to BLOB +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +v0 varbinary(0) YES NULL +v1 varbinary(1) YES NULL +v64 varbinary(64) NO PRI NULL +v65000 varbinary(65000) YES NULL +v65536 mediumblob YES NULL +DROP TABLE t1, t2; +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 ( +c VARBINARY(64) NULL, +c1 VARBINARY(64) NULL DEFAULT NULL, +c2 VARBINARY(64) NULL DEFAULT 'test', +pk INT AUTO_INCREMENT PRIMARY KEY +) ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +c varbinary(64) YES NULL +c1 varbinary(64) YES NULL +c2 varbinary(64) YES test +pk int(11) NO PRI NULL auto_increment +INSERT INTO t1 (c,c1,c2) VALUES (NULL,NULL,NULL); +INSERT INTO t1 (c,c1,c2) VALUES ('test','test','test'); +INSERT INTO t1 () VALUES (); +SELECT pk, HEX(c), HEX(c1), HEX(c2) FROM t1 ORDER BY pk; +pk HEX(c) HEX(c1) HEX(c2) +1 NULL NULL NULL +2 74657374 74657374 74657374 +3 NULL NULL 74657374 +SELECT pk, HEX(c2) FROM t1 ORDER BY pk; +pk HEX(c2) +1 NULL +2 74657374 +3 74657374 +DROP TABLE t1; +######################## +# BIT columns +######################## +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 ( +a BIT NULL, +b BIT(20) NULL, +c BIT(64) NULL, +d BIT(1) NULL, +PRIMARY KEY (c) +) ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +a bit(1) YES NULL +b bit(20) YES NULL +c bit(64) NO PRI NULL +d bit(1) YES NULL +ALTER TABLE t1 DROP COLUMN d; +ALTER TABLE t1 ADD COLUMN d BIT(0) NULL; +SHOW COLUMNS IN t1; +Field Type Null Key Default 
Extra +a bit(1) YES NULL +b bit(20) YES NULL +c bit(64) NO PRI NULL +d bit(1) YES NULL +INSERT INTO t1 (a,b,c,d) VALUES (0,POW(2,20)-1,b'1111111111111111111111111111111111111111111111111111111111111111',1); +SELECT BIN(a), HEX(b), c+0 FROM t1 WHERE d>0; +BIN(a) HEX(b) c+0 +0 FFFFF 18446744073709551615 +INSERT INTO t1 (a,b,c,d) VALUES (1,0,-2,0); +SELECT a+0, b+0, c+0 FROM t1 WHERE d<100; +a+0 b+0 c+0 +0 1048575 18446744073709551615 +1 0 18446744073709551614 +INSERT INTO t1 (a,b,c,d) VALUES (b'1', 'f', 0xFF, 0x0); +SELECT a+0, b+0, c+0 FROM t1 WHERE d IN (0, 2); +a+0 b+0 c+0 +1 0 18446744073709551614 +1 102 255 +DELETE FROM t1; +INSERT INTO t1 (a,b,c,d) VALUES (0x10,0,0,1); +Warnings: +Warning 1264 Out of range value for column 'a' at row 1 +SELECT a+0,b+0,c+0,d+0 FROM t1; +a+0 b+0 c+0 d+0 +1 0 0 1 +INSERT INTO t1 (a,b,c,d) VALUES (0x01,0,0x10000000000000000,0); +Warnings: +Warning 1264 Out of range value for column 'c' at row 1 +SELECT a+0,b+0,c+0,d+0 FROM t1; +a+0 b+0 c+0 d+0 +1 0 0 1 +1 0 18446744073709551615 0 +DROP TABLE t1; +CREATE TABLE t1 (pk INT PRIMARY KEY, a BIT(65) NULL) ENGINE=rocksdb; +ERROR 42000: Display width out of range for 'a' (max = 64) +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 ( +c BIT NULL, +c1 BIT NULL DEFAULT NULL, +c2 BIT NULL DEFAULT 1, +pk INT AUTO_INCREMENT PRIMARY KEY +) ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +c bit(1) YES NULL +c1 bit(1) YES NULL +c2 bit(1) YES b'1' +pk int(11) NO PRI NULL auto_increment +INSERT INTO t1 (c,c1,c2) VALUES (NULL,NULL,NULL); +INSERT INTO t1 (c,c1,c2) VALUES (1,1,1); +INSERT INTO t1 () VALUES (); +SELECT pk, HEX(c), HEX(c1), HEX(c2) FROM t1 ORDER BY pk; +pk HEX(c) HEX(c1) HEX(c2) +1 NULL NULL NULL +2 1 1 1 +3 NULL NULL 1 +SELECT pk, HEX(c2) FROM t1 ORDER BY pk; +pk HEX(c2) +1 NULL +2 1 +3 1 +DROP TABLE t1; +######################## +# BLOB columns +######################## +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 ( +pk INT AUTO_INCREMENT PRIMARY KEY, +b BLOB NULL, +b0 
BLOB(0) NULL, +b1 BLOB(1) NULL, +b300 BLOB(300) NULL, +bm BLOB(65535) NULL, +b70k BLOB(70000) NULL, +b17m BLOB(17000000) NULL, +t TINYBLOB NULL, +m MEDIUMBLOB NULL, +l LONGBLOB NULL +) ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +pk int(11) NO PRI NULL auto_increment +b blob YES NULL +b0 blob YES NULL +b1 tinyblob YES NULL +b300 blob YES NULL +bm blob YES NULL +b70k mediumblob YES NULL +b17m longblob YES NULL +t tinyblob YES NULL +m mediumblob YES NULL +l longblob YES NULL +INSERT INTO t1 (b,b0,b1,b300,bm,b70k,b17m,t,m,l) VALUES +('','','','','','','','','',''), +('a','b','c','d','e','f','g','h','i','j'), +('test1','test2','test3','test4','test5','test6','test7','test8','test9','test10'), +( REPEAT('a',65535), REPEAT('b',65535), REPEAT('c',255), REPEAT('d',65535), REPEAT('e',65535), REPEAT('f',1048576), HEX(REPEAT('g',1048576)), REPEAT('h',255), REPEAT('i',1048576), HEX(REPEAT('j',1048576)) ); +SELECT LENGTH(b), LENGTH(b0), LENGTH(b1), LENGTH(b300), LENGTH(bm), LENGTH(b70k), LENGTH(b17m), LENGTH(t), LENGTH(m), LENGTH(l) FROM t1; +LENGTH(b) LENGTH(b0) LENGTH(b1) LENGTH(b300) LENGTH(bm) LENGTH(b70k) LENGTH(b17m) LENGTH(t) LENGTH(m) LENGTH(l) +0 0 0 0 0 0 0 0 0 0 +1 1 1 1 1 1 1 1 1 1 +5 5 5 5 5 5 5 5 5 6 +65535 65535 255 65535 65535 1048576 2097152 255 1048576 2097152 +INSERT INTO t1 (b,b0,b1,b300,bm,b70k,b17m,t,m,l) VALUES +( REPEAT('a',65536), REPEAT('b',65536), REPEAT('c',256), REPEAT('d',65536), REPEAT('e',65536), REPEAT('f',1048576), REPEAT('g',1048576), REPEAT('h',256), REPEAT('i',1048576), REPEAT('j',1048576) ); +Warnings: +Warning 1265 Data truncated for column 'b' at row 1 +Warning 1265 Data truncated for column 'b0' at row 1 +Warning 1265 Data truncated for column 'b1' at row 1 +Warning 1265 Data truncated for column 'b300' at row 1 +Warning 1265 Data truncated for column 'bm' at row 1 +Warning 1265 Data truncated for column 't' at row 1 +SELECT LENGTH(b), LENGTH(b0), LENGTH(b1), LENGTH(b300), LENGTH(bm), LENGTH(b70k), 
LENGTH(b17m), LENGTH(t), LENGTH(m), LENGTH(l) FROM t1; +LENGTH(b) LENGTH(b0) LENGTH(b1) LENGTH(b300) LENGTH(bm) LENGTH(b70k) LENGTH(b17m) LENGTH(t) LENGTH(m) LENGTH(l) +0 0 0 0 0 0 0 0 0 0 +1 1 1 1 1 1 1 1 1 1 +5 5 5 5 5 5 5 5 5 6 +65535 65535 255 65535 65535 1048576 1048576 255 1048576 1048576 +65535 65535 255 65535 65535 1048576 2097152 255 1048576 2097152 +ALTER TABLE t1 ADD COLUMN bbb BLOB(4294967296); +ERROR 42000: Display width out of range for 'bbb' (max = 4294967295) +DROP TABLE t1; +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 ( +c BLOB NULL, +c1 BLOB NULL DEFAULT NULL, +c2 BLOB NULL DEFAULT '', +pk INT AUTO_INCREMENT PRIMARY KEY +) ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +c blob YES NULL +c1 blob YES NULL +c2 blob YES '' +pk int(11) NO PRI NULL auto_increment +INSERT INTO t1 (c,c1,c2) VALUES (NULL,NULL,NULL); +INSERT INTO t1 (c,c1,c2) VALUES ('','',''); +INSERT INTO t1 () VALUES (); +SELECT pk, HEX(c), HEX(c1), HEX(c2) FROM t1 ORDER BY pk; +pk HEX(c) HEX(c1) HEX(c2) +1 NULL NULL NULL +2 +3 NULL NULL +SELECT pk, HEX(c2) FROM t1 ORDER BY pk; +pk HEX(c2) +1 NULL +2 +3 +DROP TABLE t1; +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 ( +c TINYBLOB NULL, +c1 TINYBLOB NULL DEFAULT NULL, +c2 TINYBLOB NULL DEFAULT '', +pk INT AUTO_INCREMENT PRIMARY KEY +) ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +c tinyblob YES NULL +c1 tinyblob YES NULL +c2 tinyblob YES '' +pk int(11) NO PRI NULL auto_increment +INSERT INTO t1 (c,c1,c2) VALUES (NULL,NULL,NULL); +INSERT INTO t1 (c,c1,c2) VALUES ('','',''); +INSERT INTO t1 () VALUES (); +SELECT pk, HEX(c), HEX(c1), HEX(c2) FROM t1 ORDER BY pk; +pk HEX(c) HEX(c1) HEX(c2) +1 NULL NULL NULL +2 +3 NULL NULL +SELECT pk, HEX(c2) FROM t1 ORDER BY pk; +pk HEX(c2) +1 NULL +2 +3 +DROP TABLE t1; +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 ( +c MEDIUMBLOB NULL, +c1 MEDIUMBLOB NULL DEFAULT NULL, +c2 MEDIUMBLOB NULL DEFAULT '', +pk INT AUTO_INCREMENT PRIMARY KEY +) ENGINE=rocksdb; +SHOW 
COLUMNS IN t1; +Field Type Null Key Default Extra +c mediumblob YES NULL +c1 mediumblob YES NULL +c2 mediumblob YES '' +pk int(11) NO PRI NULL auto_increment +INSERT INTO t1 (c,c1,c2) VALUES (NULL,NULL,NULL); +INSERT INTO t1 (c,c1,c2) VALUES ('','',''); +INSERT INTO t1 () VALUES (); +SELECT pk, HEX(c), HEX(c1), HEX(c2) FROM t1 ORDER BY pk; +pk HEX(c) HEX(c1) HEX(c2) +1 NULL NULL NULL +2 +3 NULL NULL +SELECT pk, HEX(c2) FROM t1 ORDER BY pk; +pk HEX(c2) +1 NULL +2 +3 +DROP TABLE t1; +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 ( +c LONGBLOB NULL, +c1 LONGBLOB NULL DEFAULT NULL, +c2 LONGBLOB NULL DEFAULT '', +pk INT AUTO_INCREMENT PRIMARY KEY +) ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +c longblob YES NULL +c1 longblob YES NULL +c2 longblob YES '' +pk int(11) NO PRI NULL auto_increment +INSERT INTO t1 (c,c1,c2) VALUES (NULL,NULL,NULL); +INSERT INTO t1 (c,c1,c2) VALUES ('','',''); +INSERT INTO t1 () VALUES (); +SELECT pk, HEX(c), HEX(c1), HEX(c2) FROM t1 ORDER BY pk; +pk HEX(c) HEX(c1) HEX(c2) +1 NULL NULL NULL +2 +3 NULL NULL +SELECT pk, HEX(c2) FROM t1 ORDER BY pk; +pk HEX(c2) +1 NULL +2 +3 +DROP TABLE t1; +######################## +# BOOL columns +######################## +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 ( +pk INT AUTO_INCREMENT PRIMARY KEY, +b1 BOOL NULL, +b2 BOOLEAN NULL +) ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +pk int(11) NO PRI NULL auto_increment +b1 tinyint(1) YES NULL +b2 tinyint(1) YES NULL +INSERT INTO t1 (b1,b2) VALUES (1,TRUE); +SELECT b1,b2 FROM t1; +b1 b2 +1 1 +INSERT INTO t1 (b1,b2) VALUES (FALSE,0); +SELECT b1,b2 FROM t1; +b1 b2 +0 0 +1 1 +INSERT INTO t1 (b1,b2) VALUES (2,3); +SELECT b1,b2 FROM t1; +b1 b2 +0 0 +1 1 +2 3 +INSERT INTO t1 (b1,b2) VALUES (-1,-2); +SELECT b1,b2 FROM t1; +b1 b2 +-1 -2 +0 0 +1 1 +2 3 +SELECT IF(b1,'true','false') AS a, IF(b2,'true','false') AS b FROM t1; +a b +false false +true true +true true +true true +SELECT b1,b2 FROM t1 WHERE b1 = TRUE; +b1 b2 +1 
1 +SELECT b1,b2 FROM t1 WHERE b2 = FALSE; +b1 b2 +0 0 +INSERT INTO t1 (b1,b2) VALUES ('a','b'); +Warnings: +Warning 1366 Incorrect integer value: 'a' for column 'b1' at row 1 +Warning 1366 Incorrect integer value: 'b' for column 'b2' at row 1 +SELECT b1,b2 FROM t1; +b1 b2 +-1 -2 +0 0 +0 0 +1 1 +2 3 +INSERT INTO t1 (b1,b2) VALUES (128,-129); +Warnings: +Warning 1264 Out of range value for column 'b1' at row 1 +Warning 1264 Out of range value for column 'b2' at row 1 +SELECT b1,b2 FROM t1; +b1 b2 +-1 -2 +0 0 +0 0 +1 1 +127 -128 +2 3 +ALTER TABLE t1 ADD COLUMN b3 BOOLEAN UNSIGNED NULL; +ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'UNSIGNED NULL' at line 1 +ALTER TABLE ADD COLUMN b3 BOOL ZEROFILL NULL; +ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'ADD COLUMN b3 BOOL ZEROFILL NULL' at line 1 +DROP TABLE t1; +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 ( +c BOOL NULL, +c1 BOOL NULL DEFAULT NULL, +c2 BOOL NULL DEFAULT '0', +pk INT AUTO_INCREMENT PRIMARY KEY +) ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +c tinyint(1) YES NULL +c1 tinyint(1) YES NULL +c2 tinyint(1) YES 0 +pk int(11) NO PRI NULL auto_increment +INSERT INTO t1 (c,c1,c2) VALUES (NULL,NULL,NULL); +INSERT INTO t1 (c,c1,c2) VALUES ('0','0','0'); +INSERT INTO t1 () VALUES (); +SELECT pk, HEX(c), HEX(c1), HEX(c2) FROM t1 ORDER BY pk; +pk HEX(c) HEX(c1) HEX(c2) +1 NULL NULL NULL +2 0 0 0 +3 NULL NULL 0 +SELECT pk, HEX(c2) FROM t1 ORDER BY pk; +pk HEX(c2) +1 NULL +2 0 +3 0 +DROP TABLE t1; +######################## +# CHAR columns +######################## +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 ( +c CHAR NULL, +c0 CHAR(0) NULL, +c1 CHAR(1) NULL, +c20 CHAR(20) NULL, +c255 CHAR(255) NULL, +PRIMARY KEY (c255) +) ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra 
+c char(1) YES NULL +c0 char(0) YES NULL +c1 char(1) YES NULL +c20 char(20) YES NULL +c255 char(255) NO PRI NULL +INSERT INTO t1 (c,c0,c1,c20,c255) VALUES ('','','','',''); +INSERT INTO t1 (c,c0,c1,c20,c255) VALUES ('a','','b','abcdefghi klmnopqrst', 'Creating an article for the Knowledgebase is similar to asking questions. First, navigate to the category where you feel the article should be. Once there, double check that an article doesn\'t already exist which would work.'); +SELECT c,c0,c1,c20,c255 FROM t1; +c c0 c1 c20 c255 + +a b abcdefghi klmnopqrst Creating an article for the Knowledgebase is similar to asking questions. First, navigate to the category where you feel the article should be. Once there, double check that an article doesn't already exist which would work. +INSERT INTO t1 (c,c0,c1,c20,c255) VALUES ('abc', 'a', 'abc', REPEAT('a',21), REPEAT('x',256)); +Warnings: +Warning 1265 Data truncated for column 'c' at row 1 +Warning 1265 Data truncated for column 'c0' at row 1 +Warning 1265 Data truncated for column 'c1' at row 1 +Warning 1265 Data truncated for column 'c20' at row 1 +Warning 1265 Data truncated for column 'c255' at row 1 +INSERT INTO t1 (c,c0,c1,c20,c255) SELECT c255, c255, c255, c255, CONCAT('a',c255,c1) FROM t1; +Warnings: +Warning 1265 Data truncated for column 'c' at row 5 +Warning 1265 Data truncated for column 'c0' at row 5 +Warning 1265 Data truncated for column 'c1' at row 5 +Warning 1265 Data truncated for column 'c20' at row 5 +Warning 1265 Data truncated for column 'c' at row 6 +Warning 1265 Data truncated for column 'c0' at row 6 +Warning 1265 Data truncated for column 'c1' at row 6 +Warning 1265 Data truncated for column 'c20' at row 6 +Warning 1265 Data truncated for column 'c255' at row 6 +SELECT c,c0,c1,c20,c255 FROM t1; +c c0 c1 c20 c255 + + a +C C Creating an article aCreating an article for the Knowledgebase is similar to asking questions. First, navigate to the category where you feel the article should be. 
Once there, double check that an article doesn't already exist which would work.b +a a aaaaaaaaaaaaaaaaaaaa xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx +a b abcdefghi klmnopqrst Creating an article for the Knowledgebase is similar to asking questions. First, navigate to the category where you feel the article should be. Once there, double check that an article doesn't already exist which would work. +x x xxxxxxxxxxxxxxxxxxxx axxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx +SELECT DISTINCT c20, REPEAT('a',LENGTH(c20)), COUNT(*) FROM t1 GROUP BY c1, c20; +c20 REPEAT('a',LENGTH(c20)) COUNT(*) + 2 +Creating an article aaaaaaaaaaaaaaaaaaa 1 +aaaaaaaaaaaaaaaaaaaa aaaaaaaaaaaaaaaaaaaa 1 +abcdefghi klmnopqrst aaaaaaaaaaaaaaaaaaaa 1 +xxxxxxxxxxxxxxxxxxxx aaaaaaaaaaaaaaaaaaaa 1 +ALTER TABLE t1 ADD COLUMN c257 CHAR(257) NULL; +ERROR 42000: Column length too big for column 'c257' (max = 255); use BLOB or TEXT instead +DROP TABLE t1; +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 ( +c CHAR NULL, +c1 CHAR NULL DEFAULT NULL, +c2 CHAR NULL DEFAULT '_', +pk INT AUTO_INCREMENT PRIMARY KEY +) ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +c char(1) YES NULL +c1 char(1) YES NULL +c2 char(1) YES _ +pk int(11) NO PRI NULL auto_increment +INSERT INTO t1 (c,c1,c2) VALUES (NULL,NULL,NULL); +INSERT INTO t1 (c,c1,c2) VALUES ('_','_','_'); +INSERT INTO t1 () VALUES (); +SELECT pk, HEX(c), HEX(c1), HEX(c2) FROM t1 ORDER BY pk; +pk HEX(c) HEX(c1) HEX(c2) +1 NULL NULL NULL +2 5F 5F 5F +3 NULL NULL 5F +SELECT pk, HEX(c2) FROM t1 ORDER BY pk; +pk HEX(c2) +1 NULL 
+2 5F +3 5F +DROP TABLE t1; +######################## +# VARCHAR columns +######################## +DROP TABLE IF EXISTS t1, t2; +CREATE TABLE t1 ( +v0 VARCHAR(0) NULL, +v1 VARCHAR(1) NULL, +v64 VARCHAR(64) NULL, +v65000 VARCHAR(65000) NULL, +PRIMARY KEY (v64) +) ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +v0 varchar(0) YES NULL +v1 varchar(1) YES NULL +v64 varchar(64) NO PRI NULL +v65000 varchar(65000) YES NULL +CREATE TABLE t2 (v VARCHAR(65532), PRIMARY KEY (v(255))) ENGINE=rocksdb; +SHOW COLUMNS IN t2; +Field Type Null Key Default Extra +v varchar(65532) NO PRI NULL +INSERT INTO t1 (v0,v1,v64,v65000) VALUES ('','','',''); +INSERT INTO t1 (v0,v1,v64,v65000) VALUES ('','y','Once there, double check that an article doesn\'t already exist','Here is a list of recommended books on MariaDB and MySQL. We\'ve provided links to Amazon.com here for convenience, but they can be found at many other bookstores, both online and off. + + If you want to have your favorite MySQL / MariaDB book listed here, please leave a comment. + For developers who want to code on MariaDB or MySQL + + * Understanding MySQL Internals by Sasha Pachev, former MySQL developer at MySQL AB. + o This is the only book we know about that describes the internals of MariaDB / MySQL. A must have for anyone who wants to understand and develop on MariaDB! + o Not all topics are covered and some parts are slightly outdated, but still the best book on this topic. + * MySQL 5.1 Plugin Development by Sergei Golubchik and Andrew Hutchings + o A must read for anyone wanting to write a plugin for MariaDB, written by the Sergei who designed the plugin interface for MySQL and MariaDB! + + For MariaDB / MySQL end users + + * MariaDB Crash Course by Ben Forta + o First MariaDB book! + o For people who want to learn SQL and the basics of MariaDB. + o Now shipping. Purchase at Amazon.com or your favorite bookseller. + + * SQL-99 Complete, Really by Peter Gulutzan & Trudy Pelzer. 
+ o Everything you wanted to know about the SQL 99 standard. Excellent reference book! + o Free to read in the Knowledgebase! + + * MySQL (4th Edition) by Paul DuBois + o The \'default\' book to read if you wont to learn to use MySQL / MariaDB. + + * MySQL Cookbook by Paul DuBois + o A lot of examples of how to use MySQL. As with all of Paul\'s books, it\'s worth its weight in gold and even enjoyable reading for such a \'dry\' subject. + + * High Performance MySQL, Second Edition, By Baron Schwartz, Peter Zaitsev, Vadim Tkachenko, Jeremy D. Zawodny, Arjen Lentz, Derek J. Balling, et al. + o \"High Performance MySQL is the definitive guide to building fast, reliable systems with MySQL. Written by noted experts with years of real-world experience building very large systems, this book covers every aspect of MySQL performance in detail, and focuses on robustness, security, and data integrity. Learn advanced techniques in depth so you can bring out MySQL\'s full power.\" (From the book description at O\'Reilly) + + * MySQL Admin Cookbook + o A quick step-by-step guide for MySQL users and database administrators to tackle real-world challenges with MySQL configuration and administration + + * MySQL 5.0 Certification Study Guide, By Paul DuBois, Stefan Hinz, Carsten Pedersen + o This is the official guide to cover the passing of the two MySQL Certification examinations. It is valid till version 5.0 of the server, so while it misses all the features available in MySQL 5.1 and greater (including MariaDB 5.1 and greater), it provides a good basic understanding of MySQL for the end-user. '); +SELECT v0,v1,v64,v65000 FROM t1; +v0 v1 v64 v65000 + + + + + + + + + + + + y Once there, double check that an article doesn't already exist Here is a list of recommended books on MariaDB and MySQL. We've provided links to Amazon.com here for convenience, but they can be found at many other bookstores, both online and off. 
+ o "High Performance MySQL is the definitive guide to building fast, reliable systems with MySQL. Written by noted experts with years of real-world experience building very large systems, this book covers every aspect of MySQL performance in detail, and focuses on robustness, security, and data integrity. Learn advanced techniques in depth so you can bring out MySQL's full power." (From the book description at O'Reilly) + o A lot of examples of how to use MySQL. As with all of Paul's books, it's worth its weight in gold and even enjoyable reading for such a 'dry' subject. + o A must read for anyone wanting to write a plugin for MariaDB, written by the Sergei who designed the plugin interface for MySQL and MariaDB! + o A quick step-by-step guide for MySQL users and database administrators to tackle real-world challenges with MySQL configuration and administration + o Everything you wanted to know about the SQL 99 standard. Excellent reference book! + o First MariaDB book! + o For people who want to learn SQL and the basics of MariaDB. + o Free to read in the Knowledgebase! + o Not all topics are covered and some parts are slightly outdated, but still the best book on this topic. + o Now shipping. Purchase at Amazon.com or your favorite bookseller. + o The 'default' book to read if you wont to learn to use MySQL / MariaDB. + o This is the official guide to cover the passing of the two MySQL Certification examinations. It is valid till version 5.0 of the server, so while it misses all the features available in MySQL 5.1 and greater (including MariaDB 5.1 and greater), it provides a good basic understanding of MySQL for the end-user. + o This is the only book we know about that describes the internals of MariaDB / MySQL. A must have for anyone who wants to understand and develop on MariaDB! + * High Performance MySQL, Second Edition, By Baron Schwartz, Peter Zaitsev, Vadim Tkachenko, Jeremy D. Zawodny, Arjen Lentz, Derek J. Balling, et al. 
+ * MariaDB Crash Course by Ben Forta + * MySQL (4th Edition) by Paul DuBois + * MySQL 5.0 Certification Study Guide, By Paul DuBois, Stefan Hinz, Carsten Pedersen + * MySQL 5.1 Plugin Development by Sergei Golubchik and Andrew Hutchings + * MySQL Admin Cookbook + * MySQL Cookbook by Paul DuBois + * SQL-99 Complete, Really by Peter Gulutzan & Trudy Pelzer. + * Understanding MySQL Internals by Sasha Pachev, former MySQL developer at MySQL AB. + For MariaDB / MySQL end users + For developers who want to code on MariaDB or MySQL + If you want to have your favorite MySQL / MariaDB book listed here, please leave a comment. +INSERT INTO t1 (v0,v1,v64,v65000) VALUES ('y', 'yy', REPEAT('c',65), REPEAT('abcdefghi ',6501)); +Warnings: +Warning 1265 Data truncated for column 'v0' at row 1 +Warning 1265 Data truncated for column 'v1' at row 1 +Warning 1265 Data truncated for column 'v64' at row 1 +Warning 1265 Data truncated for column 'v65000' at row 1 +INSERT INTO t1 (v0,v1,v64,v65000) SELECT v65000, v65000, CONCAT('a',v65000), CONCAT(v65000,v1) FROM t1; +Warnings: +Warning 1265 Data truncated for column 'v0' at row 5 +Warning 1265 Data truncated for column 'v1' at row 5 +Warning 1265 Data truncated for column 'v64' at row 5 +Warning 1265 Data truncated for column 'v65000' at row 5 +Warning 1265 Data truncated for column 'v0' at row 6 +Warning 1265 Data truncated for column 'v1' at row 6 +Warning 1265 Data truncated for column 'v64' at row 6 +SELECT v0, v1, v64, LENGTH(v65000) FROM t1; +v0 v1 v64 LENGTH(v65000) + 0 + a 0 + H aHere is a list of recommended books on MariaDB and MySQL. 
We've 2966 + a aabcdefghi abcdefghi abcdefghi abcdefghi abcdefghi abcdefghi abc 65000 + y Once there, double check that an article doesn't already exist 2965 + y cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc 65000 +ALTER TABLE t1 ADD COLUMN v65536 VARCHAR(65536) NULL; +Warnings: +Note 1246 Converting column 'v65536' from VARCHAR to TEXT +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +v0 varchar(0) YES NULL +v1 varchar(1) YES NULL +v64 varchar(64) NO PRI NULL +v65000 varchar(65000) YES NULL +v65536 mediumtext YES NULL +DROP TABLE t1, t2; +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 ( +c VARCHAR(64) NULL, +c1 VARCHAR(64) NULL DEFAULT NULL, +c2 VARCHAR(64) NULL DEFAULT 'test default', +pk INT AUTO_INCREMENT PRIMARY KEY +) ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +c varchar(64) YES NULL +c1 varchar(64) YES NULL +c2 varchar(64) YES test default +pk int(11) NO PRI NULL auto_increment +INSERT INTO t1 (c,c1,c2) VALUES (NULL,NULL,NULL); +INSERT INTO t1 (c,c1,c2) VALUES ('test default','test default','test default'); +INSERT INTO t1 () VALUES (); +SELECT pk, HEX(c), HEX(c1), HEX(c2) FROM t1 ORDER BY pk; +pk HEX(c) HEX(c1) HEX(c2) +1 NULL NULL NULL +2 746573742064656661756C74 746573742064656661756C74 746573742064656661756C74 +3 NULL NULL 746573742064656661756C74 +SELECT pk, HEX(c2) FROM t1 ORDER BY pk; +pk HEX(c2) +1 NULL +2 746573742064656661756C74 +3 746573742064656661756C74 +DROP TABLE t1; +######################## +# date and time columns +######################## +set @col_opt_nullsave_time_zone=@@time_zone; +set time_zone='UTC'; +DROP TABLE IF EXISTS t1; +set @save_time_zone=@@time_zone; +set time_zone='UTC'; +CREATE TABLE t1 ( +d DATE NULL, +dt DATETIME NULL, +ts TIMESTAMP NULL, +t TIME NULL, +y YEAR NULL, +y4 YEAR(4) NULL, +y2 YEAR(2) NULL, +pk DATETIME PRIMARY KEY +) ENGINE=rocksdb; +Warnings: +Note 1287 'YEAR(2)' is deprecated and will be removed in a future release. 
Please use YEAR(4) instead +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +d date YES NULL +dt datetime YES NULL +ts timestamp YES NULL +t time YES NULL +y year(4) YES NULL +y4 year(4) YES NULL +y2 year(2) YES NULL +pk datetime NO PRI NULL +SET @tm = '2012-04-09 05:27:00'; +INSERT INTO t1 (d,dt,ts,t,y,y4,y2,pk) VALUES +('1000-01-01', '1000-01-01 00:00:00', FROM_UNIXTIME(1), '-838:59:59', '1901', '1901', '00','2012-12-12 12:12:12'), +('9999-12-31', '9999-12-31 23:59:59', FROM_UNIXTIME(2147483647), '838:59:59', '2155', '2155', '99','2012-12-12 12:12:13'), +('0000-00-00', '0000-00-00 00:00:00', '0000-00-00 00:00:00', '00:00:00', '0', '0', '0','2012-12-12 12:12:14'), +(DATE(@tm),@tm,TIMESTAMP(@tm),TIME(@tm),YEAR(@tm),YEAR(@tm),YEAR(@tm),'2012-12-12 12:12:15'); +SELECT d,dt,ts,t,y,y4,y2 FROM t1; +d dt ts t y y4 y2 +0000-00-00 0000-00-00 00:00:00 0000-00-00 00:00:00 00:00:00 2000 2000 00 +1000-01-01 1000-01-01 00:00:00 1970-01-01 00:00:01 -838:59:59 1901 1901 00 +2012-04-09 2012-04-09 05:27:00 2012-04-09 05:27:00 05:27:00 2012 2012 12 +9999-12-31 9999-12-31 23:59:59 2038-01-19 03:14:07 838:59:59 2155 2155 99 +INSERT INTO t1 (d,dt,ts,t,y,y4,y2,pk) VALUES +('999-13-32', '999-11-31 00:00:00', '0', '-839:00:00', '1900', '1900', '-1','2012-12-12 12:12:16'); +Warnings: +Warning 1265 Data truncated for column 'd' at row 1 +Warning 1265 Data truncated for column 'dt' at row 1 +Warning 1265 Data truncated for column 'ts' at row 1 +Warning 1264 Out of range value for column 't' at row 1 +Warning 1264 Out of range value for column 'y' at row 1 +Warning 1264 Out of range value for column 'y4' at row 1 +Warning 1264 Out of range value for column 'y2' at row 1 +SELECT d,dt,ts,t,y,y4,y2 FROM t1; +d dt ts t y y4 y2 +1000-01-01 1000-01-01 00:00:00 1970-01-01 00:00:01 -838:59:59 1901 1901 00 +9999-12-31 9999-12-31 23:59:59 2038-01-19 03:14:07 838:59:59 2155 2155 99 +0000-00-00 0000-00-00 00:00:00 0000-00-00 00:00:00 00:00:00 2000 2000 00 +2012-04-09 2012-04-09 05:27:00 2012-04-09 
05:27:00 05:27:00 2012 2012 12 +0000-00-00 0000-00-00 00:00:00 0000-00-00 00:00:00 -838:59:59 0000 0000 00 +set time_zone=@save_time_zone; +DROP TABLE t1; +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 ( +c DATE NULL, +c1 DATE NULL DEFAULT NULL, +c2 DATE NULL DEFAULT '2012-12-21', +pk INT AUTO_INCREMENT PRIMARY KEY +) ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +c date YES NULL +c1 date YES NULL +c2 date YES 2012-12-21 +pk int(11) NO PRI NULL auto_increment +INSERT INTO t1 (c,c1,c2) VALUES (NULL,NULL,NULL); +INSERT INTO t1 (c,c1,c2) VALUES ('2012-12-21','2012-12-21','2012-12-21'); +INSERT INTO t1 () VALUES (); +SELECT pk, HEX(c), HEX(c1), HEX(c2) FROM t1 ORDER BY pk; +pk HEX(c) HEX(c1) HEX(c2) +1 NULL NULL NULL +2 323031322D31322D3231 323031322D31322D3231 323031322D31322D3231 +3 NULL NULL 323031322D31322D3231 +SELECT pk, HEX(c2) FROM t1 ORDER BY pk; +pk HEX(c2) +1 NULL +2 323031322D31322D3231 +3 323031322D31322D3231 +DROP TABLE t1; +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 ( +c DATETIME NULL, +c1 DATETIME NULL DEFAULT NULL, +c2 DATETIME NULL DEFAULT '2012-12-21 12:21:12', +pk INT AUTO_INCREMENT PRIMARY KEY +) ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +c datetime YES NULL +c1 datetime YES NULL +c2 datetime YES 2012-12-21 12:21:12 +pk int(11) NO PRI NULL auto_increment +INSERT INTO t1 (c,c1,c2) VALUES (NULL,NULL,NULL); +INSERT INTO t1 (c,c1,c2) VALUES ('2012-12-21 12:21:12','2012-12-21 12:21:12','2012-12-21 12:21:12'); +INSERT INTO t1 () VALUES (); +SELECT pk, HEX(c), HEX(c1), HEX(c2) FROM t1 ORDER BY pk; +pk HEX(c) HEX(c1) HEX(c2) +1 NULL NULL NULL +2 323031322D31322D32312031323A32313A3132 323031322D31322D32312031323A32313A3132 323031322D31322D32312031323A32313A3132 +3 NULL NULL 323031322D31322D32312031323A32313A3132 +SELECT pk, HEX(c2) FROM t1 ORDER BY pk; +pk HEX(c2) +1 NULL +2 323031322D31322D32312031323A32313A3132 +3 323031322D31322D32312031323A32313A3132 +DROP TABLE t1; +DROP TABLE IF EXISTS t1; +CREATE 
TABLE t1 ( +c TIMESTAMP NULL, +c1 TIMESTAMP NULL DEFAULT NULL, +c2 TIMESTAMP NULL DEFAULT '2012-12-21 12:21:12', +pk INT AUTO_INCREMENT PRIMARY KEY +) ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +c timestamp YES NULL +c1 timestamp YES NULL +c2 timestamp YES 2012-12-21 12:21:12 +pk int(11) NO PRI NULL auto_increment +INSERT INTO t1 (c,c1,c2) VALUES (NULL,NULL,NULL); +INSERT INTO t1 (c,c1,c2) VALUES ('2012-12-21 12:21:12','2012-12-21 12:21:12','2012-12-21 12:21:12'); +INSERT INTO t1 () VALUES (); +SELECT pk, HEX(c), HEX(c1), HEX(c2) FROM t1 ORDER BY pk; +pk HEX(c) HEX(c1) HEX(c2) +1 NULL NULL NULL +2 323031322D31322D32312031323A32313A3132 323031322D31322D32312031323A32313A3132 323031322D31322D32312031323A32313A3132 +3 NULL NULL 323031322D31322D32312031323A32313A3132 +SELECT pk, HEX(c2) FROM t1 ORDER BY pk; +pk HEX(c2) +1 NULL +2 323031322D31322D32312031323A32313A3132 +3 323031322D31322D32312031323A32313A3132 +DROP TABLE t1; +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 ( +c TIME NULL, +c1 TIME NULL DEFAULT NULL, +c2 TIME NULL DEFAULT '12:21:12', +pk INT AUTO_INCREMENT PRIMARY KEY +) ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +c time YES NULL +c1 time YES NULL +c2 time YES 12:21:12 +pk int(11) NO PRI NULL auto_increment +INSERT INTO t1 (c,c1,c2) VALUES (NULL,NULL,NULL); +INSERT INTO t1 (c,c1,c2) VALUES ('12:21:12','12:21:12','12:21:12'); +INSERT INTO t1 () VALUES (); +SELECT pk, HEX(c), HEX(c1), HEX(c2) FROM t1 ORDER BY pk; +pk HEX(c) HEX(c1) HEX(c2) +1 NULL NULL NULL +2 31323A32313A3132 31323A32313A3132 31323A32313A3132 +3 NULL NULL 31323A32313A3132 +SELECT pk, HEX(c2) FROM t1 ORDER BY pk; +pk HEX(c2) +1 NULL +2 31323A32313A3132 +3 31323A32313A3132 +DROP TABLE t1; +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 ( +c YEAR NULL, +c1 YEAR NULL DEFAULT NULL, +c2 YEAR NULL DEFAULT '2012', +pk INT AUTO_INCREMENT PRIMARY KEY +) ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +c year(4) YES NULL +c1 
year(4) YES NULL +c2 year(4) YES 2012 +pk int(11) NO PRI NULL auto_increment +INSERT INTO t1 (c,c1,c2) VALUES (NULL,NULL,NULL); +INSERT INTO t1 (c,c1,c2) VALUES ('2012','2012','2012'); +INSERT INTO t1 () VALUES (); +SELECT pk, HEX(c), HEX(c1), HEX(c2) FROM t1 ORDER BY pk; +pk HEX(c) HEX(c1) HEX(c2) +1 NULL NULL NULL +2 7DC 7DC 7DC +3 NULL NULL 7DC +SELECT pk, HEX(c2) FROM t1 ORDER BY pk; +pk HEX(c2) +1 NULL +2 7DC +3 7DC +DROP TABLE t1; +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 ( +c YEAR(2) NULL, +c1 YEAR(2) NULL DEFAULT NULL, +c2 YEAR(2) NULL DEFAULT '12', +pk INT AUTO_INCREMENT PRIMARY KEY +) ENGINE=rocksdb; +Warnings: +Note 1287 'YEAR(2)' is deprecated and will be removed in a future release. Please use YEAR(4) instead +Note 1287 'YEAR(2)' is deprecated and will be removed in a future release. Please use YEAR(4) instead +Note 1287 'YEAR(2)' is deprecated and will be removed in a future release. Please use YEAR(4) instead +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +c year(2) YES NULL +c1 year(2) YES NULL +c2 year(2) YES 12 +pk int(11) NO PRI NULL auto_increment +INSERT INTO t1 (c,c1,c2) VALUES (NULL,NULL,NULL); +INSERT INTO t1 (c,c1,c2) VALUES ('12','12','12'); +INSERT INTO t1 () VALUES (); +SELECT pk, HEX(c), HEX(c1), HEX(c2) FROM t1 ORDER BY pk; +pk HEX(c) HEX(c1) HEX(c2) +1 NULL NULL NULL +2 C C C +3 NULL NULL C +SELECT pk, HEX(c2) FROM t1 ORDER BY pk; +pk HEX(c2) +1 NULL +2 C +3 C +DROP TABLE t1; +set time_zone=@col_opt_nullsave_time_zone; +######################## +# ENUM columns +######################## +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 ( +a ENUM('') NULL, +b ENUM('test1','test2','test3','test4','test5') NULL, +c ENUM('1','2','3','4','5','6','7','8','9','a','b','c','d','e','f','g','h','i','j','k','l','m','n','o','p','q','r','s','t','u','v','w','x','y','z',' 
','11','12','13','14','15','16','17','18','19','1a','1b','1c','1d','1e','1f','1g','1h','1i','1j','1k','1l','1m','1n','1o','1p','1q','1r','1s','1t','1u','1v','1w','1x','1y','1z','20','21','22','23','24','25','26','27','28','29','2a','2b','2c','2d','2e','2f','2g','2h','2i','2j','2k','2l','2m','2n','2o','2p','2q','2r','2s','2t','2u','2v','2w','2x','2y','2z','30','31','32','33','34','35','36','37','38','39','3a','3b','3c','3d','3e','3f','3g','3h','3i','3j','3k','3l','3m','3n','3o','3p','3q','3r','3s','3t','3u','3v','3w','3x','3y','3z','40','41','42','43','44','45','46','47','48','49','4a','4b','4c','4d','4e','4f','4g','4h','4i','4j','4k','4l','4m','4n','4o','4p','4q','4r','4s','4t','4u','4v','4w','4x','4y','4z','50','51','52','53','54','55','56','57','58','59','5a','5b','5c','5d','5e','5f','5g','5h','5i','5j','5k','5l','5m','5n','5o','5p','5q','5r','5s','5t','5u','5v','5w','5x','5y','5z','60','61','62','63','64','65','66','67','68','69','6a','6b','6c','6d','6e','6f','6g','6h','6i','6j','6k','6l','6m','6n','6o','6p','6q','6r','6s','6t','6u','6v','6w','6x','6y','6z','70','71','72','73','74','75') NULL, +PRIMARY KEY (b) +) ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +a enum('') YES NULL +b enum('test1','test2','test3','test4','test5') NO PRI NULL +c 
enum('1','2','3','4','5','6','7','8','9','a','b','c','d','e','f','g','h','i','j','k','l','m','n','o','p','q','r','s','t','u','v','w','x','y','z','','11','12','13','14','15','16','17','18','19','1a','1b','1c','1d','1e','1f','1g','1h','1i','1j','1k','1l','1m','1n','1o','1p','1q','1r','1s','1t','1u','1v','1w','1x','1y','1z','20','21','22','23','24','25','26','27','28','29','2a','2b','2c','2d','2e','2f','2g','2h','2i','2j','2k','2l','2m','2n','2o','2p','2q','2r','2s','2t','2u','2v','2w','2x','2y','2z','30','31','32','33','34','35','36','37','38','39','3a','3b','3c','3d','3e','3f','3g','3h','3i','3j','3k','3l','3m','3n','3o','3p','3q','3r','3s','3t','3u','3v','3w','3x','3y','3z','40','41','42','43','44','45','46','47','48','49','4a','4b','4c','4d','4e','4f','4g','4h','4i','4j','4k','4l','4m','4n','4o','4p','4q','4r','4s','4t','4u','4v','4w','4x','4y','4z','50','51','52','53','54','55','56','57','58','59','5a','5b','5c','5d','5e','5f','5g','5h','5i','5j','5k','5l','5m','5n','5o','5p','5q','5r','5s','5t','5u','5v','5w','5x','5y','5z','60','61','62','63','64','65','66','67','68','69','6a','6b','6c','6d','6e','6f','6g','6h','6i','6j','6k','6l','6m','6n','6o','6p','6q','6r','6s','6t','6u','6v','6w','6x','6y','6z','70','71','72','73','74','75') YES NULL +INSERT INTO t1 (a,b,c) VALUES ('','test2','4'),('',5,2); +SELECT a,b,c FROM t1; +a b c + test2 4 + test5 2 +INSERT INTO t1 (a,b,c) VALUES (0,'test6',-1); +Warnings: +Warning 1265 Data truncated for column 'a' at row 1 +Warning 1265 Data truncated for column 'b' at row 1 +Warning 1265 Data truncated for column 'c' at row 1 +SELECT a,b,c FROM t1; +a b c + + test2 4 + test5 2 +ALTER TABLE t1 ADD COLUMN e ENUM('a','A') NULL; +Warnings: +Note 1291 Column 'e' has duplicated value 'a' in ENUM +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +a enum('') YES NULL +b enum('test1','test2','test3','test4','test5') NO PRI NULL +c 
enum('1','2','3','4','5','6','7','8','9','a','b','c','d','e','f','g','h','i','j','k','l','m','n','o','p','q','r','s','t','u','v','w','x','y','z','','11','12','13','14','15','16','17','18','19','1a','1b','1c','1d','1e','1f','1g','1h','1i','1j','1k','1l','1m','1n','1o','1p','1q','1r','1s','1t','1u','1v','1w','1x','1y','1z','20','21','22','23','24','25','26','27','28','29','2a','2b','2c','2d','2e','2f','2g','2h','2i','2j','2k','2l','2m','2n','2o','2p','2q','2r','2s','2t','2u','2v','2w','2x','2y','2z','30','31','32','33','34','35','36','37','38','39','3a','3b','3c','3d','3e','3f','3g','3h','3i','3j','3k','3l','3m','3n','3o','3p','3q','3r','3s','3t','3u','3v','3w','3x','3y','3z','40','41','42','43','44','45','46','47','48','49','4a','4b','4c','4d','4e','4f','4g','4h','4i','4j','4k','4l','4m','4n','4o','4p','4q','4r','4s','4t','4u','4v','4w','4x','4y','4z','50','51','52','53','54','55','56','57','58','59','5a','5b','5c','5d','5e','5f','5g','5h','5i','5j','5k','5l','5m','5n','5o','5p','5q','5r','5s','5t','5u','5v','5w','5x','5y','5z','60','61','62','63','64','65','66','67','68','69','6a','6b','6c','6d','6e','6f','6g','6h','6i','6j','6k','6l','6m','6n','6o','6p','6q','6r','6s','6t','6u','6v','6w','6x','6y','6z','70','71','72','73','74','75') YES NULL +e enum('a','A') YES NULL +INSERT INTO t1 (a,b,c,e) VALUES ('','test3','75','A'); +SELECT a,b,c,e FROM t1; +a b c e + NULL + test2 4 NULL + test3 75 a + test5 2 NULL +SELECT a,b,c,e FROM t1 WHERE b='test2' OR a != ''; +a b c e + test2 4 NULL +DROP TABLE t1; +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 ( +c ENUM('test1','test2','test3') NULL, +c1 ENUM('test1','test2','test3') NULL DEFAULT NULL, +c2 ENUM('test1','test2','test3') NULL DEFAULT 'test2', +pk INT AUTO_INCREMENT PRIMARY KEY +) ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +c enum('test1','test2','test3') YES NULL +c1 enum('test1','test2','test3') YES NULL +c2 enum('test1','test2','test3') YES test2 +pk int(11) NO PRI NULL auto_increment +INSERT 
INTO t1 (c,c1,c2) VALUES (NULL,NULL,NULL); +INSERT INTO t1 (c,c1,c2) VALUES ('test2','test2','test2'); +INSERT INTO t1 () VALUES (); +SELECT pk, HEX(c), HEX(c1), HEX(c2) FROM t1 ORDER BY pk; +pk HEX(c) HEX(c1) HEX(c2) +1 NULL NULL NULL +2 7465737432 7465737432 7465737432 +3 NULL NULL 7465737432 +SELECT pk, HEX(c2) FROM t1 ORDER BY pk; +pk HEX(c2) +1 NULL +2 7465737432 +3 7465737432 +DROP TABLE t1; +######################## +# Fixed point columns (NUMERIC, DECIMAL) +######################## +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 ( +d DECIMAL NULL, +d0 DECIMAL(0) NULL, +d1_1 DECIMAL(1,1) NULL, +d10_2 DECIMAL(10,2) NULL, +d60_10 DECIMAL(60,10) NULL, +n NUMERIC NULL, +n0_0 NUMERIC(0,0) NULL, +n1 NUMERIC(1) NULL, +n20_4 NUMERIC(20,4) NULL, +n65_4 NUMERIC(65,4) NULL, +pk NUMERIC NULL PRIMARY KEY +) ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +d decimal(10,0) YES NULL +d0 decimal(10,0) YES NULL +d1_1 decimal(1,1) YES NULL +d10_2 decimal(10,2) YES NULL +d60_10 decimal(60,10) YES NULL +n decimal(10,0) YES NULL +n0_0 decimal(10,0) YES NULL +n1 decimal(1,0) YES NULL +n20_4 decimal(20,4) YES NULL +n65_4 decimal(65,4) YES NULL +pk decimal(10,0) NO PRI NULL +INSERT INTO t1 (d,d0,d1_1,d10_2,d60_10,n,n0_0,n1,n20_4,n65_4,pk) VALUES (100,123456,0.3,40000.25,123456789123456789.10001,1024,7000.0,8.0,999999.9,9223372036854775807,1); +INSERT INTO t1 (d,d0,d1_1,d10_2,d60_10,n,n0_0,n1,n20_4,n65_4,pk) VALUES (0,0,0,0,0,0,0,0,0,0,2); +INSERT INTO t1 (d,d0,d1_1,d10_2,d60_10,n,n0_0,n1,n20_4,n65_4,pk) VALUES (9999999999.0,9999999999.0,0.9,99999999.99,99999999999999999999999999999999999999999999999999.9999999999,9999999999.0,9999999999.0,9.0,9999999999999999.9999,9999999999999999999999999999999999999999999999999999999999999.9999,3); +SELECT d,d0,d1_1,d10_2,d60_10,n,n0_0,n1,n20_4,n65_4 FROM t1; +d d0 d1_1 d10_2 d60_10 n n0_0 n1 n20_4 n65_4 +0 0 0.0 0.00 0.0000000000 0 0 0 0.0000 0.0000 +100 123456 0.3 40000.25 123456789123456789.1000100000 1024 7000 8 999999.9000 
9223372036854775807.0000 +9999999999 9999999999 0.9 99999999.99 99999999999999999999999999999999999999999999999999.9999999999 9999999999 9999999999 9 9999999999999999.9999 9999999999999999999999999999999999999999999999999999999999999.9999 +INSERT INTO t1 (d,d0,d1_1,d10_2,d60_10,n,n0_0,n1,n20_4,n65_4,pk) VALUES (-100,-123456,-0.3,-40000.25,-123456789123456789.10001,-1024,-7000.0,-8.0,-999999.9,-9223372036854775807,4); +INSERT INTO t1 (d,d0,d1_1,d10_2,d60_10,n,n0_0,n1,n20_4,n65_4,pk) VALUES (-9999999999.0,-9999999999.0,-0.9,-99999999.99,-99999999999999999999999999999999999999999999999999.9999999999,-9999999999.0,-9999999999.0,-9.0,-9999999999999999.9999,-9999999999999999999999999999999999999999999999999999999999999.9999,5); +SELECT d,d0,d1_1,d10_2,d60_10,n,n0_0,n1,n20_4,n65_4 FROM t1; +d d0 d1_1 d10_2 d60_10 n n0_0 n1 n20_4 n65_4 +-100 -123456 -0.3 -40000.25 -123456789123456789.1000100000 -1024 -7000 -8 -999999.9000 -9223372036854775807.0000 +-9999999999 -9999999999 -0.9 -99999999.99 -99999999999999999999999999999999999999999999999999.9999999999 -9999999999 -9999999999 -9 -9999999999999999.9999 -9999999999999999999999999999999999999999999999999999999999999.9999 +0 0 0.0 0.00 0.0000000000 0 0 0 0.0000 0.0000 +100 123456 0.3 40000.25 123456789123456789.1000100000 1024 7000 8 999999.9000 9223372036854775807.0000 +9999999999 9999999999 0.9 99999999.99 99999999999999999999999999999999999999999999999999.9999999999 9999999999 9999999999 9 9999999999999999.9999 9999999999999999999999999999999999999999999999999999999999999.9999 +SELECT d,d0,d1_1,d10_2,d60_10,n,n0_0,n1,n20_4,n65_4 FROM t1 WHERE n20_4 = 9999999999999999.9999 OR d < 100; +d d0 d1_1 d10_2 d60_10 n n0_0 n1 n20_4 n65_4 +-100 -123456 -0.3 -40000.25 -123456789123456789.1000100000 -1024 -7000 -8 -999999.9000 -9223372036854775807.0000 +-9999999999 -9999999999 -0.9 -99999999.99 -99999999999999999999999999999999999999999999999999.9999999999 -9999999999 -9999999999 -9 -9999999999999999.9999 
-9999999999999999999999999999999999999999999999999999999999999.9999 +0 0 0.0 0.00 0.0000000000 0 0 0 0.0000 0.0000 +9999999999 9999999999 0.9 99999999.99 99999999999999999999999999999999999999999999999999.9999999999 9999999999 9999999999 9 9999999999999999.9999 9999999999999999999999999999999999999999999999999999999999999.9999 +INSERT INTO t1 (d,d0,d1_1,d10_2,d60_10,n,n0_0,n1,n20_4,n65_4,pk) VALUES ( +9999999999999999999999999999999999999999999999999999999999999.9999, +9999999999999999999999999999999999999999999999999999999999999.9999, +9999999999999999999999999999999999999999999999999999999999999.9999, +9999999999999999999999999999999999999999999999999999999999999.9999, +9999999999999999999999999999999999999999999999999999999999999.9999, +9999999999999999999999999999999999999999999999999999999999999.9999, +9999999999999999999999999999999999999999999999999999999999999.9999, +9999999999999999999999999999999999999999999999999999999999999.9999, +9999999999999999999999999999999999999999999999999999999999999.9999, +9999999999999999999999999999999999999999999999999999999999999.9999, +6 +); +Warnings: +Warning 1264 Out of range value for column 'd' at row 1 +Warning 1264 Out of range value for column 'd0' at row 1 +Warning 1264 Out of range value for column 'd1_1' at row 1 +Warning 1264 Out of range value for column 'd10_2' at row 1 +Warning 1264 Out of range value for column 'd60_10' at row 1 +Warning 1264 Out of range value for column 'n' at row 1 +Warning 1264 Out of range value for column 'n0_0' at row 1 +Warning 1264 Out of range value for column 'n1' at row 1 +Warning 1264 Out of range value for column 'n20_4' at row 1 +SELECT d,d0,d1_1,d10_2,d60_10,n,n0_0,n1,n20_4,n65_4 FROM t1; +d d0 d1_1 d10_2 d60_10 n n0_0 n1 n20_4 n65_4 +-100 -123456 -0.3 -40000.25 -123456789123456789.1000100000 -1024 -7000 -8 -999999.9000 -9223372036854775807.0000 +-9999999999 -9999999999 -0.9 -99999999.99 -99999999999999999999999999999999999999999999999999.9999999999 -9999999999 -9999999999 
-9 -9999999999999999.9999 -9999999999999999999999999999999999999999999999999999999999999.9999 +0 0 0.0 0.00 0.0000000000 0 0 0 0.0000 0.0000 +100 123456 0.3 40000.25 123456789123456789.1000100000 1024 7000 8 999999.9000 9223372036854775807.0000 +9999999999 9999999999 0.9 99999999.99 99999999999999999999999999999999999999999999999999.9999999999 9999999999 9999999999 9 9999999999999999.9999 9999999999999999999999999999999999999999999999999999999999999.9999 +9999999999 9999999999 0.9 99999999.99 99999999999999999999999999999999999999999999999999.9999999999 9999999999 9999999999 9 9999999999999999.9999 9999999999999999999999999999999999999999999999999999999999999.9999 +INSERT INTO t1 (d,d0,d1_1,d10_2,d60_10,n,n0_0,n1,n20_4,n65_4,pk) VALUES (10000000000.0,10000000000.0,1.1,100000000.99,100000000000000000000000000000000000000000000000000.0,10000000000.0,10000000000.0,10.0,10000000000000000.9999,10000000000000000000000000000000000000000000000000000000000000.9999,7); +Warnings: +Warning 1264 Out of range value for column 'd' at row 1 +Warning 1264 Out of range value for column 'd0' at row 1 +Warning 1264 Out of range value for column 'd1_1' at row 1 +Warning 1264 Out of range value for column 'd10_2' at row 1 +Warning 1264 Out of range value for column 'd60_10' at row 1 +Warning 1264 Out of range value for column 'n' at row 1 +Warning 1264 Out of range value for column 'n0_0' at row 1 +Warning 1264 Out of range value for column 'n1' at row 1 +Warning 1264 Out of range value for column 'n20_4' at row 1 +Warning 1264 Out of range value for column 'n65_4' at row 1 +SELECT d,d0,d1_1,d10_2,d60_10,n,n0_0,n1,n20_4,n65_4 FROM t1; +d d0 d1_1 d10_2 d60_10 n n0_0 n1 n20_4 n65_4 +-100 -123456 -0.3 -40000.25 -123456789123456789.1000100000 -1024 -7000 -8 -999999.9000 -9223372036854775807.0000 +-9999999999 -9999999999 -0.9 -99999999.99 -99999999999999999999999999999999999999999999999999.9999999999 -9999999999 -9999999999 -9 -9999999999999999.9999 
-9999999999999999999999999999999999999999999999999999999999999.9999 +0 0 0.0 0.00 0.0000000000 0 0 0 0.0000 0.0000 +100 123456 0.3 40000.25 123456789123456789.1000100000 1024 7000 8 999999.9000 9223372036854775807.0000 +9999999999 9999999999 0.9 99999999.99 99999999999999999999999999999999999999999999999999.9999999999 9999999999 9999999999 9 9999999999999999.9999 9999999999999999999999999999999999999999999999999999999999999.9999 +9999999999 9999999999 0.9 99999999.99 99999999999999999999999999999999999999999999999999.9999999999 9999999999 9999999999 9 9999999999999999.9999 9999999999999999999999999999999999999999999999999999999999999.9999 +9999999999 9999999999 0.9 99999999.99 99999999999999999999999999999999999999999999999999.9999999999 9999999999 9999999999 9 9999999999999999.9999 9999999999999999999999999999999999999999999999999999999999999.9999 +INSERT INTO t1 (d,d0,d1_1,d10_2,d60_10,n,n0_0,n1,n20_4,n65_4,pk) VALUES (9999999999.1,9999999999.1,1.9,99999999.001,99999999999999999999999999999999999999999999999999.99999999991,9999999999.1,9999999999.1,9.1,9999999999999999.00001,9999999999999999999999999999999999999999999999999999999999999.11111,8); +Warnings: +Note 1265 Data truncated for column 'd' at row 1 +Note 1265 Data truncated for column 'd0' at row 1 +Warning 1264 Out of range value for column 'd1_1' at row 1 +Note 1265 Data truncated for column 'd10_2' at row 1 +Note 1265 Data truncated for column 'd60_10' at row 1 +Note 1265 Data truncated for column 'n' at row 1 +Note 1265 Data truncated for column 'n0_0' at row 1 +Note 1265 Data truncated for column 'n1' at row 1 +Note 1265 Data truncated for column 'n20_4' at row 1 +Note 1265 Data truncated for column 'n65_4' at row 1 +SELECT d,d0,d1_1,d10_2,d60_10,n,n0_0,n1,n20_4,n65_4 FROM t1; +d d0 d1_1 d10_2 d60_10 n n0_0 n1 n20_4 n65_4 +-100 -123456 -0.3 -40000.25 -123456789123456789.1000100000 -1024 -7000 -8 -999999.9000 -9223372036854775807.0000 +-9999999999 -9999999999 -0.9 -99999999.99 
-99999999999999999999999999999999999999999999999999.9999999999 -9999999999 -9999999999 -9 -9999999999999999.9999 -9999999999999999999999999999999999999999999999999999999999999.9999 +0 0 0.0 0.00 0.0000000000 0 0 0 0.0000 0.0000 +100 123456 0.3 40000.25 123456789123456789.1000100000 1024 7000 8 999999.9000 9223372036854775807.0000 +9999999999 9999999999 0.9 99999999.00 99999999999999999999999999999999999999999999999999.9999999999 9999999999 9999999999 9 9999999999999999.0000 9999999999999999999999999999999999999999999999999999999999999.1111 +9999999999 9999999999 0.9 99999999.99 99999999999999999999999999999999999999999999999999.9999999999 9999999999 9999999999 9 9999999999999999.9999 9999999999999999999999999999999999999999999999999999999999999.9999 +9999999999 9999999999 0.9 99999999.99 99999999999999999999999999999999999999999999999999.9999999999 9999999999 9999999999 9 9999999999999999.9999 9999999999999999999999999999999999999999999999999999999999999.9999 +9999999999 9999999999 0.9 99999999.99 99999999999999999999999999999999999999999999999999.9999999999 9999999999 9999999999 9 9999999999999999.9999 9999999999999999999999999999999999999999999999999999999999999.9999 +ALTER TABLE t1 ADD COLUMN n66 NUMERIC(66) NULL; +ERROR 42000: Too big precision 66 specified for 'n66'. Maximum is 65 +ALTER TABLE t1 ADD COLUMN n66_6 DECIMAL(66,6) NULL; +ERROR 42000: Too big precision 66 specified for 'n66_6'. Maximum is 65 +ALTER TABLE t1 ADD COLUMN n66_66 DECIMAL(66,66) NULL; +ERROR 42000: Too big scale 66 specified for 'n66_66'. 
Maximum is 38 +DROP TABLE t1; +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 ( +c DECIMAL NULL, +c1 DECIMAL NULL DEFAULT NULL, +c2 DECIMAL NULL DEFAULT 1.1, +pk INT AUTO_INCREMENT PRIMARY KEY +) ENGINE=rocksdb; +Warnings: +Note 1265 Data truncated for column 'c2' at row 1 +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +c decimal(10,0) YES NULL +c1 decimal(10,0) YES NULL +c2 decimal(10,0) YES 1 +pk int(11) NO PRI NULL auto_increment +INSERT INTO t1 (c,c1,c2) VALUES (NULL,NULL,NULL); +INSERT INTO t1 (c,c1,c2) VALUES (1.1,1.1,1.1); +Warnings: +Note 1265 Data truncated for column 'c' at row 1 +Note 1265 Data truncated for column 'c1' at row 1 +Note 1265 Data truncated for column 'c2' at row 1 +INSERT INTO t1 () VALUES (); +SELECT pk, HEX(c), HEX(c1), HEX(c2) FROM t1 ORDER BY pk; +pk HEX(c) HEX(c1) HEX(c2) +1 NULL NULL NULL +2 1 1 1 +3 NULL NULL 1 +SELECT pk, HEX(c2) FROM t1 ORDER BY pk; +pk HEX(c2) +1 NULL +2 1 +3 1 +DROP TABLE t1; +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 ( +c NUMERIC NULL, +c1 NUMERIC NULL DEFAULT NULL, +c2 NUMERIC NULL DEFAULT 0 , +pk INT AUTO_INCREMENT PRIMARY KEY +) ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +c decimal(10,0) YES NULL +c1 decimal(10,0) YES NULL +c2 decimal(10,0) YES 0 +pk int(11) NO PRI NULL auto_increment +INSERT INTO t1 (c,c1,c2) VALUES (NULL,NULL,NULL); +INSERT INTO t1 (c,c1,c2) VALUES (0 ,0 ,0 ); +INSERT INTO t1 () VALUES (); +SELECT pk, HEX(c), HEX(c1), HEX(c2) FROM t1 ORDER BY pk; +pk HEX(c) HEX(c1) HEX(c2) +1 NULL NULL NULL +2 0 0 0 +3 NULL NULL 0 +SELECT pk, HEX(c2) FROM t1 ORDER BY pk; +pk HEX(c2) +1 NULL +2 0 +3 0 +DROP TABLE t1; +######################## +# Floating point columns (FLOAT, DOUBLE) +######################## +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 ( +f FLOAT NULL, +f0 FLOAT(0) NULL, +r1_1 REAL(1,1) NULL, +f23_0 FLOAT(23) NULL, +f20_3 FLOAT(20,3) NULL, +d DOUBLE NULL, +d1_0 DOUBLE(1,0) NULL, +d10_10 DOUBLE PRECISION (10,10) NULL, +d53 DOUBLE(53,0) NULL, +d53_10 
DOUBLE(53,10) NULL, +pk DOUBLE NULL PRIMARY KEY +) ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +f float YES NULL +f0 float YES NULL +r1_1 double(1,1) YES NULL +f23_0 float YES NULL +f20_3 float(20,3) YES NULL +d double YES NULL +d1_0 double(1,0) YES NULL +d10_10 double(10,10) YES NULL +d53 double(53,0) YES NULL +d53_10 double(53,10) YES NULL +pk double NO PRI NULL +INSERT INTO t1 (f,f0,r1_1,f23_0,f20_3,d,d1_0,d10_10,d53,d53_10,pk) VALUES (12345.12345,12345.12345,0.9,123456789.123,56789.987,11111111.111,8.0,0.0123456789,1234566789123456789,99999999999999999.99999999,1); +SELECT f,f0,r1_1,f23_0,f20_3,d,d1_0,d10_10,d53,d53_10 FROM t1; +f 12345.1 +d 11111111.111 +d10_10 0.0123456789 +d1_0 8 +d53 1234566789123456800 +d53_10 100000000000000000.0000000000 +f0 12345.1 +f20_3 56789.988 +f23_0 123457000 +r1_1 0.9 +INSERT INTO t1 (f,f0,r1_1,f23_0,f20_3,d,d1_0,d10_10,d53,d53_10,pk) VALUES (0,0,0,0,0,0,0,0,0,0,2); +INSERT INTO t1 (f,f0,r1_1,f23_0,f20_3,d,d1_0,d10_10,d53,d53_10,pk) VALUES ( +99999999999999999999999999999999999999, +99999999999999999999999999999999999999.9999999999999999, +0.9, +99999999999999999999999999999999999999.9, +99999999999999999.999, +999999999999999999999999999999999999999999999999999999999999999999999999999999999, +9, +0.9999999999, +1999999999999999999999999999999999999999999999999999999, +19999999999999999999999999999999999999999999.9999999999, +3 +); +Warnings: +Warning 1264 Out of range value for column 'd53' at row 1 +Warning 1264 Out of range value for column 'd53_10' at row 1 +SELECT f,f0,r1_1,f23_0,f20_3,d,d1_0,d10_10,d53,d53_10 FROM t1; +f 12345.1 +d 0 +d 11111111.111 +d 1e81 +d10_10 0.0000000000 +d10_10 0.0123456789 +d10_10 0.9999999999 +d1_0 0 +d1_0 8 +d1_0 9 +d53 0 +d53 100000000000000000000000000000000000000000000000000000 +d53 1234566789123456800 +d53_10 0.0000000000 +d53_10 100000000000000000.0000000000 +d53_10 10000000000000000000000000000000000000000000.0000000000 +f 0 +f 1e38 +f0 0 +f0 12345.1 +f0 1e38 
+f20_3 0.000 +f20_3 56789.988 +f20_3 99999998430674940.000 +f23_0 0 +f23_0 123457000 +f23_0 1e38 +r1_1 0.0 +r1_1 0.9 +r1_1 0.9 +INSERT INTO t1 (f,f0,r1_1,f23_0,f20_3,d,d1_0,d10_10,d53,d53_10,pk) VALUES (-999999999999999999999999,-99999999999.999999999999,-0.9,-999.99999999999999999999,-99999999999999999.999,-999999999999999999999999999999999999999999999999999999999999-0.999,-9,-.9999999999,-999999999999999999999999999999.99999999999999999999999,-9999999999999999999999999999999999999999999.9999999999,4); +SELECT f,f0,r1_1,f23_0,f20_3,d,d1_0,d10_10,d53,d53_10 FROM t1; +f 12345.1 +d -1e60 +d 0 +d 11111111.111 +d 1e81 +d10_10 -0.9999999999 +d10_10 0.0000000000 +d10_10 0.0123456789 +d10_10 0.9999999999 +d1_0 -9 +d1_0 0 +d1_0 8 +d1_0 9 +d53 -1000000000000000000000000000000 +d53 0 +d53 100000000000000000000000000000000000000000000000000000 +d53 1234566789123456800 +d53_10 -10000000000000000000000000000000000000000000.0000000000 +d53_10 0.0000000000 +d53_10 100000000000000000.0000000000 +d53_10 10000000000000000000000000000000000000000000.0000000000 +f -1e24 +f 0 +f 1e38 +f0 -100000000000 +f0 0 +f0 12345.1 +f0 1e38 +f20_3 -99999998430674940.000 +f20_3 0.000 +f20_3 56789.988 +f20_3 99999998430674940.000 +f23_0 -1000 +f23_0 0 +f23_0 123457000 +f23_0 1e38 +r1_1 -0.9 +r1_1 0.0 +r1_1 0.9 +r1_1 0.9 +SELECT MAX(f), MAX(f0), MAX(r1_1), MAX(f23_0), MAX(f20_3), MAX(d), MAX(d1_0), MAX(d10_10), MAX(d53), MAX(d53_10) FROM t1; +MAX(f) 9.999999680285692e37 +MAX(d) 1e81 +MAX(d10_10) 0.9999999999 +MAX(d1_0) 9 +MAX(d53) 100000000000000000000000000000000000000000000000000000 +MAX(d53_10) 10000000000000000000000000000000000000000000.0000000000 +MAX(f0) 9.999999680285692e37 +MAX(f20_3) 99999998430674940.000 +MAX(f23_0) 9.999999680285692e37 +MAX(r1_1) 0.9 +INSERT INTO t1 (f,f0,r1_1,f23_0,f20_3,d,d1_0,d10_10,d53,d53_10,pk) VALUES ( +9999999999999999999999999999999999999999999999999999999999999.9999, +9999999999999999999999999999999999999999999999999999999999999.9999, 
+9999999999999999999999999999999999999999999999999999999999999.9999, +9999999999999999999999999999999999999999999999999999999999999.9999, +9999999999999999999999999999999999999999999999999999999999999.9999, +9999999999999999999999999999999999999999999999999999999999999.9999, +9999999999999999999999999999999999999999999999999999999999999.9999, +9999999999999999999999999999999999999999999999999999999999999.9999, +9999999999999999999999999999999999999999999999999999999999999.9999, +9999999999999999999999999999999999999999999999999999999999999.9999, +5 +); +Warnings: +Warning 1264 Out of range value for column 'f' at row 1 +Warning 1264 Out of range value for column 'f0' at row 1 +Warning 1264 Out of range value for column 'r1_1' at row 1 +Warning 1264 Out of range value for column 'f23_0' at row 1 +Warning 1264 Out of range value for column 'f20_3' at row 1 +Warning 1264 Out of range value for column 'd1_0' at row 1 +Warning 1264 Out of range value for column 'd10_10' at row 1 +Warning 1264 Out of range value for column 'd53' at row 1 +Warning 1264 Out of range value for column 'd53_10' at row 1 +SELECT f,f0,r1_1,f23_0,f20_3,d,d1_0,d10_10,d53,d53_10 FROM t1; +f 12345.1 +d -1e60 +d 0 +d 11111111.111 +d 1e61 +d 1e81 +d10_10 -0.9999999999 +d10_10 0.0000000000 +d10_10 0.0123456789 +d10_10 0.9999999999 +d10_10 0.9999999999 +d1_0 -9 +d1_0 0 +d1_0 8 +d1_0 9 +d1_0 9 +d53 -1000000000000000000000000000000 +d53 0 +d53 100000000000000000000000000000000000000000000000000000 +d53 100000000000000000000000000000000000000000000000000000 +d53 1234566789123456800 +d53_10 -10000000000000000000000000000000000000000000.0000000000 +d53_10 0.0000000000 +d53_10 100000000000000000.0000000000 +d53_10 10000000000000000000000000000000000000000000.0000000000 +d53_10 10000000000000000000000000000000000000000000.0000000000 +f -1e24 +f 0 +f 1e38 +f 3.40282e38 +f0 -100000000000 +f0 0 +f0 12345.1 +f0 1e38 +f0 3.40282e38 +f20_3 -99999998430674940.000 +f20_3 0.000 +f20_3 56789.988 +f20_3 
99999998430674940.000 +f20_3 99999998430674940.000 +f23_0 -1000 +f23_0 0 +f23_0 123457000 +f23_0 1e38 +f23_0 3.40282e38 +r1_1 -0.9 +r1_1 0.0 +r1_1 0.9 +r1_1 0.9 +r1_1 0.9 +INSERT INTO t1 (f,f0,r1_1,f23_0,f20_3,d,d1_0,d10_10,d53,d53_10,pk) VALUES ( +999999999999999999999999999999999999999, +999999999999999999999999999999999999999.9999999999999999, +1.9, +999999999999999999999999999999999999999.9, +999999999999999999.999, +9999999999999999999999999999999999999999999999999999999999999999999999999999999999, +99, +1.9999999999, +1999999999999999999999999999999999999999999999999999999, +19999999999999999999999999999999999999999999.9999999999, +6 +); +Warnings: +Warning 1916 Got overflow when converting '' to DECIMAL. Value truncated +Warning 1264 Out of range value for column 'f' at row 1 +Warning 1264 Out of range value for column 'f0' at row 1 +Warning 1264 Out of range value for column 'r1_1' at row 1 +Warning 1264 Out of range value for column 'f23_0' at row 1 +Warning 1264 Out of range value for column 'f20_3' at row 1 +Warning 1264 Out of range value for column 'd1_0' at row 1 +Warning 1264 Out of range value for column 'd10_10' at row 1 +Warning 1264 Out of range value for column 'd53' at row 1 +Warning 1264 Out of range value for column 'd53_10' at row 1 +SELECT f,f0,r1_1,f23_0,f20_3,d,d1_0,d10_10,d53,d53_10 FROM t1; +f 12345.1 +d -1e60 +d 0 +d 11111111.111 +d 1e61 +d 1e65 +d 1e81 +d10_10 -0.9999999999 +d10_10 0.0000000000 +d10_10 0.0123456789 +d10_10 0.9999999999 +d10_10 0.9999999999 +d10_10 0.9999999999 +d1_0 -9 +d1_0 0 +d1_0 8 +d1_0 9 +d1_0 9 +d1_0 9 +d53 -1000000000000000000000000000000 +d53 0 +d53 100000000000000000000000000000000000000000000000000000 +d53 100000000000000000000000000000000000000000000000000000 +d53 100000000000000000000000000000000000000000000000000000 +d53 1234566789123456800 +d53_10 -10000000000000000000000000000000000000000000.0000000000 +d53_10 0.0000000000 +d53_10 100000000000000000.0000000000 +d53_10 
10000000000000000000000000000000000000000000.0000000000 +d53_10 10000000000000000000000000000000000000000000.0000000000 +d53_10 10000000000000000000000000000000000000000000.0000000000 +f -1e24 +f 0 +f 1e38 +f 3.40282e38 +f 3.40282e38 +f0 -100000000000 +f0 0 +f0 12345.1 +f0 1e38 +f0 3.40282e38 +f0 3.40282e38 +f20_3 -99999998430674940.000 +f20_3 0.000 +f20_3 56789.988 +f20_3 99999998430674940.000 +f20_3 99999998430674940.000 +f20_3 99999998430674940.000 +f23_0 -1000 +f23_0 0 +f23_0 123457000 +f23_0 1e38 +f23_0 3.40282e38 +f23_0 3.40282e38 +r1_1 -0.9 +r1_1 0.0 +r1_1 0.9 +r1_1 0.9 +r1_1 0.9 +r1_1 0.9 +ALTER TABLE t1 ADD COLUMN d0_0 DOUBLE(0,0) NULL; +ALTER TABLE t1 ADD COLUMN n66_6 DECIMAL(256,1) NULL; +ERROR 42000: Too big precision 256 specified for 'n66_6'. Maximum is 65 +ALTER TABLE t1 ADD COLUMN n66_66 DECIMAL(40,35) NULL; +DROP TABLE t1; +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 ( +c FLOAT NULL, +c1 FLOAT NULL DEFAULT NULL, +c2 FLOAT NULL DEFAULT 1.1 , +pk INT AUTO_INCREMENT PRIMARY KEY +) ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +c float YES NULL +c1 float YES NULL +c2 float YES 1.1 +pk int(11) NO PRI NULL auto_increment +INSERT INTO t1 (c,c1,c2) VALUES (NULL,NULL,NULL); +INSERT INTO t1 (c,c1,c2) VALUES (1.1 ,1.1 ,1.1 ); +INSERT INTO t1 () VALUES (); +SELECT pk, HEX(c), HEX(c1), HEX(c2) FROM t1 ORDER BY pk; +pk HEX(c) HEX(c1) HEX(c2) +1 NULL NULL NULL +2 1 1 1 +3 NULL NULL 1 +SELECT pk, HEX(c2) FROM t1 ORDER BY pk; +pk HEX(c2) +1 NULL +2 1 +3 1 +DROP TABLE t1; +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 ( +c DOUBLE NULL, +c1 DOUBLE NULL DEFAULT NULL, +c2 DOUBLE NULL DEFAULT 0 , +pk INT AUTO_INCREMENT PRIMARY KEY +) ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +c double YES NULL +c1 double YES NULL +c2 double YES 0 +pk int(11) NO PRI NULL auto_increment +INSERT INTO t1 (c,c1,c2) VALUES (NULL,NULL,NULL); +INSERT INTO t1 (c,c1,c2) VALUES (0 ,0 ,0 ); +INSERT INTO t1 () VALUES (); +SELECT pk, HEX(c), HEX(c1), 
HEX(c2) FROM t1 ORDER BY pk; +pk HEX(c) HEX(c1) HEX(c2) +1 NULL NULL NULL +2 0 0 0 +3 NULL NULL 0 +SELECT pk, HEX(c2) FROM t1 ORDER BY pk; +pk HEX(c2) +1 NULL +2 0 +3 0 +DROP TABLE t1; +######################## +# INT columns +######################## +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 ( +i INT NULL, +i0 INT(0) NULL, +i1 INT(1) NULL, +i20 INT(20) NULL, +t TINYINT NULL, +t0 TINYINT(0) NULL, +t1 TINYINT(1) NULL, +t20 TINYINT(20) NULL, +s SMALLINT NULL, +s0 SMALLINT(0) NULL, +s1 SMALLINT(1) NULL, +s20 SMALLINT(20) NULL, +m MEDIUMINT NULL, +m0 MEDIUMINT(0) NULL, +m1 MEDIUMINT(1) NULL, +m20 MEDIUMINT(20) NULL, +b BIGINT NULL, +b0 BIGINT(0) NULL, +b1 BIGINT(1) NULL, +b20 BIGINT(20) NULL, +pk INT AUTO_INCREMENT PRIMARY KEY +) ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +i int(11) YES NULL +i0 int(11) YES NULL +i1 int(1) YES NULL +i20 int(20) YES NULL +t tinyint(4) YES NULL +t0 tinyint(4) YES NULL +t1 tinyint(1) YES NULL +t20 tinyint(20) YES NULL +s smallint(6) YES NULL +s0 smallint(6) YES NULL +s1 smallint(1) YES NULL +s20 smallint(20) YES NULL +m mediumint(9) YES NULL +m0 mediumint(9) YES NULL +m1 mediumint(1) YES NULL +m20 mediumint(20) YES NULL +b bigint(20) YES NULL +b0 bigint(20) YES NULL +b1 bigint(1) YES NULL +b20 bigint(20) YES NULL +pk int(11) NO PRI NULL auto_increment +INSERT INTO t1 (i,i0,i1,i20,t,t0,t1,t20,s,s0,s1,s20,m,m0,m1,m20,b,b0,b1,b20) VALUES (1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20); +INSERT INTO t1 (i,i0,i1,i20,t,t0,t1,t20,s,s0,s1,s20,m,m0,m1,m20,b,b0,b1,b20) VALUES (0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0); +INSERT INTO t1 (i,i0,i1,i20,t,t0,t1,t20,s,s0,s1,s20,m,m0,m1,m20,b,b0,b1,b20) VALUES (2147483647,2147483647,2147483647,2147483647,127,127,127,127,32767,32767,32767,32767,8388607,8388607,8388607,8388607,9223372036854775807,9223372036854775807,9223372036854775807,9223372036854775807); +SELECT i,i0,i1,i20,t,t0,t1,t20,s,s0,s1,s20,m,m0,m1,m20,b,b0,b1,b20 FROM t1; +i i0 i1 i20 t t0 t1 t20 s s0 s1 s20 m m0 
m1 m20 b b0 b1 b20 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 +2147483647 2147483647 2147483647 2147483647 127 127 127 127 32767 32767 32767 32767 8388607 8388607 8388607 8388607 9223372036854775807 9223372036854775807 9223372036854775807 9223372036854775807 +INSERT INTO t1 (i,i0,i1,i20,t,t0,t1,t20,s,s0,s1,s20,m,m0,m1,m20,b,b0,b1,b20) VALUES (-2147483648,-2147483648,-2147483648,-2147483648,-128,-128,-128,-128,-32768,-32768,-32768,-32768,-8388608,-8388608,-8388608,-8388608,-9223372036854775808,-9223372036854775808,-9223372036854775808,-9223372036854775808); +INSERT INTO t1 (i,i0,i1,i20,t,t0,t1,t20,s,s0,s1,s20,m,m0,m1,m20,b,b0,b1,b20) VALUES (4294967295,4294967295,4294967295,4294967295,255,255,255,255,65535,65535,65535,65535,16777215,16777215,16777215,16777215,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615); +Warnings: +Warning 1264 Out of range value for column 'i' at row 1 +Warning 1264 Out of range value for column 'i0' at row 1 +Warning 1264 Out of range value for column 'i1' at row 1 +Warning 1264 Out of range value for column 'i20' at row 1 +Warning 1264 Out of range value for column 't' at row 1 +Warning 1264 Out of range value for column 't0' at row 1 +Warning 1264 Out of range value for column 't1' at row 1 +Warning 1264 Out of range value for column 't20' at row 1 +Warning 1264 Out of range value for column 's' at row 1 +Warning 1264 Out of range value for column 's0' at row 1 +Warning 1264 Out of range value for column 's1' at row 1 +Warning 1264 Out of range value for column 's20' at row 1 +Warning 1264 Out of range value for column 'm' at row 1 +Warning 1264 Out of range value for column 'm0' at row 1 +Warning 1264 Out of range value for column 'm1' at row 1 +Warning 1264 Out of range value for column 'm20' at row 1 +Warning 1264 Out of range value for column 'b' at row 1 +Warning 1264 Out of range value for column 'b0' at row 1 +Warning 1264 Out of range value for 
column 'b1' at row 1 +Warning 1264 Out of range value for column 'b20' at row 1 +SELECT i,i0,i1,i20,t,t0,t1,t20,s,s0,s1,s20,m,m0,m1,m20,b,b0,b1,b20 FROM t1; +i i0 i1 i20 t t0 t1 t20 s s0 s1 s20 m m0 m1 m20 b b0 b1 b20 +-2147483648 -2147483648 -2147483648 -2147483648 -128 -128 -128 -128 -32768 -32768 -32768 -32768 -8388608 -8388608 -8388608 -8388608 -9223372036854775808 -9223372036854775808 -9223372036854775808 -9223372036854775808 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 +2147483647 2147483647 2147483647 2147483647 127 127 127 127 32767 32767 32767 32767 8388607 8388607 8388607 8388607 9223372036854775807 9223372036854775807 9223372036854775807 9223372036854775807 +2147483647 2147483647 2147483647 2147483647 127 127 127 127 32767 32767 32767 32767 8388607 8388607 8388607 8388607 9223372036854775807 9223372036854775807 9223372036854775807 9223372036854775807 +INSERT INTO t1 (i,i0,i1,i20,t,t0,t1,t20,s,s0,s1,s20,m,m0,m1,m20,b,b0,b1,b20) VALUES (-2147483649,-2147483649,-2147483649,-2147483649,-129,-129,-129,-129,-32769,-32769,-32769,-32769,-8388609,-8388609,-8388609,-8388609,-9223372036854775809,-9223372036854775809,-9223372036854775809,-9223372036854775809); +Warnings: +Warning 1264 Out of range value for column 'i' at row 1 +Warning 1264 Out of range value for column 'i0' at row 1 +Warning 1264 Out of range value for column 'i1' at row 1 +Warning 1264 Out of range value for column 'i20' at row 1 +Warning 1264 Out of range value for column 't' at row 1 +Warning 1264 Out of range value for column 't0' at row 1 +Warning 1264 Out of range value for column 't1' at row 1 +Warning 1264 Out of range value for column 't20' at row 1 +Warning 1264 Out of range value for column 's' at row 1 +Warning 1264 Out of range value for column 's0' at row 1 +Warning 1264 Out of range value for column 's1' at row 1 +Warning 1264 Out of range value for column 's20' at row 1 +Warning 1264 Out of range value for column 'm' at row 1 +Warning 
1264 Out of range value for column 'm0' at row 1 +Warning 1264 Out of range value for column 'm1' at row 1 +Warning 1264 Out of range value for column 'm20' at row 1 +Warning 1264 Out of range value for column 'b' at row 1 +Warning 1264 Out of range value for column 'b0' at row 1 +Warning 1264 Out of range value for column 'b1' at row 1 +Warning 1264 Out of range value for column 'b20' at row 1 +INSERT INTO t1 (i,i0,i1,i20,t,t0,t1,t20,s,s0,s1,s20,m,m0,m1,m20,b,b0,b1,b20) VALUES (4294967296,4294967296,4294967296,4294967296,256,256,256,256,65536,65536,65536,65536,16777216,16777216,16777216,16777216,18446744073709551616,18446744073709551616,18446744073709551616,18446744073709551616); +Warnings: +Warning 1264 Out of range value for column 'i' at row 1 +Warning 1264 Out of range value for column 'i0' at row 1 +Warning 1264 Out of range value for column 'i1' at row 1 +Warning 1264 Out of range value for column 'i20' at row 1 +Warning 1264 Out of range value for column 't' at row 1 +Warning 1264 Out of range value for column 't0' at row 1 +Warning 1264 Out of range value for column 't1' at row 1 +Warning 1264 Out of range value for column 't20' at row 1 +Warning 1264 Out of range value for column 's' at row 1 +Warning 1264 Out of range value for column 's0' at row 1 +Warning 1264 Out of range value for column 's1' at row 1 +Warning 1264 Out of range value for column 's20' at row 1 +Warning 1264 Out of range value for column 'm' at row 1 +Warning 1264 Out of range value for column 'm0' at row 1 +Warning 1264 Out of range value for column 'm1' at row 1 +Warning 1264 Out of range value for column 'm20' at row 1 +Warning 1264 Out of range value for column 'b' at row 1 +Warning 1264 Out of range value for column 'b0' at row 1 +Warning 1264 Out of range value for column 'b1' at row 1 +Warning 1264 Out of range value for column 'b20' at row 1 +INSERT INTO t1 (i,i0,i1,i20,t,t0,t1,t20,s,s0,s1,s20,m,m0,m1,m20,b,b0,b1,b20) SELECT b,b,b,b,b,b,b,b,b,b,b,b,b,b,b,b,b,b,b,b FROM t1 WHERE 
b IN (-9223372036854775808,9223372036854775807,18446744073709551615); +Warnings: +Warning 1264 Out of range value for column 'i' at row 8 +Warning 1264 Out of range value for column 'i0' at row 8 +Warning 1264 Out of range value for column 'i1' at row 8 +Warning 1264 Out of range value for column 'i20' at row 8 +Warning 1264 Out of range value for column 't' at row 8 +Warning 1264 Out of range value for column 't0' at row 8 +Warning 1264 Out of range value for column 't1' at row 8 +Warning 1264 Out of range value for column 't20' at row 8 +Warning 1264 Out of range value for column 's' at row 8 +Warning 1264 Out of range value for column 's0' at row 8 +Warning 1264 Out of range value for column 's1' at row 8 +Warning 1264 Out of range value for column 's20' at row 8 +Warning 1264 Out of range value for column 'm' at row 8 +Warning 1264 Out of range value for column 'm0' at row 8 +Warning 1264 Out of range value for column 'm1' at row 8 +Warning 1264 Out of range value for column 'm20' at row 8 +Warning 1264 Out of range value for column 'i' at row 9 +Warning 1264 Out of range value for column 'i0' at row 9 +Warning 1264 Out of range value for column 'i1' at row 9 +Warning 1264 Out of range value for column 'i20' at row 9 +Warning 1264 Out of range value for column 't' at row 9 +Warning 1264 Out of range value for column 't0' at row 9 +Warning 1264 Out of range value for column 't1' at row 9 +Warning 1264 Out of range value for column 't20' at row 9 +Warning 1264 Out of range value for column 's' at row 9 +Warning 1264 Out of range value for column 's0' at row 9 +Warning 1264 Out of range value for column 's1' at row 9 +Warning 1264 Out of range value for column 's20' at row 9 +Warning 1264 Out of range value for column 'm' at row 9 +Warning 1264 Out of range value for column 'm0' at row 9 +Warning 1264 Out of range value for column 'm1' at row 9 +Warning 1264 Out of range value for column 'm20' at row 9 +Warning 1264 Out of range value for column 'i' at row 10 
+Warning 1264 Out of range value for column 'i0' at row 10 +Warning 1264 Out of range value for column 'i1' at row 10 +Warning 1264 Out of range value for column 'i20' at row 10 +Warning 1264 Out of range value for column 't' at row 10 +Warning 1264 Out of range value for column 't0' at row 10 +Warning 1264 Out of range value for column 't1' at row 10 +Warning 1264 Out of range value for column 't20' at row 10 +Warning 1264 Out of range value for column 's' at row 10 +Warning 1264 Out of range value for column 's0' at row 10 +Warning 1264 Out of range value for column 's1' at row 10 +Warning 1264 Out of range value for column 's20' at row 10 +Warning 1264 Out of range value for column 'm' at row 10 +Warning 1264 Out of range value for column 'm0' at row 10 +Warning 1264 Out of range value for column 'm1' at row 10 +Warning 1264 Out of range value for column 'm20' at row 10 +Warning 1264 Out of range value for column 'i' at row 11 +Warning 1264 Out of range value for column 'i0' at row 11 +Warning 1264 Out of range value for column 'i1' at row 11 +Warning 1264 Out of range value for column 'i20' at row 11 +Warning 1264 Out of range value for column 't' at row 11 +Warning 1264 Out of range value for column 't0' at row 11 +Warning 1264 Out of range value for column 't1' at row 11 +Warning 1264 Out of range value for column 't20' at row 11 +Warning 1264 Out of range value for column 's' at row 11 +Warning 1264 Out of range value for column 's0' at row 11 +Warning 1264 Out of range value for column 's1' at row 11 +Warning 1264 Out of range value for column 's20' at row 11 +Warning 1264 Out of range value for column 'm' at row 11 +Warning 1264 Out of range value for column 'm0' at row 11 +Warning 1264 Out of range value for column 'm1' at row 11 +Warning 1264 Out of range value for column 'm20' at row 11 +SELECT i,i0,i1,i20,t,t0,t1,t20,s,s0,s1,s20,m,m0,m1,m20,b,b0,b1,b20 FROM t1; +i i0 i1 i20 t t0 t1 t20 s s0 s1 s20 m m0 m1 m20 b b0 b1 b20 +-2147483648 -2147483648 
-2147483648 -2147483648 -128 -128 -128 -128 -32768 -32768 -32768 -32768 -8388608 -8388608 -8388608 -8388608 -9223372036854775808 -9223372036854775808 -9223372036854775808 -9223372036854775808 +-2147483648 -2147483648 -2147483648 -2147483648 -128 -128 -128 -128 -32768 -32768 -32768 -32768 -8388608 -8388608 -8388608 -8388608 -9223372036854775808 -9223372036854775808 -9223372036854775808 -9223372036854775808 +-2147483648 -2147483648 -2147483648 -2147483648 -128 -128 -128 -128 -32768 -32768 -32768 -32768 -8388608 -8388608 -8388608 -8388608 -9223372036854775808 -9223372036854775808 -9223372036854775808 -9223372036854775808 +-2147483648 -2147483648 -2147483648 -2147483648 -128 -128 -128 -128 -32768 -32768 -32768 -32768 -8388608 -8388608 -8388608 -8388608 -9223372036854775808 -9223372036854775808 -9223372036854775808 -9223372036854775808 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 +2147483647 2147483647 2147483647 2147483647 127 127 127 127 32767 32767 32767 32767 8388607 8388607 8388607 8388607 9223372036854775807 9223372036854775807 9223372036854775807 9223372036854775807 +2147483647 2147483647 2147483647 2147483647 127 127 127 127 32767 32767 32767 32767 8388607 8388607 8388607 8388607 9223372036854775807 9223372036854775807 9223372036854775807 9223372036854775807 +2147483647 2147483647 2147483647 2147483647 127 127 127 127 32767 32767 32767 32767 8388607 8388607 8388607 8388607 9223372036854775807 9223372036854775807 9223372036854775807 9223372036854775807 +2147483647 2147483647 2147483647 2147483647 127 127 127 127 32767 32767 32767 32767 8388607 8388607 8388607 8388607 9223372036854775807 9223372036854775807 9223372036854775807 9223372036854775807 +2147483647 2147483647 2147483647 2147483647 127 127 127 127 32767 32767 32767 32767 8388607 8388607 8388607 8388607 9223372036854775807 9223372036854775807 9223372036854775807 9223372036854775807 +2147483647 2147483647 2147483647 2147483647 127 127 127 127 32767 32767 32767 
32767 8388607 8388607 8388607 8388607 9223372036854775807 9223372036854775807 9223372036854775807 9223372036854775807 +ALTER TABLE t1 ADD COLUMN i257 INT(257) NULL; +ERROR 42000: Display width out of range for 'i257' (max = 255) +DROP TABLE t1; +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 ( +c INT NULL, +c1 INT NULL DEFAULT NULL, +c2 INT NULL DEFAULT 2147483647, +pk INT AUTO_INCREMENT PRIMARY KEY +) ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +c int(11) YES NULL +c1 int(11) YES NULL +c2 int(11) YES 2147483647 +pk int(11) NO PRI NULL auto_increment +INSERT INTO t1 (c,c1,c2) VALUES (NULL,NULL,NULL); +INSERT INTO t1 (c,c1,c2) VALUES (2147483647,2147483647,2147483647); +INSERT INTO t1 () VALUES (); +SELECT pk, HEX(c), HEX(c1), HEX(c2) FROM t1 ORDER BY pk; +pk HEX(c) HEX(c1) HEX(c2) +1 NULL NULL NULL +2 7FFFFFFF 7FFFFFFF 7FFFFFFF +3 NULL NULL 7FFFFFFF +SELECT pk, HEX(c2) FROM t1 ORDER BY pk; +pk HEX(c2) +1 NULL +2 7FFFFFFF +3 7FFFFFFF +DROP TABLE t1; +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 ( +c TINYINT NULL, +c1 TINYINT NULL DEFAULT NULL, +c2 TINYINT NULL DEFAULT 127 , +pk INT AUTO_INCREMENT PRIMARY KEY +) ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +c tinyint(4) YES NULL +c1 tinyint(4) YES NULL +c2 tinyint(4) YES 127 +pk int(11) NO PRI NULL auto_increment +INSERT INTO t1 (c,c1,c2) VALUES (NULL,NULL,NULL); +INSERT INTO t1 (c,c1,c2) VALUES (127 ,127 ,127 ); +INSERT INTO t1 () VALUES (); +SELECT pk, HEX(c), HEX(c1), HEX(c2) FROM t1 ORDER BY pk; +pk HEX(c) HEX(c1) HEX(c2) +1 NULL NULL NULL +2 7F 7F 7F +3 NULL NULL 7F +SELECT pk, HEX(c2) FROM t1 ORDER BY pk; +pk HEX(c2) +1 NULL +2 7F +3 7F +DROP TABLE t1; +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 ( +c SMALLINT NULL, +c1 SMALLINT NULL DEFAULT NULL, +c2 SMALLINT NULL DEFAULT 0, +pk INT AUTO_INCREMENT PRIMARY KEY +) ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +c smallint(6) YES NULL +c1 smallint(6) YES NULL +c2 smallint(6) YES 0 +pk int(11) 
NO PRI NULL auto_increment +INSERT INTO t1 (c,c1,c2) VALUES (NULL,NULL,NULL); +INSERT INTO t1 (c,c1,c2) VALUES (0,0,0); +INSERT INTO t1 () VALUES (); +SELECT pk, HEX(c), HEX(c1), HEX(c2) FROM t1 ORDER BY pk; +pk HEX(c) HEX(c1) HEX(c2) +1 NULL NULL NULL +2 0 0 0 +3 NULL NULL 0 +SELECT pk, HEX(c2) FROM t1 ORDER BY pk; +pk HEX(c2) +1 NULL +2 0 +3 0 +DROP TABLE t1; +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 ( +c MEDIUMINT NULL, +c1 MEDIUMINT NULL DEFAULT NULL, +c2 MEDIUMINT NULL DEFAULT 1, +pk INT AUTO_INCREMENT PRIMARY KEY +) ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +c mediumint(9) YES NULL +c1 mediumint(9) YES NULL +c2 mediumint(9) YES 1 +pk int(11) NO PRI NULL auto_increment +INSERT INTO t1 (c,c1,c2) VALUES (NULL,NULL,NULL); +INSERT INTO t1 (c,c1,c2) VALUES (1,1,1); +INSERT INTO t1 () VALUES (); +SELECT pk, HEX(c), HEX(c1), HEX(c2) FROM t1 ORDER BY pk; +pk HEX(c) HEX(c1) HEX(c2) +1 NULL NULL NULL +2 1 1 1 +3 NULL NULL 1 +SELECT pk, HEX(c2) FROM t1 ORDER BY pk; +pk HEX(c2) +1 NULL +2 1 +3 1 +DROP TABLE t1; +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 ( +c BIGINT NULL, +c1 BIGINT NULL DEFAULT NULL, +c2 BIGINT NULL DEFAULT 9223372036854775807, +pk INT AUTO_INCREMENT PRIMARY KEY +) ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +c bigint(20) YES NULL +c1 bigint(20) YES NULL +c2 bigint(20) YES 9223372036854775807 +pk int(11) NO PRI NULL auto_increment +INSERT INTO t1 (c,c1,c2) VALUES (NULL,NULL,NULL); +INSERT INTO t1 (c,c1,c2) VALUES (9223372036854775807,9223372036854775807,9223372036854775807); +INSERT INTO t1 () VALUES (); +SELECT pk, HEX(c), HEX(c1), HEX(c2) FROM t1 ORDER BY pk; +pk HEX(c) HEX(c1) HEX(c2) +1 NULL NULL NULL +2 7FFFFFFFFFFFFFFF 7FFFFFFFFFFFFFFF 7FFFFFFFFFFFFFFF +3 NULL NULL 7FFFFFFFFFFFFFFF +SELECT pk, HEX(c2) FROM t1 ORDER BY pk; +pk HEX(c2) +1 NULL +2 7FFFFFFFFFFFFFFF +3 7FFFFFFFFFFFFFFF +DROP TABLE t1; +######################## +# SET columns +######################## +DROP TABLE IF EXISTS t1; 
+CREATE TABLE t1 ( +a SET('') NULL, +b SET('test1','test2','test3','test4','test5') NULL, +c SET('01','02','03','04','05','06','07','08','09','10','11','12','13','14','15','16','17','18','19','20','21','22','23','24','25','26','27','28','29','30','31','32','33','34','35','36','37','38','39','40','41','42','43','44','45','46','47','48','49','50''51','52','53','54','55','56','57','58','59','60','61','62','63','64') NULL, +PRIMARY KEY (c) +) ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +a set('') YES NULL +b set('test1','test2','test3','test4','test5') YES NULL +c set('01','02','03','04','05','06','07','08','09','10','11','12','13','14','15','16','17','18','19','20','21','22','23','24','25','26','27','28','29','30','31','32','33','34','35','36','37','38','39','40','41','42','43','44','45','46','47','48','49','50''51','52','53','54','55','56','57','58','59','60','61','62','63','64') NO PRI NULL +INSERT INTO t1 (a,b,c) VALUES +('','test2,test3','01,34,44,,23'), +('',5,2), +(',','test4,test2',''); +Warnings: +Warning 1265 Data truncated for column 'c' at row 1 +SELECT a,b,c FROM t1; +a b c + test1,test3 02 + test2,test3 01,23,34,44 + test2,test4 +INSERT INTO t1 (a,b,c) VALUES (0,'test6',-1); +Warnings: +Warning 1265 Data truncated for column 'b' at row 1 +Warning 1265 Data truncated for column 'c' at row 1 +SELECT a,b,c FROM t1; +a b c + 01,02,03,04,05,06,07,08,09,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50'51,52,53,54,55,56,57,58,59,60,61,62,63,64 + test1,test3 02 + test2,test3 01,23,34,44 + test2,test4 +ALTER TABLE t1 ADD COLUMN e SET('a','A') NULL; +Warnings: +Note 1291 Column 'e' has duplicated value 'a' in SET +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +a set('') YES NULL +b set('test1','test2','test3','test4','test5') YES NULL +c 
set('01','02','03','04','05','06','07','08','09','10','11','12','13','14','15','16','17','18','19','20','21','22','23','24','25','26','27','28','29','30','31','32','33','34','35','36','37','38','39','40','41','42','43','44','45','46','47','48','49','50''51','52','53','54','55','56','57','58','59','60','61','62','63','64') NO PRI NULL +e set('a','A') YES NULL +ALTER TABLE t1 ADD COLUMN f SET('1','2','3','4','5','6','7','8','9','a','b','c','d','e','f','g','h','i','j','k','l','m','n','o','p','q','r','s','t','u','v','w','x','y','z',' ','11','12','13','14','15','16','17','18','19','1a','1b','1c','1d','1e','1f','1g','1h','1i','1j','1k','1l','1m','1n','1o','1p','1q','1r','1s','1t','1u','1v','1w','1x','1y','1z','20','21','22','23','24','25','26','27','28','29','2a','2b','2c','2d','2e','2f','2g','2h','2i','2j','2k','2l','2m','2n','2o','2p','2q','2r','2s','2t','2u','2v','2w','2x','2y','2z','30','31','32','33','34','35','36','37','38','39','3a','3b','3c','3d','3e','3f','3g','3h','3i') NULL; +ERROR HY000: Too many strings for column f and SET +SELECT a,b,c,e FROM t1 WHERE FIND_IN_SET('test2',b)>0 OR a != ''; +a b c e + test2,test3 01,23,34,44 NULL + test2,test4 NULL +DROP TABLE t1; +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 ( +c SET('test1','test2','test3') NULL, +c1 SET('test1','test2','test3') NULL DEFAULT NULL, +c2 SET('test1','test2','test3') NULL DEFAULT 'test2,test3', +pk INT AUTO_INCREMENT PRIMARY KEY +) ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +c set('test1','test2','test3') YES NULL +c1 set('test1','test2','test3') YES NULL +c2 set('test1','test2','test3') YES test2,test3 +pk int(11) NO PRI NULL auto_increment +INSERT INTO t1 (c,c1,c2) VALUES (NULL,NULL,NULL); +INSERT INTO t1 (c,c1,c2) VALUES ('test2,test3','test2,test3','test2,test3'); +INSERT INTO t1 () VALUES (); +SELECT pk, HEX(c), HEX(c1), HEX(c2) FROM t1 ORDER BY pk; +pk HEX(c) HEX(c1) HEX(c2) +1 NULL NULL NULL +2 74657374322C7465737433 74657374322C7465737433 74657374322C7465737433 
+3 NULL NULL 74657374322C7465737433 +SELECT pk, HEX(c2) FROM t1 ORDER BY pk; +pk HEX(c2) +1 NULL +2 74657374322C7465737433 +3 74657374322C7465737433 +DROP TABLE t1; +######################## +# TEXT columns +######################## +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 ( +pk INT AUTO_INCREMENT PRIMARY KEY, +t TEXT NULL, +t0 TEXT(0) NULL, +t1 TEXT(1) NULL, +t300 TEXT(300) NULL, +tm TEXT(65535) NULL, +t70k TEXT(70000) NULL, +t17m TEXT(17000000) NULL, +tt TINYTEXT NULL, +m MEDIUMTEXT NULL, +l LONGTEXT NULL +) ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +pk int(11) NO PRI NULL auto_increment +t text YES NULL +t0 text YES NULL +t1 tinytext YES NULL +t300 text YES NULL +tm text YES NULL +t70k mediumtext YES NULL +t17m longtext YES NULL +tt tinytext YES NULL +m mediumtext YES NULL +l longtext YES NULL +INSERT INTO t1 (t,t0,t1,t300,tm,t70k,t17m,tt,m,l) VALUES +('','','','','','','','','',''), +('a','b','c','d','e','f','g','h','i','j'), +('test1','test2','test3','test4','test5','test6','test7','test8','test9','test10'), +( REPEAT('a',65535), REPEAT('b',65535), REPEAT('c',255), REPEAT('d',65535), REPEAT('e',65535), REPEAT('f',1048576), REPEAT('g',1048576), REPEAT('h',255), REPEAT('i',1048576), REPEAT('j',1048576) ); +SELECT LENGTH(t), LENGTH(t0), LENGTH(t1), LENGTH(t300), LENGTH(tm), LENGTH(t70k), LENGTH(t17m), LENGTH(tt), LENGTH(m), LENGTH(l) FROM t1; +LENGTH(t) LENGTH(t0) LENGTH(t1) LENGTH(t300) LENGTH(tm) LENGTH(t70k) LENGTH(t17m) LENGTH(tt) LENGTH(m) LENGTH(l) +0 0 0 0 0 0 0 0 0 0 +1 1 1 1 1 1 1 1 1 1 +5 5 5 5 5 5 5 5 5 6 +65535 65535 255 65535 65535 1048576 1048576 255 1048576 1048576 +INSERT INTO t1 (t,t0,t1,t300,tm,t70k,t17m,tt,m,l) VALUES +( REPEAT('a',65536), REPEAT('b',65536), REPEAT('c',256), REPEAT('d',65536), REPEAT('e',65536), REPEAT('f',1048576), REPEAT('g',1048576), REPEAT('h',256), REPEAT('i',1048576), REPEAT('j',1048576) ); +Warnings: +Warning 1265 Data truncated for column 't' at row 1 +Warning 1265 Data truncated for 
column 't0' at row 1 +Warning 1265 Data truncated for column 't1' at row 1 +Warning 1265 Data truncated for column 't300' at row 1 +Warning 1265 Data truncated for column 'tm' at row 1 +Warning 1265 Data truncated for column 'tt' at row 1 +SELECT LENGTH(t), LENGTH(t0), LENGTH(t1), LENGTH(t300), LENGTH(tm), LENGTH(t70k), LENGTH(t17m), LENGTH(tt), LENGTH(m), LENGTH(l) FROM t1; +LENGTH(t) LENGTH(t0) LENGTH(t1) LENGTH(t300) LENGTH(tm) LENGTH(t70k) LENGTH(t17m) LENGTH(tt) LENGTH(m) LENGTH(l) +0 0 0 0 0 0 0 0 0 0 +1 1 1 1 1 1 1 1 1 1 +5 5 5 5 5 5 5 5 5 6 +65535 65535 255 65535 65535 1048576 1048576 255 1048576 1048576 +65535 65535 255 65535 65535 1048576 1048576 255 1048576 1048576 +ALTER TABLE t1 ADD COLUMN ttt TEXT(4294967296) NULL; +ERROR 42000: Display width out of range for 'ttt' (max = 4294967295) +DROP TABLE t1; +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 ( +c TEXT NULL, +c1 TEXT NULL DEFAULT NULL, +c2 TEXT NULL DEFAULT '', +pk INT AUTO_INCREMENT PRIMARY KEY +) ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +c text YES NULL +c1 text YES NULL +c2 text YES '' +pk int(11) NO PRI NULL auto_increment +INSERT INTO t1 (c,c1,c2) VALUES (NULL,NULL,NULL); +INSERT INTO t1 (c,c1,c2) VALUES ('','',''); +INSERT INTO t1 () VALUES (); +SELECT pk, HEX(c), HEX(c1), HEX(c2) FROM t1 ORDER BY pk; +pk HEX(c) HEX(c1) HEX(c2) +1 NULL NULL NULL +2 +3 NULL NULL +SELECT pk, HEX(c2) FROM t1 ORDER BY pk; +pk HEX(c2) +1 NULL +2 +3 +DROP TABLE t1; +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 ( +c TINYTEXT NULL, +c1 TINYTEXT NULL DEFAULT NULL, +c2 TINYTEXT NULL DEFAULT '', +pk INT AUTO_INCREMENT PRIMARY KEY +) ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +c tinytext YES NULL +c1 tinytext YES NULL +c2 tinytext YES '' +pk int(11) NO PRI NULL auto_increment +INSERT INTO t1 (c,c1,c2) VALUES (NULL,NULL,NULL); +INSERT INTO t1 (c,c1,c2) VALUES ('','',''); +INSERT INTO t1 () VALUES (); +SELECT pk, HEX(c), HEX(c1), HEX(c2) FROM t1 ORDER BY pk; +pk HEX(c) 
HEX(c1) HEX(c2) +1 NULL NULL NULL +2 +3 NULL NULL +SELECT pk, HEX(c2) FROM t1 ORDER BY pk; +pk HEX(c2) +1 NULL +2 +3 +DROP TABLE t1; +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 ( +c MEDIUMTEXT NULL, +c1 MEDIUMTEXT NULL DEFAULT NULL, +c2 MEDIUMTEXT NULL DEFAULT '', +pk INT AUTO_INCREMENT PRIMARY KEY +) ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +c mediumtext YES NULL +c1 mediumtext YES NULL +c2 mediumtext YES '' +pk int(11) NO PRI NULL auto_increment +INSERT INTO t1 (c,c1,c2) VALUES (NULL,NULL,NULL); +INSERT INTO t1 (c,c1,c2) VALUES ('','',''); +INSERT INTO t1 () VALUES (); +SELECT pk, HEX(c), HEX(c1), HEX(c2) FROM t1 ORDER BY pk; +pk HEX(c) HEX(c1) HEX(c2) +1 NULL NULL NULL +2 +3 NULL NULL +SELECT pk, HEX(c2) FROM t1 ORDER BY pk; +pk HEX(c2) +1 NULL +2 +3 +DROP TABLE t1; +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 ( +c LONGTEXT NULL, +c1 LONGTEXT NULL DEFAULT NULL, +c2 LONGTEXT NULL DEFAULT '', +pk INT AUTO_INCREMENT PRIMARY KEY +) ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +c longtext YES NULL +c1 longtext YES NULL +c2 longtext YES '' +pk int(11) NO PRI NULL auto_increment +INSERT INTO t1 (c,c1,c2) VALUES (NULL,NULL,NULL); +INSERT INTO t1 (c,c1,c2) VALUES ('','',''); +INSERT INTO t1 () VALUES (); +SELECT pk, HEX(c), HEX(c1), HEX(c2) FROM t1 ORDER BY pk; +pk HEX(c) HEX(c1) HEX(c2) +1 NULL NULL NULL +2 +3 NULL NULL +SELECT pk, HEX(c2) FROM t1 ORDER BY pk; +pk HEX(c2) +1 NULL +2 +3 +DROP TABLE t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/col_opt_unsigned.result b/storage/rocksdb/mysql-test/rocksdb/r/col_opt_unsigned.result new file mode 100644 index 0000000000000..ef85ad1c23781 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/col_opt_unsigned.result @@ -0,0 +1,739 @@ +######################## +# Fixed point columns (NUMERIC, DECIMAL) +######################## +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 ( +d DECIMAL UNSIGNED, +d0 DECIMAL(0) UNSIGNED, +d1_1 DECIMAL(1,1) UNSIGNED, +d10_2 
DECIMAL(10,2) UNSIGNED, +d60_10 DECIMAL(60,10) UNSIGNED, +n NUMERIC UNSIGNED, +n0_0 NUMERIC(0,0) UNSIGNED, +n1 NUMERIC(1) UNSIGNED, +n20_4 NUMERIC(20,4) UNSIGNED, +n65_4 NUMERIC(65,4) UNSIGNED, +pk NUMERIC UNSIGNED PRIMARY KEY +) ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +d decimal(10,0) unsigned YES NULL +d0 decimal(10,0) unsigned YES NULL +d1_1 decimal(1,1) unsigned YES NULL +d10_2 decimal(10,2) unsigned YES NULL +d60_10 decimal(60,10) unsigned YES NULL +n decimal(10,0) unsigned YES NULL +n0_0 decimal(10,0) unsigned YES NULL +n1 decimal(1,0) unsigned YES NULL +n20_4 decimal(20,4) unsigned YES NULL +n65_4 decimal(65,4) unsigned YES NULL +pk decimal(10,0) unsigned NO PRI NULL +INSERT INTO t1 (d,d0,d1_1,d10_2,d60_10,n,n0_0,n1,n20_4,n65_4,pk) VALUES (100,123456,0.3,40000.25,123456789123456789.10001,1024,7000.0,8.0,999999.9,9223372036854775807,1); +INSERT INTO t1 (d,d0,d1_1,d10_2,d60_10,n,n0_0,n1,n20_4,n65_4,pk) VALUES (0,0,0,0,0,0,0,0,0,0,2); +INSERT INTO t1 (d,d0,d1_1,d10_2,d60_10,n,n0_0,n1,n20_4,n65_4,pk) VALUES (9999999999.0,9999999999.0,0.9,99999999.99,99999999999999999999999999999999999999999999999999.9999999999,9999999999.0,9999999999.0,9.0,9999999999999999.9999,9999999999999999999999999999999999999999999999999999999999999.9999,3); +SELECT d,d0,d1_1,d10_2,d60_10,n,n0_0,n1,n20_4,n65_4 FROM t1; +d d0 d1_1 d10_2 d60_10 n n0_0 n1 n20_4 n65_4 +0 0 0.0 0.00 0.0000000000 0 0 0 0.0000 0.0000 +100 123456 0.3 40000.25 123456789123456789.1000100000 1024 7000 8 999999.9000 9223372036854775807.0000 +9999999999 9999999999 0.9 99999999.99 99999999999999999999999999999999999999999999999999.9999999999 9999999999 9999999999 9 9999999999999999.9999 9999999999999999999999999999999999999999999999999999999999999.9999 +INSERT INTO t1 (d,d0,d1_1,d10_2,d60_10,n,n0_0,n1,n20_4,n65_4,pk) VALUES (-100,-123456,-0.3,-40000.25,-123456789123456789.10001,-1024,-7000.0,-8.0,-999999.9,-9223372036854775807,4); +Warnings: +Warning 1264 Out of range value for column 'd' 
at row 1 +Warning 1264 Out of range value for column 'd0' at row 1 +Warning 1264 Out of range value for column 'd1_1' at row 1 +Warning 1264 Out of range value for column 'd10_2' at row 1 +Warning 1264 Out of range value for column 'd60_10' at row 1 +Warning 1264 Out of range value for column 'n' at row 1 +Warning 1264 Out of range value for column 'n0_0' at row 1 +Warning 1264 Out of range value for column 'n1' at row 1 +Warning 1264 Out of range value for column 'n20_4' at row 1 +Warning 1264 Out of range value for column 'n65_4' at row 1 +INSERT INTO t1 (d,d0,d1_1,d10_2,d60_10,n,n0_0,n1,n20_4,n65_4,pk) VALUES (-9999999999.0,-9999999999.0,-0.9,-99999999.99,-99999999999999999999999999999999999999999999999999.9999999999,-9999999999.0,-9999999999.0,-9.0,-9999999999999999.9999,-9999999999999999999999999999999999999999999999999999999999999.9999,5); +Warnings: +Warning 1264 Out of range value for column 'd' at row 1 +Warning 1264 Out of range value for column 'd0' at row 1 +Warning 1264 Out of range value for column 'd1_1' at row 1 +Warning 1264 Out of range value for column 'd10_2' at row 1 +Warning 1264 Out of range value for column 'd60_10' at row 1 +Warning 1264 Out of range value for column 'n' at row 1 +Warning 1264 Out of range value for column 'n0_0' at row 1 +Warning 1264 Out of range value for column 'n1' at row 1 +Warning 1264 Out of range value for column 'n20_4' at row 1 +Warning 1264 Out of range value for column 'n65_4' at row 1 +SELECT d,d0,d1_1,d10_2,d60_10,n,n0_0,n1,n20_4,n65_4 FROM t1; +d d0 d1_1 d10_2 d60_10 n n0_0 n1 n20_4 n65_4 +0 0 0.0 0.00 0.0000000000 0 0 0 0.0000 0.0000 +0 0 0.0 0.00 0.0000000000 0 0 0 0.0000 0.0000 +0 0 0.0 0.00 0.0000000000 0 0 0 0.0000 0.0000 +100 123456 0.3 40000.25 123456789123456789.1000100000 1024 7000 8 999999.9000 9223372036854775807.0000 +9999999999 9999999999 0.9 99999999.99 99999999999999999999999999999999999999999999999999.9999999999 9999999999 9999999999 9 9999999999999999.9999 
9999999999999999999999999999999999999999999999999999999999999.9999 +SELECT d,d0,d1_1,d10_2,d60_10,n,n0_0,n1,n20_4,n65_4 FROM t1 WHERE n20_4 = 9999999999999999.9999 OR d < 100; +d d0 d1_1 d10_2 d60_10 n n0_0 n1 n20_4 n65_4 +0 0 0.0 0.00 0.0000000000 0 0 0 0.0000 0.0000 +0 0 0.0 0.00 0.0000000000 0 0 0 0.0000 0.0000 +0 0 0.0 0.00 0.0000000000 0 0 0 0.0000 0.0000 +9999999999 9999999999 0.9 99999999.99 99999999999999999999999999999999999999999999999999.9999999999 9999999999 9999999999 9 9999999999999999.9999 9999999999999999999999999999999999999999999999999999999999999.9999 +INSERT INTO t1 (d,d0,d1_1,d10_2,d60_10,n,n0_0,n1,n20_4,n65_4,pk) VALUES ( +9999999999999999999999999999999999999999999999999999999999999.9999, +9999999999999999999999999999999999999999999999999999999999999.9999, +9999999999999999999999999999999999999999999999999999999999999.9999, +9999999999999999999999999999999999999999999999999999999999999.9999, +9999999999999999999999999999999999999999999999999999999999999.9999, +9999999999999999999999999999999999999999999999999999999999999.9999, +9999999999999999999999999999999999999999999999999999999999999.9999, +9999999999999999999999999999999999999999999999999999999999999.9999, +9999999999999999999999999999999999999999999999999999999999999.9999, +9999999999999999999999999999999999999999999999999999999999999.9999, +6 +); +Warnings: +Warning 1264 Out of range value for column 'd' at row 1 +Warning 1264 Out of range value for column 'd0' at row 1 +Warning 1264 Out of range value for column 'd1_1' at row 1 +Warning 1264 Out of range value for column 'd10_2' at row 1 +Warning 1264 Out of range value for column 'd60_10' at row 1 +Warning 1264 Out of range value for column 'n' at row 1 +Warning 1264 Out of range value for column 'n0_0' at row 1 +Warning 1264 Out of range value for column 'n1' at row 1 +Warning 1264 Out of range value for column 'n20_4' at row 1 +SELECT d,d0,d1_1,d10_2,d60_10,n,n0_0,n1,n20_4,n65_4 FROM t1; +d d0 d1_1 d10_2 d60_10 n n0_0 n1 n20_4 
n65_4 +0 0 0.0 0.00 0.0000000000 0 0 0 0.0000 0.0000 +0 0 0.0 0.00 0.0000000000 0 0 0 0.0000 0.0000 +0 0 0.0 0.00 0.0000000000 0 0 0 0.0000 0.0000 +100 123456 0.3 40000.25 123456789123456789.1000100000 1024 7000 8 999999.9000 9223372036854775807.0000 +9999999999 9999999999 0.9 99999999.99 99999999999999999999999999999999999999999999999999.9999999999 9999999999 9999999999 9 9999999999999999.9999 9999999999999999999999999999999999999999999999999999999999999.9999 +9999999999 9999999999 0.9 99999999.99 99999999999999999999999999999999999999999999999999.9999999999 9999999999 9999999999 9 9999999999999999.9999 9999999999999999999999999999999999999999999999999999999999999.9999 +INSERT INTO t1 (d,d0,d1_1,d10_2,d60_10,n,n0_0,n1,n20_4,n65_4,pk) VALUES (10000000000.0,10000000000.0,1.1,100000000.99,100000000000000000000000000000000000000000000000000.0,10000000000.0,10000000000.0,10.0,10000000000000000.9999,10000000000000000000000000000000000000000000000000000000000000.9999,7); +Warnings: +Warning 1264 Out of range value for column 'd' at row 1 +Warning 1264 Out of range value for column 'd0' at row 1 +Warning 1264 Out of range value for column 'd1_1' at row 1 +Warning 1264 Out of range value for column 'd10_2' at row 1 +Warning 1264 Out of range value for column 'd60_10' at row 1 +Warning 1264 Out of range value for column 'n' at row 1 +Warning 1264 Out of range value for column 'n0_0' at row 1 +Warning 1264 Out of range value for column 'n1' at row 1 +Warning 1264 Out of range value for column 'n20_4' at row 1 +Warning 1264 Out of range value for column 'n65_4' at row 1 +SELECT d,d0,d1_1,d10_2,d60_10,n,n0_0,n1,n20_4,n65_4 FROM t1; +d d0 d1_1 d10_2 d60_10 n n0_0 n1 n20_4 n65_4 +0 0 0.0 0.00 0.0000000000 0 0 0 0.0000 0.0000 +0 0 0.0 0.00 0.0000000000 0 0 0 0.0000 0.0000 +0 0 0.0 0.00 0.0000000000 0 0 0 0.0000 0.0000 +100 123456 0.3 40000.25 123456789123456789.1000100000 1024 7000 8 999999.9000 9223372036854775807.0000 +9999999999 9999999999 0.9 99999999.99 
99999999999999999999999999999999999999999999999999.9999999999 9999999999 9999999999 9 9999999999999999.9999 9999999999999999999999999999999999999999999999999999999999999.9999 +9999999999 9999999999 0.9 99999999.99 99999999999999999999999999999999999999999999999999.9999999999 9999999999 9999999999 9 9999999999999999.9999 9999999999999999999999999999999999999999999999999999999999999.9999 +9999999999 9999999999 0.9 99999999.99 99999999999999999999999999999999999999999999999999.9999999999 9999999999 9999999999 9 9999999999999999.9999 9999999999999999999999999999999999999999999999999999999999999.9999 +INSERT INTO t1 (d,d0,d1_1,d10_2,d60_10,n,n0_0,n1,n20_4,n65_4,pk) VALUES (9999999999.1,9999999999.1,1.9,99999999.001,99999999999999999999999999999999999999999999999999.99999999991,9999999999.1,9999999999.1,9.1,9999999999999999.00001,9999999999999999999999999999999999999999999999999999999999999.11111,8); +Warnings: +Note 1265 Data truncated for column 'd' at row 1 +Note 1265 Data truncated for column 'd0' at row 1 +Warning 1264 Out of range value for column 'd1_1' at row 1 +Note 1265 Data truncated for column 'd10_2' at row 1 +Note 1265 Data truncated for column 'd60_10' at row 1 +Note 1265 Data truncated for column 'n' at row 1 +Note 1265 Data truncated for column 'n0_0' at row 1 +Note 1265 Data truncated for column 'n1' at row 1 +Note 1265 Data truncated for column 'n20_4' at row 1 +Note 1265 Data truncated for column 'n65_4' at row 1 +SELECT d,d0,d1_1,d10_2,d60_10,n,n0_0,n1,n20_4,n65_4 FROM t1; +d d0 d1_1 d10_2 d60_10 n n0_0 n1 n20_4 n65_4 +0 0 0.0 0.00 0.0000000000 0 0 0 0.0000 0.0000 +0 0 0.0 0.00 0.0000000000 0 0 0 0.0000 0.0000 +0 0 0.0 0.00 0.0000000000 0 0 0 0.0000 0.0000 +100 123456 0.3 40000.25 123456789123456789.1000100000 1024 7000 8 999999.9000 9223372036854775807.0000 +9999999999 9999999999 0.9 99999999.00 99999999999999999999999999999999999999999999999999.9999999999 9999999999 9999999999 9 9999999999999999.0000 
9999999999999999999999999999999999999999999999999999999999999.1111 +9999999999 9999999999 0.9 99999999.99 99999999999999999999999999999999999999999999999999.9999999999 9999999999 9999999999 9 9999999999999999.9999 9999999999999999999999999999999999999999999999999999999999999.9999 +9999999999 9999999999 0.9 99999999.99 99999999999999999999999999999999999999999999999999.9999999999 9999999999 9999999999 9 9999999999999999.9999 9999999999999999999999999999999999999999999999999999999999999.9999 +9999999999 9999999999 0.9 99999999.99 99999999999999999999999999999999999999999999999999.9999999999 9999999999 9999999999 9 9999999999999999.9999 9999999999999999999999999999999999999999999999999999999999999.9999 +ALTER TABLE t1 ADD COLUMN n66 NUMERIC(66) UNSIGNED; +ERROR 42000: Too big precision 66 specified for 'n66'. Maximum is 65 +ALTER TABLE t1 ADD COLUMN n66_6 DECIMAL(66,6) UNSIGNED; +ERROR 42000: Too big precision 66 specified for 'n66_6'. Maximum is 65 +ALTER TABLE t1 ADD COLUMN n66_66 DECIMAL(66,66) UNSIGNED; +ERROR 42000: Too big scale 66 specified for 'n66_66'. 
Maximum is 38 +DROP TABLE t1; +CREATE TABLE t1 ( +a DECIMAL UNSIGNED, +b NUMERIC UNSIGNED, +PRIMARY KEY (a) +) ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +a decimal(10,0) unsigned NO PRI NULL +b decimal(10,0) unsigned YES NULL +INSERT INTO t1 (a,b) VALUES (1.0,-1.0); +Warnings: +Warning 1264 Out of range value for column 'b' at row 1 +INSERT INTO t1 (a,b) VALUES (-100,100); +Warnings: +Warning 1264 Out of range value for column 'a' at row 1 +SELECT a,b FROM t1; +a b +0 100 +1 0 +DROP TABLE t1; +######################## +# Floating point columns (FLOAT, DOUBLE) +######################## +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 ( +f FLOAT UNSIGNED, +f0 FLOAT(0) UNSIGNED, +r1_1 REAL(1,1) UNSIGNED, +f23_0 FLOAT(23) UNSIGNED, +f20_3 FLOAT(20,3) UNSIGNED, +d DOUBLE UNSIGNED, +d1_0 DOUBLE(1,0) UNSIGNED, +d10_10 DOUBLE PRECISION (10,10) UNSIGNED, +d53 DOUBLE(53,0) UNSIGNED, +d53_10 DOUBLE(53,10) UNSIGNED, +pk DOUBLE UNSIGNED PRIMARY KEY +) ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +f float unsigned YES NULL +f0 float unsigned YES NULL +r1_1 double(1,1) unsigned YES NULL +f23_0 float unsigned YES NULL +f20_3 float(20,3) unsigned YES NULL +d double unsigned YES NULL +d1_0 double(1,0) unsigned YES NULL +d10_10 double(10,10) unsigned YES NULL +d53 double(53,0) unsigned YES NULL +d53_10 double(53,10) unsigned YES NULL +pk double unsigned NO PRI NULL +INSERT INTO t1 (f,f0,r1_1,f23_0,f20_3,d,d1_0,d10_10,d53,d53_10,pk) VALUES (12345.12345,12345.12345,0.9,123456789.123,56789.987,11111111.111,8.0,0.0123456789,1234566789123456789,99999999999999999.99999999,1); +SELECT f,f0,r1_1,f23_0,f20_3,d,d1_0,d10_10,d53,d53_10 FROM t1; +f 12345.1 +d 11111111.111 +d10_10 0.0123456789 +d1_0 8 +d53 1234566789123456800 +d53_10 100000000000000000.0000000000 +f0 12345.1 +f20_3 56789.988 +f23_0 123457000 +r1_1 0.9 +INSERT INTO t1 (f,f0,r1_1,f23_0,f20_3,d,d1_0,d10_10,d53,d53_10,pk) VALUES (0,0,0,0,0,0,0,0,0,0,2); +INSERT INTO t1 
(f,f0,r1_1,f23_0,f20_3,d,d1_0,d10_10,d53,d53_10,pk) VALUES ( +99999999999999999999999999999999999999, +99999999999999999999999999999999999999.9999999999999999, +0.9, +99999999999999999999999999999999999999.9, +99999999999999999.999, +999999999999999999999999999999999999999999999999999999999999999999999999999999999, +9, +0.9999999999, +1999999999999999999999999999999999999999999999999999999, +19999999999999999999999999999999999999999999.9999999999, +3 +); +Warnings: +Warning 1264 Out of range value for column 'd53' at row 1 +Warning 1264 Out of range value for column 'd53_10' at row 1 +SELECT f,f0,r1_1,f23_0,f20_3,d,d1_0,d10_10,d53,d53_10 FROM t1; +f 12345.1 +d 0 +d 11111111.111 +d 1e81 +d10_10 0.0000000000 +d10_10 0.0123456789 +d10_10 0.9999999999 +d1_0 0 +d1_0 8 +d1_0 9 +d53 0 +d53 100000000000000000000000000000000000000000000000000000 +d53 1234566789123456800 +d53_10 0.0000000000 +d53_10 100000000000000000.0000000000 +d53_10 10000000000000000000000000000000000000000000.0000000000 +f 0 +f 1e38 +f0 0 +f0 12345.1 +f0 1e38 +f20_3 0.000 +f20_3 56789.988 +f20_3 99999998430674940.000 +f23_0 0 +f23_0 123457000 +f23_0 1e38 +r1_1 0.0 +r1_1 0.9 +r1_1 0.9 +INSERT INTO t1 (f,f0,r1_1,f23_0,f20_3,d,d1_0,d10_10,d53,d53_10,pk) VALUES (-999999999999999999999999,-99999999999.999999999999,-0.9,-999.99999999999999999999,-99999999999999999.999,-999999999999999999999999999999999999999999999999999999999999-0.999,-9,-.9999999999,-999999999999999999999999999999.99999999999999999999999,-9999999999999999999999999999999999999999999.9999999999,4); +Warnings: +Warning 1264 Out of range value for column 'f' at row 1 +Warning 1264 Out of range value for column 'f0' at row 1 +Warning 1264 Out of range value for column 'r1_1' at row 1 +Warning 1264 Out of range value for column 'f23_0' at row 1 +Warning 1264 Out of range value for column 'f20_3' at row 1 +Warning 1264 Out of range value for column 'd' at row 1 +Warning 1264 Out of range value for column 'd1_0' at row 1 +Warning 1264 Out of range 
value for column 'd10_10' at row 1 +Warning 1264 Out of range value for column 'd53' at row 1 +Warning 1264 Out of range value for column 'd53_10' at row 1 +SELECT f,f0,r1_1,f23_0,f20_3,d,d1_0,d10_10,d53,d53_10 FROM t1; +f 12345.1 +d 0 +d 0 +d 11111111.111 +d 1e81 +d10_10 0.0000000000 +d10_10 0.0000000000 +d10_10 0.0123456789 +d10_10 0.9999999999 +d1_0 0 +d1_0 0 +d1_0 8 +d1_0 9 +d53 0 +d53 0 +d53 100000000000000000000000000000000000000000000000000000 +d53 1234566789123456800 +d53_10 0.0000000000 +d53_10 0.0000000000 +d53_10 100000000000000000.0000000000 +d53_10 10000000000000000000000000000000000000000000.0000000000 +f 0 +f 0 +f 1e38 +f0 0 +f0 0 +f0 12345.1 +f0 1e38 +f20_3 0.000 +f20_3 0.000 +f20_3 56789.988 +f20_3 99999998430674940.000 +f23_0 0 +f23_0 0 +f23_0 123457000 +f23_0 1e38 +r1_1 0.0 +r1_1 0.0 +r1_1 0.9 +r1_1 0.9 +SELECT MAX(f), MAX(f0), MAX(r1_1), MAX(f23_0), MAX(f20_3), MAX(d), MAX(d1_0), MAX(d10_10), MAX(d53), MAX(d53_10) FROM t1; +MAX(f) 9.999999680285692e37 +MAX(d) 1e81 +MAX(d10_10) 0.9999999999 +MAX(d1_0) 9 +MAX(d53) 100000000000000000000000000000000000000000000000000000 +MAX(d53_10) 10000000000000000000000000000000000000000000.0000000000 +MAX(f0) 9.999999680285692e37 +MAX(f20_3) 99999998430674940.000 +MAX(f23_0) 9.999999680285692e37 +MAX(r1_1) 0.9 +INSERT INTO t1 (f,f0,r1_1,f23_0,f20_3,d,d1_0,d10_10,d53,d53_10,pk) VALUES ( +9999999999999999999999999999999999999999999999999999999999999.9999, +9999999999999999999999999999999999999999999999999999999999999.9999, +9999999999999999999999999999999999999999999999999999999999999.9999, +9999999999999999999999999999999999999999999999999999999999999.9999, +9999999999999999999999999999999999999999999999999999999999999.9999, +9999999999999999999999999999999999999999999999999999999999999.9999, +9999999999999999999999999999999999999999999999999999999999999.9999, +9999999999999999999999999999999999999999999999999999999999999.9999, +9999999999999999999999999999999999999999999999999999999999999.9999, 
+9999999999999999999999999999999999999999999999999999999999999.9999, +5 +); +Warnings: +Warning 1264 Out of range value for column 'f' at row 1 +Warning 1264 Out of range value for column 'f0' at row 1 +Warning 1264 Out of range value for column 'r1_1' at row 1 +Warning 1264 Out of range value for column 'f23_0' at row 1 +Warning 1264 Out of range value for column 'f20_3' at row 1 +Warning 1264 Out of range value for column 'd1_0' at row 1 +Warning 1264 Out of range value for column 'd10_10' at row 1 +Warning 1264 Out of range value for column 'd53' at row 1 +Warning 1264 Out of range value for column 'd53_10' at row 1 +SELECT f,f0,r1_1,f23_0,f20_3,d,d1_0,d10_10,d53,d53_10 FROM t1; +f 12345.1 +d 0 +d 0 +d 11111111.111 +d 1e61 +d 1e81 +d10_10 0.0000000000 +d10_10 0.0000000000 +d10_10 0.0123456789 +d10_10 0.9999999999 +d10_10 0.9999999999 +d1_0 0 +d1_0 0 +d1_0 8 +d1_0 9 +d1_0 9 +d53 0 +d53 0 +d53 100000000000000000000000000000000000000000000000000000 +d53 100000000000000000000000000000000000000000000000000000 +d53 1234566789123456800 +d53_10 0.0000000000 +d53_10 0.0000000000 +d53_10 100000000000000000.0000000000 +d53_10 10000000000000000000000000000000000000000000.0000000000 +d53_10 10000000000000000000000000000000000000000000.0000000000 +f 0 +f 0 +f 1e38 +f 3.40282e38 +f0 0 +f0 0 +f0 12345.1 +f0 1e38 +f0 3.40282e38 +f20_3 0.000 +f20_3 0.000 +f20_3 56789.988 +f20_3 99999998430674940.000 +f20_3 99999998430674940.000 +f23_0 0 +f23_0 0 +f23_0 123457000 +f23_0 1e38 +f23_0 3.40282e38 +r1_1 0.0 +r1_1 0.0 +r1_1 0.9 +r1_1 0.9 +r1_1 0.9 +INSERT INTO t1 (f,f0,r1_1,f23_0,f20_3,d,d1_0,d10_10,d53,d53_10,pk) VALUES ( +999999999999999999999999999999999999999, +999999999999999999999999999999999999999.9999999999999999, +1.9, +999999999999999999999999999999999999999.9, +999999999999999999.999, +9999999999999999999999999999999999999999999999999999999999999999999999999999999999, +99, +1.9999999999, +1999999999999999999999999999999999999999999999999999999, 
+19999999999999999999999999999999999999999999.9999999999, +6 +); +Warnings: +Warning 1916 Got overflow when converting '' to DECIMAL. Value truncated +Warning 1264 Out of range value for column 'f' at row 1 +Warning 1264 Out of range value for column 'f0' at row 1 +Warning 1264 Out of range value for column 'r1_1' at row 1 +Warning 1264 Out of range value for column 'f23_0' at row 1 +Warning 1264 Out of range value for column 'f20_3' at row 1 +Warning 1264 Out of range value for column 'd1_0' at row 1 +Warning 1264 Out of range value for column 'd10_10' at row 1 +Warning 1264 Out of range value for column 'd53' at row 1 +Warning 1264 Out of range value for column 'd53_10' at row 1 +SELECT f,f0,r1_1,f23_0,f20_3,d,d1_0,d10_10,d53,d53_10 FROM t1; +f 12345.1 +d 0 +d 0 +d 11111111.111 +d 1e61 +d 1e65 +d 1e81 +d10_10 0.0000000000 +d10_10 0.0000000000 +d10_10 0.0123456789 +d10_10 0.9999999999 +d10_10 0.9999999999 +d10_10 0.9999999999 +d1_0 0 +d1_0 0 +d1_0 8 +d1_0 9 +d1_0 9 +d1_0 9 +d53 0 +d53 0 +d53 100000000000000000000000000000000000000000000000000000 +d53 100000000000000000000000000000000000000000000000000000 +d53 100000000000000000000000000000000000000000000000000000 +d53 1234566789123456800 +d53_10 0.0000000000 +d53_10 0.0000000000 +d53_10 100000000000000000.0000000000 +d53_10 10000000000000000000000000000000000000000000.0000000000 +d53_10 10000000000000000000000000000000000000000000.0000000000 +d53_10 10000000000000000000000000000000000000000000.0000000000 +f 0 +f 0 +f 1e38 +f 3.40282e38 +f 3.40282e38 +f0 0 +f0 0 +f0 12345.1 +f0 1e38 +f0 3.40282e38 +f0 3.40282e38 +f20_3 0.000 +f20_3 0.000 +f20_3 56789.988 +f20_3 99999998430674940.000 +f20_3 99999998430674940.000 +f20_3 99999998430674940.000 +f23_0 0 +f23_0 0 +f23_0 123457000 +f23_0 1e38 +f23_0 3.40282e38 +f23_0 3.40282e38 +r1_1 0.0 +r1_1 0.0 +r1_1 0.9 +r1_1 0.9 +r1_1 0.9 +r1_1 0.9 +ALTER TABLE t1 ADD COLUMN d0_0 DOUBLE(0,0) UNSIGNED; +ALTER TABLE t1 ADD COLUMN n66_6 DECIMAL(256,1) UNSIGNED; +ERROR 42000: Too big 
precision 256 specified for 'n66_6'. Maximum is 65 +ALTER TABLE t1 ADD COLUMN n66_66 DECIMAL(40,35) UNSIGNED; +DROP TABLE t1; +CREATE TABLE t1 ( +a DOUBLE UNSIGNED, +b FLOAT UNSIGNED, +PRIMARY KEY (b) +) ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +a double unsigned YES NULL +b float unsigned NO PRI NULL +INSERT INTO t1 (a,b) VALUES (1.0,-1.0); +Warnings: +Warning 1264 Out of range value for column 'b' at row 1 +INSERT INTO t1 (a,b) VALUES (-100,100); +Warnings: +Warning 1264 Out of range value for column 'a' at row 1 +SELECT a,b FROM t1; +a b +0 100 +1 0 +DROP TABLE t1; +######################## +# INT columns +######################## +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 ( +i INT UNSIGNED, +i0 INT(0) UNSIGNED, +i1 INT(1) UNSIGNED, +i20 INT(20) UNSIGNED, +t TINYINT UNSIGNED, +t0 TINYINT(0) UNSIGNED, +t1 TINYINT(1) UNSIGNED, +t20 TINYINT(20) UNSIGNED, +s SMALLINT UNSIGNED, +s0 SMALLINT(0) UNSIGNED, +s1 SMALLINT(1) UNSIGNED, +s20 SMALLINT(20) UNSIGNED, +m MEDIUMINT UNSIGNED, +m0 MEDIUMINT(0) UNSIGNED, +m1 MEDIUMINT(1) UNSIGNED, +m20 MEDIUMINT(20) UNSIGNED, +b BIGINT UNSIGNED, +b0 BIGINT(0) UNSIGNED, +b1 BIGINT(1) UNSIGNED, +b20 BIGINT(20) UNSIGNED, +pk INT AUTO_INCREMENT PRIMARY KEY +) ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +i int(10) unsigned YES NULL +i0 int(10) unsigned YES NULL +i1 int(1) unsigned YES NULL +i20 int(20) unsigned YES NULL +t tinyint(3) unsigned YES NULL +t0 tinyint(3) unsigned YES NULL +t1 tinyint(1) unsigned YES NULL +t20 tinyint(20) unsigned YES NULL +s smallint(5) unsigned YES NULL +s0 smallint(5) unsigned YES NULL +s1 smallint(1) unsigned YES NULL +s20 smallint(20) unsigned YES NULL +m mediumint(8) unsigned YES NULL +m0 mediumint(8) unsigned YES NULL +m1 mediumint(1) unsigned YES NULL +m20 mediumint(20) unsigned YES NULL +b bigint(20) unsigned YES NULL +b0 bigint(20) unsigned YES NULL +b1 bigint(1) unsigned YES NULL +b20 bigint(20) unsigned YES NULL +pk int(11) NO PRI NULL 
auto_increment +INSERT INTO t1 (i,i0,i1,i20,t,t0,t1,t20,s,s0,s1,s20,m,m0,m1,m20,b,b0,b1,b20) VALUES (1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20); +INSERT INTO t1 (i,i0,i1,i20,t,t0,t1,t20,s,s0,s1,s20,m,m0,m1,m20,b,b0,b1,b20) VALUES (0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0); +INSERT INTO t1 (i,i0,i1,i20,t,t0,t1,t20,s,s0,s1,s20,m,m0,m1,m20,b,b0,b1,b20) VALUES (2147483647,2147483647,2147483647,2147483647,127,127,127,127,32767,32767,32767,32767,8388607,8388607,8388607,8388607,9223372036854775807,9223372036854775807,9223372036854775807,9223372036854775807); +SELECT i,i0,i1,i20,t,t0,t1,t20,s,s0,s1,s20,m,m0,m1,m20,b,b0,b1,b20 FROM t1; +i i0 i1 i20 t t0 t1 t20 s s0 s1 s20 m m0 m1 m20 b b0 b1 b20 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 +2147483647 2147483647 2147483647 2147483647 127 127 127 127 32767 32767 32767 32767 8388607 8388607 8388607 8388607 9223372036854775807 9223372036854775807 9223372036854775807 9223372036854775807 +INSERT INTO t1 (i,i0,i1,i20,t,t0,t1,t20,s,s0,s1,s20,m,m0,m1,m20,b,b0,b1,b20) VALUES (-2147483648,-2147483648,-2147483648,-2147483648,-128,-128,-128,-128,-32768,-32768,-32768,-32768,-8388608,-8388608,-8388608,-8388608,-9223372036854775808,-9223372036854775808,-9223372036854775808,-9223372036854775808); +Warnings: +Warning 1264 Out of range value for column 'i' at row 1 +Warning 1264 Out of range value for column 'i0' at row 1 +Warning 1264 Out of range value for column 'i1' at row 1 +Warning 1264 Out of range value for column 'i20' at row 1 +Warning 1264 Out of range value for column 't' at row 1 +Warning 1264 Out of range value for column 't0' at row 1 +Warning 1264 Out of range value for column 't1' at row 1 +Warning 1264 Out of range value for column 't20' at row 1 +Warning 1264 Out of range value for column 's' at row 1 +Warning 1264 Out of range value for column 's0' at row 1 +Warning 1264 Out of range value for column 's1' at row 1 +Warning 1264 Out of range value for column 's20' at 
row 1 +Warning 1264 Out of range value for column 'm' at row 1 +Warning 1264 Out of range value for column 'm0' at row 1 +Warning 1264 Out of range value for column 'm1' at row 1 +Warning 1264 Out of range value for column 'm20' at row 1 +Warning 1264 Out of range value for column 'b' at row 1 +Warning 1264 Out of range value for column 'b0' at row 1 +Warning 1264 Out of range value for column 'b1' at row 1 +Warning 1264 Out of range value for column 'b20' at row 1 +INSERT INTO t1 (i,i0,i1,i20,t,t0,t1,t20,s,s0,s1,s20,m,m0,m1,m20,b,b0,b1,b20) VALUES (4294967295,4294967295,4294967295,4294967295,255,255,255,255,65535,65535,65535,65535,16777215,16777215,16777215,16777215,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615); +SELECT i,i0,i1,i20,t,t0,t1,t20,s,s0,s1,s20,m,m0,m1,m20,b,b0,b1,b20 FROM t1; +i i0 i1 i20 t t0 t1 t20 s s0 s1 s20 m m0 m1 m20 b b0 b1 b20 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 +2147483647 2147483647 2147483647 2147483647 127 127 127 127 32767 32767 32767 32767 8388607 8388607 8388607 8388607 9223372036854775807 9223372036854775807 9223372036854775807 9223372036854775807 +4294967295 4294967295 4294967295 4294967295 255 255 255 255 65535 65535 65535 65535 16777215 16777215 16777215 16777215 18446744073709551615 18446744073709551615 18446744073709551615 18446744073709551615 +INSERT INTO t1 (i,i0,i1,i20,t,t0,t1,t20,s,s0,s1,s20,m,m0,m1,m20,b,b0,b1,b20) VALUES (-2147483649,-2147483649,-2147483649,-2147483649,-129,-129,-129,-129,-32769,-32769,-32769,-32769,-8388609,-8388609,-8388609,-8388609,-9223372036854775809,-9223372036854775809,-9223372036854775809,-9223372036854775809); +Warnings: +Warning 1264 Out of range value for column 'i' at row 1 +Warning 1264 Out of range value for column 'i0' at row 1 +Warning 1264 Out of range value for column 'i1' at row 1 +Warning 1264 Out of range value for column 'i20' at row 1 +Warning 1264 
Out of range value for column 't' at row 1 +Warning 1264 Out of range value for column 't0' at row 1 +Warning 1264 Out of range value for column 't1' at row 1 +Warning 1264 Out of range value for column 't20' at row 1 +Warning 1264 Out of range value for column 's' at row 1 +Warning 1264 Out of range value for column 's0' at row 1 +Warning 1264 Out of range value for column 's1' at row 1 +Warning 1264 Out of range value for column 's20' at row 1 +Warning 1264 Out of range value for column 'm' at row 1 +Warning 1264 Out of range value for column 'm0' at row 1 +Warning 1264 Out of range value for column 'm1' at row 1 +Warning 1264 Out of range value for column 'm20' at row 1 +Warning 1264 Out of range value for column 'b' at row 1 +Warning 1264 Out of range value for column 'b0' at row 1 +Warning 1264 Out of range value for column 'b1' at row 1 +Warning 1264 Out of range value for column 'b20' at row 1 +INSERT INTO t1 (i,i0,i1,i20,t,t0,t1,t20,s,s0,s1,s20,m,m0,m1,m20,b,b0,b1,b20) VALUES (4294967296,4294967296,4294967296,4294967296,256,256,256,256,65536,65536,65536,65536,16777216,16777216,16777216,16777216,18446744073709551616,18446744073709551616,18446744073709551616,18446744073709551616); +Warnings: +Warning 1264 Out of range value for column 'i' at row 1 +Warning 1264 Out of range value for column 'i0' at row 1 +Warning 1264 Out of range value for column 'i1' at row 1 +Warning 1264 Out of range value for column 'i20' at row 1 +Warning 1264 Out of range value for column 't' at row 1 +Warning 1264 Out of range value for column 't0' at row 1 +Warning 1264 Out of range value for column 't1' at row 1 +Warning 1264 Out of range value for column 't20' at row 1 +Warning 1264 Out of range value for column 's' at row 1 +Warning 1264 Out of range value for column 's0' at row 1 +Warning 1264 Out of range value for column 's1' at row 1 +Warning 1264 Out of range value for column 's20' at row 1 +Warning 1264 Out of range value for column 'm' at row 1 +Warning 1264 Out of range 
value for column 'm0' at row 1 +Warning 1264 Out of range value for column 'm1' at row 1 +Warning 1264 Out of range value for column 'm20' at row 1 +Warning 1264 Out of range value for column 'b' at row 1 +Warning 1264 Out of range value for column 'b0' at row 1 +Warning 1264 Out of range value for column 'b1' at row 1 +Warning 1264 Out of range value for column 'b20' at row 1 +INSERT INTO t1 (i,i0,i1,i20,t,t0,t1,t20,s,s0,s1,s20,m,m0,m1,m20,b,b0,b1,b20) SELECT b,b,b,b,b,b,b,b,b,b,b,b,b,b,b,b,b,b,b,b FROM t1 WHERE b IN (-9223372036854775808,9223372036854775807,18446744073709551615); +Warnings: +Warning 1264 Out of range value for column 'i' at row 8 +Warning 1264 Out of range value for column 'i0' at row 8 +Warning 1264 Out of range value for column 'i1' at row 8 +Warning 1264 Out of range value for column 'i20' at row 8 +Warning 1264 Out of range value for column 't' at row 8 +Warning 1264 Out of range value for column 't0' at row 8 +Warning 1264 Out of range value for column 't1' at row 8 +Warning 1264 Out of range value for column 't20' at row 8 +Warning 1264 Out of range value for column 's' at row 8 +Warning 1264 Out of range value for column 's0' at row 8 +Warning 1264 Out of range value for column 's1' at row 8 +Warning 1264 Out of range value for column 's20' at row 8 +Warning 1264 Out of range value for column 'm' at row 8 +Warning 1264 Out of range value for column 'm0' at row 8 +Warning 1264 Out of range value for column 'm1' at row 8 +Warning 1264 Out of range value for column 'm20' at row 8 +Warning 1264 Out of range value for column 'i' at row 9 +Warning 1264 Out of range value for column 'i0' at row 9 +Warning 1264 Out of range value for column 'i1' at row 9 +Warning 1264 Out of range value for column 'i20' at row 9 +Warning 1264 Out of range value for column 't' at row 9 +Warning 1264 Out of range value for column 't0' at row 9 +Warning 1264 Out of range value for column 't1' at row 9 +Warning 1264 Out of range value for column 't20' at row 9 
+Warning 1264 Out of range value for column 's' at row 9 +Warning 1264 Out of range value for column 's0' at row 9 +Warning 1264 Out of range value for column 's1' at row 9 +Warning 1264 Out of range value for column 's20' at row 9 +Warning 1264 Out of range value for column 'm' at row 9 +Warning 1264 Out of range value for column 'm0' at row 9 +Warning 1264 Out of range value for column 'm1' at row 9 +Warning 1264 Out of range value for column 'm20' at row 9 +Warning 1264 Out of range value for column 'i' at row 10 +Warning 1264 Out of range value for column 'i0' at row 10 +Warning 1264 Out of range value for column 'i1' at row 10 +Warning 1264 Out of range value for column 'i20' at row 10 +Warning 1264 Out of range value for column 't' at row 10 +Warning 1264 Out of range value for column 't0' at row 10 +Warning 1264 Out of range value for column 't1' at row 10 +Warning 1264 Out of range value for column 't20' at row 10 +Warning 1264 Out of range value for column 's' at row 10 +Warning 1264 Out of range value for column 's0' at row 10 +Warning 1264 Out of range value for column 's1' at row 10 +Warning 1264 Out of range value for column 's20' at row 10 +Warning 1264 Out of range value for column 'm' at row 10 +Warning 1264 Out of range value for column 'm0' at row 10 +Warning 1264 Out of range value for column 'm1' at row 10 +Warning 1264 Out of range value for column 'm20' at row 10 +SELECT i,i0,i1,i20,t,t0,t1,t20,s,s0,s1,s20,m,m0,m1,m20,b,b0,b1,b20 FROM t1; +i i0 i1 i20 t t0 t1 t20 s s0 s1 s20 m m0 m1 m20 b b0 b1 b20 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 +2147483647 2147483647 2147483647 2147483647 127 127 127 127 32767 32767 32767 32767 8388607 8388607 8388607 8388607 9223372036854775807 9223372036854775807 9223372036854775807 9223372036854775807 +4294967295 4294967295 4294967295 4294967295 255 255 255 255 65535 65535 65535 
65535 16777215 16777215 16777215 16777215 18446744073709551615 18446744073709551615 18446744073709551615 18446744073709551615 +4294967295 4294967295 4294967295 4294967295 255 255 255 255 65535 65535 65535 65535 16777215 16777215 16777215 16777215 18446744073709551615 18446744073709551615 18446744073709551615 18446744073709551615 +4294967295 4294967295 4294967295 4294967295 255 255 255 255 65535 65535 65535 65535 16777215 16777215 16777215 16777215 18446744073709551615 18446744073709551615 18446744073709551615 18446744073709551615 +4294967295 4294967295 4294967295 4294967295 255 255 255 255 65535 65535 65535 65535 16777215 16777215 16777215 16777215 18446744073709551615 18446744073709551615 18446744073709551615 18446744073709551615 +4294967295 4294967295 4294967295 4294967295 255 255 255 255 65535 65535 65535 65535 16777215 16777215 16777215 16777215 9223372036854775807 9223372036854775807 9223372036854775807 9223372036854775807 +ALTER TABLE t1 ADD COLUMN i257 INT(257) UNSIGNED; +ERROR 42000: Display width out of range for 'i257' (max = 255) +DROP TABLE t1; +CREATE TABLE t1 ( +t TINYINT UNSIGNED, +s SMALLINT UNSIGNED, +m MEDIUMINT UNSIGNED, +i INT UNSIGNED, +b BIGINT UNSIGNED, +PRIMARY KEY (b) +) ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +t tinyint(3) unsigned YES NULL +s smallint(5) unsigned YES NULL +m mediumint(8) unsigned YES NULL +i int(10) unsigned YES NULL +b bigint(20) unsigned NO PRI NULL +INSERT INTO t1 (t,s,m,i,b) VALUES (255,65535,16777215,4294967295,18446744073709551615); +INSERT INTO t1 (t,s,m,i,b) VALUES (-1,-1,-1,-1,-1); +Warnings: +Warning 1264 Out of range value for column 't' at row 1 +Warning 1264 Out of range value for column 's' at row 1 +Warning 1264 Out of range value for column 'm' at row 1 +Warning 1264 Out of range value for column 'i' at row 1 +Warning 1264 Out of range value for column 'b' at row 1 +SELECT t,s,m,i,b FROM t1; +t s m i b +0 0 0 0 0 +255 65535 16777215 4294967295 18446744073709551615 +DROP 
TABLE t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/col_opt_zerofill.result b/storage/rocksdb/mysql-test/rocksdb/r/col_opt_zerofill.result new file mode 100644 index 0000000000000..1f9bf16ecfce3 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/col_opt_zerofill.result @@ -0,0 +1,721 @@ +######################## +# Fixed point columns (NUMERIC, DECIMAL) +######################## +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 ( +d DECIMAL ZEROFILL, +d0 DECIMAL(0) ZEROFILL, +d1_1 DECIMAL(1,1) ZEROFILL, +d10_2 DECIMAL(10,2) ZEROFILL, +d60_10 DECIMAL(60,10) ZEROFILL, +n NUMERIC ZEROFILL, +n0_0 NUMERIC(0,0) ZEROFILL, +n1 NUMERIC(1) ZEROFILL, +n20_4 NUMERIC(20,4) ZEROFILL, +n65_4 NUMERIC(65,4) ZEROFILL, +pk NUMERIC ZEROFILL PRIMARY KEY +) ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +d decimal(10,0) unsigned zerofill YES NULL +d0 decimal(10,0) unsigned zerofill YES NULL +d1_1 decimal(1,1) unsigned zerofill YES NULL +d10_2 decimal(10,2) unsigned zerofill YES NULL +d60_10 decimal(60,10) unsigned zerofill YES NULL +n decimal(10,0) unsigned zerofill YES NULL +n0_0 decimal(10,0) unsigned zerofill YES NULL +n1 decimal(1,0) unsigned zerofill YES NULL +n20_4 decimal(20,4) unsigned zerofill YES NULL +n65_4 decimal(65,4) unsigned zerofill YES NULL +pk decimal(10,0) unsigned zerofill NO PRI NULL +INSERT INTO t1 (d,d0,d1_1,d10_2,d60_10,n,n0_0,n1,n20_4,n65_4,pk) VALUES (100,123456,0.3,40000.25,123456789123456789.10001,1024,7000.0,8.0,999999.9,9223372036854775807,1); +INSERT INTO t1 (d,d0,d1_1,d10_2,d60_10,n,n0_0,n1,n20_4,n65_4,pk) VALUES (0,0,0,0,0,0,0,0,0,0,2); +INSERT INTO t1 (d,d0,d1_1,d10_2,d60_10,n,n0_0,n1,n20_4,n65_4,pk) VALUES (9999999999.0,9999999999.0,0.9,99999999.99,99999999999999999999999999999999999999999999999999.9999999999,9999999999.0,9999999999.0,9.0,9999999999999999.9999,9999999999999999999999999999999999999999999999999999999999999.9999,3); +SELECT d,d0,d1_1,d10_2,d60_10,n,n0_0,n1,n20_4,n65_4 FROM t1; +d d0 d1_1 d10_2 d60_10 n 
n0_0 n1 n20_4 n65_4 +0000000000 0000000000 0.0 00000000.00 00000000000000000000000000000000000000000000000000.0000000000 0000000000 0000000000 0 0000000000000000.0000 0000000000000000000000000000000000000000000000000000000000000.0000 +0000000100 0000123456 0.3 00040000.25 00000000000000000000000000000000123456789123456789.1000100000 0000001024 0000007000 8 0000000000999999.9000 0000000000000000000000000000000000000000009223372036854775807.0000 +9999999999 9999999999 0.9 99999999.99 99999999999999999999999999999999999999999999999999.9999999999 9999999999 9999999999 9 9999999999999999.9999 9999999999999999999999999999999999999999999999999999999999999.9999 +INSERT INTO t1 (d,d0,d1_1,d10_2,d60_10,n,n0_0,n1,n20_4,n65_4,pk) VALUES (-100,-123456,-0.3,-40000.25,-123456789123456789.10001,-1024,-7000.0,-8.0,-999999.9,-9223372036854775807,4); +Warnings: +Warning 1264 Out of range value for column 'd' at row 1 +Warning 1264 Out of range value for column 'd0' at row 1 +Warning 1264 Out of range value for column 'd1_1' at row 1 +Warning 1264 Out of range value for column 'd10_2' at row 1 +Warning 1264 Out of range value for column 'd60_10' at row 1 +Warning 1264 Out of range value for column 'n' at row 1 +Warning 1264 Out of range value for column 'n0_0' at row 1 +Warning 1264 Out of range value for column 'n1' at row 1 +Warning 1264 Out of range value for column 'n20_4' at row 1 +Warning 1264 Out of range value for column 'n65_4' at row 1 +INSERT INTO t1 (d,d0,d1_1,d10_2,d60_10,n,n0_0,n1,n20_4,n65_4,pk) VALUES (-9999999999.0,-9999999999.0,-0.9,-99999999.99,-99999999999999999999999999999999999999999999999999.9999999999,-9999999999.0,-9999999999.0,-9.0,-9999999999999999.9999,-9999999999999999999999999999999999999999999999999999999999999.9999,5); +Warnings: +Warning 1264 Out of range value for column 'd' at row 1 +Warning 1264 Out of range value for column 'd0' at row 1 +Warning 1264 Out of range value for column 'd1_1' at row 1 +Warning 1264 Out of range value for column 'd10_2' 
at row 1 +Warning 1264 Out of range value for column 'd60_10' at row 1 +Warning 1264 Out of range value for column 'n' at row 1 +Warning 1264 Out of range value for column 'n0_0' at row 1 +Warning 1264 Out of range value for column 'n1' at row 1 +Warning 1264 Out of range value for column 'n20_4' at row 1 +Warning 1264 Out of range value for column 'n65_4' at row 1 +SELECT d,d0,d1_1,d10_2,d60_10,n,n0_0,n1,n20_4,n65_4 FROM t1; +d d0 d1_1 d10_2 d60_10 n n0_0 n1 n20_4 n65_4 +0000000000 0000000000 0.0 00000000.00 00000000000000000000000000000000000000000000000000.0000000000 0000000000 0000000000 0 0000000000000000.0000 0000000000000000000000000000000000000000000000000000000000000.0000 +0000000000 0000000000 0.0 00000000.00 00000000000000000000000000000000000000000000000000.0000000000 0000000000 0000000000 0 0000000000000000.0000 0000000000000000000000000000000000000000000000000000000000000.0000 +0000000000 0000000000 0.0 00000000.00 00000000000000000000000000000000000000000000000000.0000000000 0000000000 0000000000 0 0000000000000000.0000 0000000000000000000000000000000000000000000000000000000000000.0000 +0000000100 0000123456 0.3 00040000.25 00000000000000000000000000000000123456789123456789.1000100000 0000001024 0000007000 8 0000000000999999.9000 0000000000000000000000000000000000000000009223372036854775807.0000 +9999999999 9999999999 0.9 99999999.99 99999999999999999999999999999999999999999999999999.9999999999 9999999999 9999999999 9 9999999999999999.9999 9999999999999999999999999999999999999999999999999999999999999.9999 +SELECT d,d0,d1_1,d10_2,d60_10,n,n0_0,n1,n20_4,n65_4 FROM t1 WHERE n20_4 = 9999999999999999.9999 OR d < 100; +d d0 d1_1 d10_2 d60_10 n n0_0 n1 n20_4 n65_4 +0000000000 0000000000 0.0 00000000.00 00000000000000000000000000000000000000000000000000.0000000000 0000000000 0000000000 0 0000000000000000.0000 0000000000000000000000000000000000000000000000000000000000000.0000 +0000000000 0000000000 0.0 00000000.00 
00000000000000000000000000000000000000000000000000.0000000000 0000000000 0000000000 0 0000000000000000.0000 0000000000000000000000000000000000000000000000000000000000000.0000 +0000000000 0000000000 0.0 00000000.00 00000000000000000000000000000000000000000000000000.0000000000 0000000000 0000000000 0 0000000000000000.0000 0000000000000000000000000000000000000000000000000000000000000.0000 +9999999999 9999999999 0.9 99999999.99 99999999999999999999999999999999999999999999999999.9999999999 9999999999 9999999999 9 9999999999999999.9999 9999999999999999999999999999999999999999999999999999999999999.9999 +INSERT INTO t1 (d,d0,d1_1,d10_2,d60_10,n,n0_0,n1,n20_4,n65_4,pk) VALUES ( +9999999999999999999999999999999999999999999999999999999999999.9999, +9999999999999999999999999999999999999999999999999999999999999.9999, +9999999999999999999999999999999999999999999999999999999999999.9999, +9999999999999999999999999999999999999999999999999999999999999.9999, +9999999999999999999999999999999999999999999999999999999999999.9999, +9999999999999999999999999999999999999999999999999999999999999.9999, +9999999999999999999999999999999999999999999999999999999999999.9999, +9999999999999999999999999999999999999999999999999999999999999.9999, +9999999999999999999999999999999999999999999999999999999999999.9999, +9999999999999999999999999999999999999999999999999999999999999.9999, +6 +); +Warnings: +Warning 1264 Out of range value for column 'd' at row 1 +Warning 1264 Out of range value for column 'd0' at row 1 +Warning 1264 Out of range value for column 'd1_1' at row 1 +Warning 1264 Out of range value for column 'd10_2' at row 1 +Warning 1264 Out of range value for column 'd60_10' at row 1 +Warning 1264 Out of range value for column 'n' at row 1 +Warning 1264 Out of range value for column 'n0_0' at row 1 +Warning 1264 Out of range value for column 'n1' at row 1 +Warning 1264 Out of range value for column 'n20_4' at row 1 +SELECT d,d0,d1_1,d10_2,d60_10,n,n0_0,n1,n20_4,n65_4 FROM t1; +d d0 d1_1 d10_2 
d60_10 n n0_0 n1 n20_4 n65_4 +0000000000 0000000000 0.0 00000000.00 00000000000000000000000000000000000000000000000000.0000000000 0000000000 0000000000 0 0000000000000000.0000 0000000000000000000000000000000000000000000000000000000000000.0000 +0000000000 0000000000 0.0 00000000.00 00000000000000000000000000000000000000000000000000.0000000000 0000000000 0000000000 0 0000000000000000.0000 0000000000000000000000000000000000000000000000000000000000000.0000 +0000000000 0000000000 0.0 00000000.00 00000000000000000000000000000000000000000000000000.0000000000 0000000000 0000000000 0 0000000000000000.0000 0000000000000000000000000000000000000000000000000000000000000.0000 +0000000100 0000123456 0.3 00040000.25 00000000000000000000000000000000123456789123456789.1000100000 0000001024 0000007000 8 0000000000999999.9000 0000000000000000000000000000000000000000009223372036854775807.0000 +9999999999 9999999999 0.9 99999999.99 99999999999999999999999999999999999999999999999999.9999999999 9999999999 9999999999 9 9999999999999999.9999 9999999999999999999999999999999999999999999999999999999999999.9999 +9999999999 9999999999 0.9 99999999.99 99999999999999999999999999999999999999999999999999.9999999999 9999999999 9999999999 9 9999999999999999.9999 9999999999999999999999999999999999999999999999999999999999999.9999 +INSERT INTO t1 (d,d0,d1_1,d10_2,d60_10,n,n0_0,n1,n20_4,n65_4,pk) VALUES (10000000000.0,10000000000.0,1.1,100000000.99,100000000000000000000000000000000000000000000000000.0,10000000000.0,10000000000.0,10.0,10000000000000000.9999,10000000000000000000000000000000000000000000000000000000000000.9999,7); +Warnings: +Warning 1264 Out of range value for column 'd' at row 1 +Warning 1264 Out of range value for column 'd0' at row 1 +Warning 1264 Out of range value for column 'd1_1' at row 1 +Warning 1264 Out of range value for column 'd10_2' at row 1 +Warning 1264 Out of range value for column 'd60_10' at row 1 +Warning 1264 Out of range value for column 'n' at row 1 +Warning 1264 Out 
of range value for column 'n0_0' at row 1 +Warning 1264 Out of range value for column 'n1' at row 1 +Warning 1264 Out of range value for column 'n20_4' at row 1 +Warning 1264 Out of range value for column 'n65_4' at row 1 +SELECT d,d0,d1_1,d10_2,d60_10,n,n0_0,n1,n20_4,n65_4 FROM t1; +d d0 d1_1 d10_2 d60_10 n n0_0 n1 n20_4 n65_4 +0000000000 0000000000 0.0 00000000.00 00000000000000000000000000000000000000000000000000.0000000000 0000000000 0000000000 0 0000000000000000.0000 0000000000000000000000000000000000000000000000000000000000000.0000 +0000000000 0000000000 0.0 00000000.00 00000000000000000000000000000000000000000000000000.0000000000 0000000000 0000000000 0 0000000000000000.0000 0000000000000000000000000000000000000000000000000000000000000.0000 +0000000000 0000000000 0.0 00000000.00 00000000000000000000000000000000000000000000000000.0000000000 0000000000 0000000000 0 0000000000000000.0000 0000000000000000000000000000000000000000000000000000000000000.0000 +0000000100 0000123456 0.3 00040000.25 00000000000000000000000000000000123456789123456789.1000100000 0000001024 0000007000 8 0000000000999999.9000 0000000000000000000000000000000000000000009223372036854775807.0000 +9999999999 9999999999 0.9 99999999.99 99999999999999999999999999999999999999999999999999.9999999999 9999999999 9999999999 9 9999999999999999.9999 9999999999999999999999999999999999999999999999999999999999999.9999 +9999999999 9999999999 0.9 99999999.99 99999999999999999999999999999999999999999999999999.9999999999 9999999999 9999999999 9 9999999999999999.9999 9999999999999999999999999999999999999999999999999999999999999.9999 +9999999999 9999999999 0.9 99999999.99 99999999999999999999999999999999999999999999999999.9999999999 9999999999 9999999999 9 9999999999999999.9999 9999999999999999999999999999999999999999999999999999999999999.9999 +INSERT INTO t1 (d,d0,d1_1,d10_2,d60_10,n,n0_0,n1,n20_4,n65_4,pk) VALUES 
(9999999999.1,9999999999.1,1.9,99999999.001,99999999999999999999999999999999999999999999999999.99999999991,9999999999.1,9999999999.1,9.1,9999999999999999.00001,9999999999999999999999999999999999999999999999999999999999999.11111,8); +Warnings: +Note 1265 Data truncated for column 'd' at row 1 +Note 1265 Data truncated for column 'd0' at row 1 +Warning 1264 Out of range value for column 'd1_1' at row 1 +Note 1265 Data truncated for column 'd10_2' at row 1 +Note 1265 Data truncated for column 'd60_10' at row 1 +Note 1265 Data truncated for column 'n' at row 1 +Note 1265 Data truncated for column 'n0_0' at row 1 +Note 1265 Data truncated for column 'n1' at row 1 +Note 1265 Data truncated for column 'n20_4' at row 1 +Note 1265 Data truncated for column 'n65_4' at row 1 +SELECT d,d0,d1_1,d10_2,d60_10,n,n0_0,n1,n20_4,n65_4 FROM t1; +d d0 d1_1 d10_2 d60_10 n n0_0 n1 n20_4 n65_4 +0000000000 0000000000 0.0 00000000.00 00000000000000000000000000000000000000000000000000.0000000000 0000000000 0000000000 0 0000000000000000.0000 0000000000000000000000000000000000000000000000000000000000000.0000 +0000000000 0000000000 0.0 00000000.00 00000000000000000000000000000000000000000000000000.0000000000 0000000000 0000000000 0 0000000000000000.0000 0000000000000000000000000000000000000000000000000000000000000.0000 +0000000000 0000000000 0.0 00000000.00 00000000000000000000000000000000000000000000000000.0000000000 0000000000 0000000000 0 0000000000000000.0000 0000000000000000000000000000000000000000000000000000000000000.0000 +0000000100 0000123456 0.3 00040000.25 00000000000000000000000000000000123456789123456789.1000100000 0000001024 0000007000 8 0000000000999999.9000 0000000000000000000000000000000000000000009223372036854775807.0000 +9999999999 9999999999 0.9 99999999.00 99999999999999999999999999999999999999999999999999.9999999999 9999999999 9999999999 9 9999999999999999.0000 9999999999999999999999999999999999999999999999999999999999999.1111 +9999999999 9999999999 0.9 99999999.99 
99999999999999999999999999999999999999999999999999.9999999999 9999999999 9999999999 9 9999999999999999.9999 9999999999999999999999999999999999999999999999999999999999999.9999 +9999999999 9999999999 0.9 99999999.99 99999999999999999999999999999999999999999999999999.9999999999 9999999999 9999999999 9 9999999999999999.9999 9999999999999999999999999999999999999999999999999999999999999.9999 +9999999999 9999999999 0.9 99999999.99 99999999999999999999999999999999999999999999999999.9999999999 9999999999 9999999999 9 9999999999999999.9999 9999999999999999999999999999999999999999999999999999999999999.9999 +ALTER TABLE t1 ADD COLUMN n66 NUMERIC(66) ZEROFILL; +ERROR 42000: Too big precision 66 specified for 'n66'. Maximum is 65 +ALTER TABLE t1 ADD COLUMN n66_6 DECIMAL(66,6) ZEROFILL; +ERROR 42000: Too big precision 66 specified for 'n66_6'. Maximum is 65 +ALTER TABLE t1 ADD COLUMN n66_66 DECIMAL(66,66) ZEROFILL; +ERROR 42000: Too big scale 66 specified for 'n66_66'. Maximum is 38 +DROP TABLE t1; +CREATE TABLE t1 ( +a DECIMAL ZEROFILL, +b NUMERIC ZEROFILL, +PRIMARY KEY (a) +) ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +a decimal(10,0) unsigned zerofill NO PRI NULL +b decimal(10,0) unsigned zerofill YES NULL +INSERT INTO t1 (a,b) VALUES (1.1,1234); +Warnings: +Note 1265 Data truncated for column 'a' at row 1 +SELECT a,b FROM t1; +a b +0000000001 0000001234 +DROP TABLE t1; +######################## +# Floating point columns (FLOAT, DOUBLE) +######################## +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 ( +f FLOAT ZEROFILL, +f0 FLOAT(0) ZEROFILL, +r1_1 REAL(1,1) ZEROFILL, +f23_0 FLOAT(23) ZEROFILL, +f20_3 FLOAT(20,3) ZEROFILL, +d DOUBLE ZEROFILL, +d1_0 DOUBLE(1,0) ZEROFILL, +d10_10 DOUBLE PRECISION (10,10) ZEROFILL, +d53 DOUBLE(53,0) ZEROFILL, +d53_10 DOUBLE(53,10) ZEROFILL, +pk DOUBLE ZEROFILL PRIMARY KEY +) ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +f float unsigned zerofill YES NULL +f0 float unsigned zerofill YES 
NULL +r1_1 double(1,1) unsigned zerofill YES NULL +f23_0 float unsigned zerofill YES NULL +f20_3 float(20,3) unsigned zerofill YES NULL +d double unsigned zerofill YES NULL +d1_0 double(1,0) unsigned zerofill YES NULL +d10_10 double(10,10) unsigned zerofill YES NULL +d53 double(53,0) unsigned zerofill YES NULL +d53_10 double(53,10) unsigned zerofill YES NULL +pk double unsigned zerofill NO PRI NULL +INSERT INTO t1 (f,f0,r1_1,f23_0,f20_3,d,d1_0,d10_10,d53,d53_10,pk) VALUES (12345.12345,12345.12345,0.9,123456789.123,56789.987,11111111.111,8.0,0.0123456789,1234566789123456789,99999999999999999.99999999,1); +SELECT f,f0,r1_1,f23_0,f20_3,d,d1_0,d10_10,d53,d53_10 FROM t1; +f 0000012345.1 +d 000000000011111111.111 +d10_10 0.0123456789 +d1_0 8 +d53 00000000000000000000000000000000001234566789123456800 +d53_10 000000000000000000000000100000000000000000.0000000000 +f0 0000012345.1 +f20_3 0000000000056789.988 +f23_0 000123457000 +r1_1 0.9 +INSERT INTO t1 (f,f0,r1_1,f23_0,f20_3,d,d1_0,d10_10,d53,d53_10,pk) VALUES (0,0,0,0,0,0,0,0,0,0,2); +INSERT INTO t1 (f,f0,r1_1,f23_0,f20_3,d,d1_0,d10_10,d53,d53_10,pk) VALUES ( +99999999999999999999999999999999999999, +99999999999999999999999999999999999999.9999999999999999, +0.9, +99999999999999999999999999999999999999.9, +99999999999999999.999, +999999999999999999999999999999999999999999999999999999999999999999999999999999999, +9, +0.9999999999, +1999999999999999999999999999999999999999999999999999999, +19999999999999999999999999999999999999999999.9999999999, +3 +); +Warnings: +Warning 1264 Out of range value for column 'd53' at row 1 +Warning 1264 Out of range value for column 'd53_10' at row 1 +SELECT f,f0,r1_1,f23_0,f20_3,d,d1_0,d10_10,d53,d53_10 FROM t1; +f 0000012345.1 +d 0000000000000000000000 +d 0000000000000000001e81 +d 000000000011111111.111 +d10_10 0.0000000000 +d10_10 0.0123456789 +d10_10 0.9999999999 +d1_0 0 +d1_0 8 +d1_0 9 +d53 00000000000000000000000000000000000000000000000000000 +d53 
00000000000000000000000000000000001234566789123456800 +d53 100000000000000000000000000000000000000000000000000000 +d53_10 000000000000000000000000000000000000000000.0000000000 +d53_10 000000000000000000000000100000000000000000.0000000000 +d53_10 10000000000000000000000000000000000000000000.0000000000 +f 000000000000 +f 000000001e38 +f0 000000000000 +f0 000000001e38 +f0 0000012345.1 +f20_3 0000000000000000.000 +f20_3 0000000000056789.988 +f20_3 99999998430674940.000 +f23_0 000000000000 +f23_0 000000001e38 +f23_0 000123457000 +r1_1 0.0 +r1_1 0.9 +r1_1 0.9 +INSERT INTO t1 (f,f0,r1_1,f23_0,f20_3,d,d1_0,d10_10,d53,d53_10,pk) VALUES (-999999999999999999999999,-99999999999.999999999999,-0.9,-999.99999999999999999999,-99999999999999999.999,-999999999999999999999999999999999999999999999999999999999999-0.999,-9,-.9999999999,-999999999999999999999999999999.99999999999999999999999,-9999999999999999999999999999999999999999999.9999999999,4); +Warnings: +Warning 1264 Out of range value for column 'f' at row 1 +Warning 1264 Out of range value for column 'f0' at row 1 +Warning 1264 Out of range value for column 'r1_1' at row 1 +Warning 1264 Out of range value for column 'f23_0' at row 1 +Warning 1264 Out of range value for column 'f20_3' at row 1 +Warning 1264 Out of range value for column 'd' at row 1 +Warning 1264 Out of range value for column 'd1_0' at row 1 +Warning 1264 Out of range value for column 'd10_10' at row 1 +Warning 1264 Out of range value for column 'd53' at row 1 +Warning 1264 Out of range value for column 'd53_10' at row 1 +SELECT f,f0,r1_1,f23_0,f20_3,d,d1_0,d10_10,d53,d53_10 FROM t1; +f 0000012345.1 +d 0000000000000000000000 +d 0000000000000000000000 +d 0000000000000000001e81 +d 000000000011111111.111 +d10_10 0.0000000000 +d10_10 0.0000000000 +d10_10 0.0123456789 +d10_10 0.9999999999 +d1_0 0 +d1_0 0 +d1_0 8 +d1_0 9 +d53 00000000000000000000000000000000000000000000000000000 +d53 00000000000000000000000000000000000000000000000000000 +d53 
00000000000000000000000000000000001234566789123456800 +d53 100000000000000000000000000000000000000000000000000000 +d53_10 000000000000000000000000000000000000000000.0000000000 +d53_10 000000000000000000000000000000000000000000.0000000000 +d53_10 000000000000000000000000100000000000000000.0000000000 +d53_10 10000000000000000000000000000000000000000000.0000000000 +f 000000000000 +f 000000000000 +f 000000001e38 +f0 000000000000 +f0 000000000000 +f0 000000001e38 +f0 0000012345.1 +f20_3 0000000000000000.000 +f20_3 0000000000000000.000 +f20_3 0000000000056789.988 +f20_3 99999998430674940.000 +f23_0 000000000000 +f23_0 000000000000 +f23_0 000000001e38 +f23_0 000123457000 +r1_1 0.0 +r1_1 0.0 +r1_1 0.9 +r1_1 0.9 +SELECT MAX(f), MAX(f0), MAX(r1_1), MAX(f23_0), MAX(f20_3), MAX(d), MAX(d1_0), MAX(d10_10), MAX(d53), MAX(d53_10) FROM t1; +MAX(f) 9.999999680285692e37 +MAX(d) 1e81 +MAX(d10_10) 0.9999999999 +MAX(d1_0) 9 +MAX(d53) 100000000000000000000000000000000000000000000000000000 +MAX(d53_10) 10000000000000000000000000000000000000000000.0000000000 +MAX(f0) 9.999999680285692e37 +MAX(f20_3) 99999998430674940.000 +MAX(f23_0) 9.999999680285692e37 +MAX(r1_1) 0.9 +INSERT INTO t1 (f,f0,r1_1,f23_0,f20_3,d,d1_0,d10_10,d53,d53_10,pk) VALUES ( +9999999999999999999999999999999999999999999999999999999999999.9999, +9999999999999999999999999999999999999999999999999999999999999.9999, +9999999999999999999999999999999999999999999999999999999999999.9999, +9999999999999999999999999999999999999999999999999999999999999.9999, +9999999999999999999999999999999999999999999999999999999999999.9999, +9999999999999999999999999999999999999999999999999999999999999.9999, +9999999999999999999999999999999999999999999999999999999999999.9999, +9999999999999999999999999999999999999999999999999999999999999.9999, +9999999999999999999999999999999999999999999999999999999999999.9999, +9999999999999999999999999999999999999999999999999999999999999.9999, +5 +); +Warnings: +Warning 1264 Out of range value for column 'f' at 
row 1 +Warning 1264 Out of range value for column 'f0' at row 1 +Warning 1264 Out of range value for column 'r1_1' at row 1 +Warning 1264 Out of range value for column 'f23_0' at row 1 +Warning 1264 Out of range value for column 'f20_3' at row 1 +Warning 1264 Out of range value for column 'd1_0' at row 1 +Warning 1264 Out of range value for column 'd10_10' at row 1 +Warning 1264 Out of range value for column 'd53' at row 1 +Warning 1264 Out of range value for column 'd53_10' at row 1 +SELECT f,f0,r1_1,f23_0,f20_3,d,d1_0,d10_10,d53,d53_10 FROM t1; +f 0000012345.1 +d 0000000000000000000000 +d 0000000000000000000000 +d 0000000000000000001e61 +d 0000000000000000001e81 +d 000000000011111111.111 +d10_10 0.0000000000 +d10_10 0.0000000000 +d10_10 0.0123456789 +d10_10 0.9999999999 +d10_10 0.9999999999 +d1_0 0 +d1_0 0 +d1_0 8 +d1_0 9 +d1_0 9 +d53 00000000000000000000000000000000000000000000000000000 +d53 00000000000000000000000000000000000000000000000000000 +d53 00000000000000000000000000000000001234566789123456800 +d53 100000000000000000000000000000000000000000000000000000 +d53 100000000000000000000000000000000000000000000000000000 +d53_10 000000000000000000000000000000000000000000.0000000000 +d53_10 000000000000000000000000000000000000000000.0000000000 +d53_10 000000000000000000000000100000000000000000.0000000000 +d53_10 10000000000000000000000000000000000000000000.0000000000 +d53_10 10000000000000000000000000000000000000000000.0000000000 +f 000000000000 +f 000000000000 +f 000000001e38 +f 003.40282e38 +f0 000000000000 +f0 000000000000 +f0 000000001e38 +f0 0000012345.1 +f0 003.40282e38 +f20_3 0000000000000000.000 +f20_3 0000000000000000.000 +f20_3 0000000000056789.988 +f20_3 99999998430674940.000 +f20_3 99999998430674940.000 +f23_0 000000000000 +f23_0 000000000000 +f23_0 000000001e38 +f23_0 000123457000 +f23_0 003.40282e38 +r1_1 0.0 +r1_1 0.0 +r1_1 0.9 +r1_1 0.9 +r1_1 0.9 +INSERT INTO t1 (f,f0,r1_1,f23_0,f20_3,d,d1_0,d10_10,d53,d53_10,pk) VALUES ( 
+999999999999999999999999999999999999999, +999999999999999999999999999999999999999.9999999999999999, +1.9, +999999999999999999999999999999999999999.9, +999999999999999999.999, +9999999999999999999999999999999999999999999999999999999999999999999999999999999999, +99, +1.9999999999, +1999999999999999999999999999999999999999999999999999999, +19999999999999999999999999999999999999999999.9999999999, +6 +); +Warnings: +Warning 1916 Got overflow when converting '' to DECIMAL. Value truncated +Warning 1264 Out of range value for column 'f' at row 1 +Warning 1264 Out of range value for column 'f0' at row 1 +Warning 1264 Out of range value for column 'r1_1' at row 1 +Warning 1264 Out of range value for column 'f23_0' at row 1 +Warning 1264 Out of range value for column 'f20_3' at row 1 +Warning 1264 Out of range value for column 'd1_0' at row 1 +Warning 1264 Out of range value for column 'd10_10' at row 1 +Warning 1264 Out of range value for column 'd53' at row 1 +Warning 1264 Out of range value for column 'd53_10' at row 1 +SELECT f,f0,r1_1,f23_0,f20_3,d,d1_0,d10_10,d53,d53_10 FROM t1; +f 0000012345.1 +d 0000000000000000000000 +d 0000000000000000000000 +d 0000000000000000001e61 +d 0000000000000000001e65 +d 0000000000000000001e81 +d 000000000011111111.111 +d10_10 0.0000000000 +d10_10 0.0000000000 +d10_10 0.0123456789 +d10_10 0.9999999999 +d10_10 0.9999999999 +d10_10 0.9999999999 +d1_0 0 +d1_0 0 +d1_0 8 +d1_0 9 +d1_0 9 +d1_0 9 +d53 00000000000000000000000000000000000000000000000000000 +d53 00000000000000000000000000000000000000000000000000000 +d53 00000000000000000000000000000000001234566789123456800 +d53 100000000000000000000000000000000000000000000000000000 +d53 100000000000000000000000000000000000000000000000000000 +d53 100000000000000000000000000000000000000000000000000000 +d53_10 000000000000000000000000000000000000000000.0000000000 +d53_10 000000000000000000000000000000000000000000.0000000000 +d53_10 000000000000000000000000100000000000000000.0000000000 +d53_10 
10000000000000000000000000000000000000000000.0000000000 +d53_10 10000000000000000000000000000000000000000000.0000000000 +d53_10 10000000000000000000000000000000000000000000.0000000000 +f 000000000000 +f 000000000000 +f 000000001e38 +f 003.40282e38 +f 003.40282e38 +f0 000000000000 +f0 000000000000 +f0 000000001e38 +f0 0000012345.1 +f0 003.40282e38 +f0 003.40282e38 +f20_3 0000000000000000.000 +f20_3 0000000000000000.000 +f20_3 0000000000056789.988 +f20_3 99999998430674940.000 +f20_3 99999998430674940.000 +f20_3 99999998430674940.000 +f23_0 000000000000 +f23_0 000000000000 +f23_0 000000001e38 +f23_0 000123457000 +f23_0 003.40282e38 +f23_0 003.40282e38 +r1_1 0.0 +r1_1 0.0 +r1_1 0.9 +r1_1 0.9 +r1_1 0.9 +r1_1 0.9 +ALTER TABLE t1 ADD COLUMN d0_0 DOUBLE(0,0) ZEROFILL; +ALTER TABLE t1 ADD COLUMN n66_6 DECIMAL(256,1) ZEROFILL; +ERROR 42000: Too big precision 256 specified for 'n66_6'. Maximum is 65 +ALTER TABLE t1 ADD COLUMN n66_66 DECIMAL(40,35) ZEROFILL; +DROP TABLE t1; +CREATE TABLE t1 ( +a DOUBLE ZEROFILL, +b FLOAT ZEROFILL, +PRIMARY KEY (b) +) ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +a double unsigned zerofill YES NULL +b float unsigned zerofill NO PRI NULL +INSERT INTO t1 (a,b) VALUES (1,1234.5); +SELECT a,b FROM t1; +a b +0000000000000000000001 0000001234.5 +DROP TABLE t1; +######################## +# INT columns +######################## +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 ( +i INT ZEROFILL, +i0 INT(0) ZEROFILL, +i1 INT(1) ZEROFILL, +i20 INT(20) ZEROFILL, +t TINYINT ZEROFILL, +t0 TINYINT(0) ZEROFILL, +t1 TINYINT(1) ZEROFILL, +t20 TINYINT(20) ZEROFILL, +s SMALLINT ZEROFILL, +s0 SMALLINT(0) ZEROFILL, +s1 SMALLINT(1) ZEROFILL, +s20 SMALLINT(20) ZEROFILL, +m MEDIUMINT ZEROFILL, +m0 MEDIUMINT(0) ZEROFILL, +m1 MEDIUMINT(1) ZEROFILL, +m20 MEDIUMINT(20) ZEROFILL, +b BIGINT ZEROFILL, +b0 BIGINT(0) ZEROFILL, +b1 BIGINT(1) ZEROFILL, +b20 BIGINT(20) ZEROFILL, +pk INT AUTO_INCREMENT PRIMARY KEY +) ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field 
Type Null Key Default Extra +i int(10) unsigned zerofill YES NULL +i0 int(10) unsigned zerofill YES NULL +i1 int(1) unsigned zerofill YES NULL +i20 int(20) unsigned zerofill YES NULL +t tinyint(3) unsigned zerofill YES NULL +t0 tinyint(3) unsigned zerofill YES NULL +t1 tinyint(1) unsigned zerofill YES NULL +t20 tinyint(20) unsigned zerofill YES NULL +s smallint(5) unsigned zerofill YES NULL +s0 smallint(5) unsigned zerofill YES NULL +s1 smallint(1) unsigned zerofill YES NULL +s20 smallint(20) unsigned zerofill YES NULL +m mediumint(8) unsigned zerofill YES NULL +m0 mediumint(8) unsigned zerofill YES NULL +m1 mediumint(1) unsigned zerofill YES NULL +m20 mediumint(20) unsigned zerofill YES NULL +b bigint(20) unsigned zerofill YES NULL +b0 bigint(20) unsigned zerofill YES NULL +b1 bigint(1) unsigned zerofill YES NULL +b20 bigint(20) unsigned zerofill YES NULL +pk int(11) NO PRI NULL auto_increment +INSERT INTO t1 (i,i0,i1,i20,t,t0,t1,t20,s,s0,s1,s20,m,m0,m1,m20,b,b0,b1,b20) VALUES (1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20); +INSERT INTO t1 (i,i0,i1,i20,t,t0,t1,t20,s,s0,s1,s20,m,m0,m1,m20,b,b0,b1,b20) VALUES (0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0); +INSERT INTO t1 (i,i0,i1,i20,t,t0,t1,t20,s,s0,s1,s20,m,m0,m1,m20,b,b0,b1,b20) VALUES (2147483647,2147483647,2147483647,2147483647,127,127,127,127,32767,32767,32767,32767,8388607,8388607,8388607,8388607,9223372036854775807,9223372036854775807,9223372036854775807,9223372036854775807); +SELECT i,i0,i1,i20,t,t0,t1,t20,s,s0,s1,s20,m,m0,m1,m20,b,b0,b1,b20 FROM t1; +i i0 i1 i20 t t0 t1 t20 s s0 s1 s20 m m0 m1 m20 b b0 b1 b20 +0000000000 0000000000 0 00000000000000000000 000 000 0 00000000000000000000 00000 00000 0 00000000000000000000 00000000 00000000 0 00000000000000000000 00000000000000000000 00000000000000000000 0 00000000000000000000 +0000000001 0000000002 3 00000000000000000004 005 006 7 00000000000000000008 00009 00010 11 00000000000000000012 00000013 00000014 15 00000000000000000016 00000000000000000017 
00000000000000000018 19 00000000000000000020 +2147483647 2147483647 2147483647 00000000002147483647 127 127 127 00000000000000000127 32767 32767 32767 00000000000000032767 08388607 08388607 8388607 00000000000008388607 09223372036854775807 09223372036854775807 9223372036854775807 09223372036854775807 +INSERT INTO t1 (i,i0,i1,i20,t,t0,t1,t20,s,s0,s1,s20,m,m0,m1,m20,b,b0,b1,b20) VALUES (-2147483648,-2147483648,-2147483648,-2147483648,-128,-128,-128,-128,-32768,-32768,-32768,-32768,-8388608,-8388608,-8388608,-8388608,-9223372036854775808,-9223372036854775808,-9223372036854775808,-9223372036854775808); +Warnings: +Warning 1264 Out of range value for column 'i' at row 1 +Warning 1264 Out of range value for column 'i0' at row 1 +Warning 1264 Out of range value for column 'i1' at row 1 +Warning 1264 Out of range value for column 'i20' at row 1 +Warning 1264 Out of range value for column 't' at row 1 +Warning 1264 Out of range value for column 't0' at row 1 +Warning 1264 Out of range value for column 't1' at row 1 +Warning 1264 Out of range value for column 't20' at row 1 +Warning 1264 Out of range value for column 's' at row 1 +Warning 1264 Out of range value for column 's0' at row 1 +Warning 1264 Out of range value for column 's1' at row 1 +Warning 1264 Out of range value for column 's20' at row 1 +Warning 1264 Out of range value for column 'm' at row 1 +Warning 1264 Out of range value for column 'm0' at row 1 +Warning 1264 Out of range value for column 'm1' at row 1 +Warning 1264 Out of range value for column 'm20' at row 1 +Warning 1264 Out of range value for column 'b' at row 1 +Warning 1264 Out of range value for column 'b0' at row 1 +Warning 1264 Out of range value for column 'b1' at row 1 +Warning 1264 Out of range value for column 'b20' at row 1 +INSERT INTO t1 (i,i0,i1,i20,t,t0,t1,t20,s,s0,s1,s20,m,m0,m1,m20,b,b0,b1,b20) VALUES 
(4294967295,4294967295,4294967295,4294967295,255,255,255,255,65535,65535,65535,65535,16777215,16777215,16777215,16777215,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615); +SELECT i,i0,i1,i20,t,t0,t1,t20,s,s0,s1,s20,m,m0,m1,m20,b,b0,b1,b20 FROM t1; +i i0 i1 i20 t t0 t1 t20 s s0 s1 s20 m m0 m1 m20 b b0 b1 b20 +0000000000 0000000000 0 00000000000000000000 000 000 0 00000000000000000000 00000 00000 0 00000000000000000000 00000000 00000000 0 00000000000000000000 00000000000000000000 00000000000000000000 0 00000000000000000000 +0000000000 0000000000 0 00000000000000000000 000 000 0 00000000000000000000 00000 00000 0 00000000000000000000 00000000 00000000 0 00000000000000000000 00000000000000000000 00000000000000000000 0 00000000000000000000 +0000000001 0000000002 3 00000000000000000004 005 006 7 00000000000000000008 00009 00010 11 00000000000000000012 00000013 00000014 15 00000000000000000016 00000000000000000017 00000000000000000018 19 00000000000000000020 +2147483647 2147483647 2147483647 00000000002147483647 127 127 127 00000000000000000127 32767 32767 32767 00000000000000032767 08388607 08388607 8388607 00000000000008388607 09223372036854775807 09223372036854775807 9223372036854775807 09223372036854775807 +4294967295 4294967295 4294967295 00000000004294967295 255 255 255 00000000000000000255 65535 65535 65535 00000000000000065535 16777215 16777215 16777215 00000000000016777215 18446744073709551615 18446744073709551615 18446744073709551615 18446744073709551615 +INSERT INTO t1 (i,i0,i1,i20,t,t0,t1,t20,s,s0,s1,s20,m,m0,m1,m20,b,b0,b1,b20) VALUES (-2147483649,-2147483649,-2147483649,-2147483649,-129,-129,-129,-129,-32769,-32769,-32769,-32769,-8388609,-8388609,-8388609,-8388609,-9223372036854775809,-9223372036854775809,-9223372036854775809,-9223372036854775809); +Warnings: +Warning 1264 Out of range value for column 'i' at row 1 +Warning 1264 Out of range value for column 'i0' at row 1 +Warning 1264 Out of range value for column 'i1' at 
row 1 +Warning 1264 Out of range value for column 'i20' at row 1 +Warning 1264 Out of range value for column 't' at row 1 +Warning 1264 Out of range value for column 't0' at row 1 +Warning 1264 Out of range value for column 't1' at row 1 +Warning 1264 Out of range value for column 't20' at row 1 +Warning 1264 Out of range value for column 's' at row 1 +Warning 1264 Out of range value for column 's0' at row 1 +Warning 1264 Out of range value for column 's1' at row 1 +Warning 1264 Out of range value for column 's20' at row 1 +Warning 1264 Out of range value for column 'm' at row 1 +Warning 1264 Out of range value for column 'm0' at row 1 +Warning 1264 Out of range value for column 'm1' at row 1 +Warning 1264 Out of range value for column 'm20' at row 1 +Warning 1264 Out of range value for column 'b' at row 1 +Warning 1264 Out of range value for column 'b0' at row 1 +Warning 1264 Out of range value for column 'b1' at row 1 +Warning 1264 Out of range value for column 'b20' at row 1 +INSERT INTO t1 (i,i0,i1,i20,t,t0,t1,t20,s,s0,s1,s20,m,m0,m1,m20,b,b0,b1,b20) VALUES (4294967296,4294967296,4294967296,4294967296,256,256,256,256,65536,65536,65536,65536,16777216,16777216,16777216,16777216,18446744073709551616,18446744073709551616,18446744073709551616,18446744073709551616); +Warnings: +Warning 1264 Out of range value for column 'i' at row 1 +Warning 1264 Out of range value for column 'i0' at row 1 +Warning 1264 Out of range value for column 'i1' at row 1 +Warning 1264 Out of range value for column 'i20' at row 1 +Warning 1264 Out of range value for column 't' at row 1 +Warning 1264 Out of range value for column 't0' at row 1 +Warning 1264 Out of range value for column 't1' at row 1 +Warning 1264 Out of range value for column 't20' at row 1 +Warning 1264 Out of range value for column 's' at row 1 +Warning 1264 Out of range value for column 's0' at row 1 +Warning 1264 Out of range value for column 's1' at row 1 +Warning 1264 Out of range value for column 's20' at row 1 
+Warning 1264 Out of range value for column 'm' at row 1 +Warning 1264 Out of range value for column 'm0' at row 1 +Warning 1264 Out of range value for column 'm1' at row 1 +Warning 1264 Out of range value for column 'm20' at row 1 +Warning 1264 Out of range value for column 'b' at row 1 +Warning 1264 Out of range value for column 'b0' at row 1 +Warning 1264 Out of range value for column 'b1' at row 1 +Warning 1264 Out of range value for column 'b20' at row 1 +INSERT INTO t1 (i,i0,i1,i20,t,t0,t1,t20,s,s0,s1,s20,m,m0,m1,m20,b,b0,b1,b20) SELECT b,b,b,b,b,b,b,b,b,b,b,b,b,b,b,b,b,b,b,b FROM t1 WHERE b IN (-9223372036854775808,9223372036854775807,18446744073709551615); +Warnings: +Warning 1264 Out of range value for column 'i' at row 8 +Warning 1264 Out of range value for column 'i0' at row 8 +Warning 1264 Out of range value for column 'i1' at row 8 +Warning 1264 Out of range value for column 'i20' at row 8 +Warning 1264 Out of range value for column 't' at row 8 +Warning 1264 Out of range value for column 't0' at row 8 +Warning 1264 Out of range value for column 't1' at row 8 +Warning 1264 Out of range value for column 't20' at row 8 +Warning 1264 Out of range value for column 's' at row 8 +Warning 1264 Out of range value for column 's0' at row 8 +Warning 1264 Out of range value for column 's1' at row 8 +Warning 1264 Out of range value for column 's20' at row 8 +Warning 1264 Out of range value for column 'm' at row 8 +Warning 1264 Out of range value for column 'm0' at row 8 +Warning 1264 Out of range value for column 'm1' at row 8 +Warning 1264 Out of range value for column 'm20' at row 8 +Warning 1264 Out of range value for column 'i' at row 9 +Warning 1264 Out of range value for column 'i0' at row 9 +Warning 1264 Out of range value for column 'i1' at row 9 +Warning 1264 Out of range value for column 'i20' at row 9 +Warning 1264 Out of range value for column 't' at row 9 +Warning 1264 Out of range value for column 't0' at row 9 +Warning 1264 Out of range value for 
column 't1' at row 9 +Warning 1264 Out of range value for column 't20' at row 9 +Warning 1264 Out of range value for column 's' at row 9 +Warning 1264 Out of range value for column 's0' at row 9 +Warning 1264 Out of range value for column 's1' at row 9 +Warning 1264 Out of range value for column 's20' at row 9 +Warning 1264 Out of range value for column 'm' at row 9 +Warning 1264 Out of range value for column 'm0' at row 9 +Warning 1264 Out of range value for column 'm1' at row 9 +Warning 1264 Out of range value for column 'm20' at row 9 +Warning 1264 Out of range value for column 'i' at row 10 +Warning 1264 Out of range value for column 'i0' at row 10 +Warning 1264 Out of range value for column 'i1' at row 10 +Warning 1264 Out of range value for column 'i20' at row 10 +Warning 1264 Out of range value for column 't' at row 10 +Warning 1264 Out of range value for column 't0' at row 10 +Warning 1264 Out of range value for column 't1' at row 10 +Warning 1264 Out of range value for column 't20' at row 10 +Warning 1264 Out of range value for column 's' at row 10 +Warning 1264 Out of range value for column 's0' at row 10 +Warning 1264 Out of range value for column 's1' at row 10 +Warning 1264 Out of range value for column 's20' at row 10 +Warning 1264 Out of range value for column 'm' at row 10 +Warning 1264 Out of range value for column 'm0' at row 10 +Warning 1264 Out of range value for column 'm1' at row 10 +Warning 1264 Out of range value for column 'm20' at row 10 +SELECT i,i0,i1,i20,t,t0,t1,t20,s,s0,s1,s20,m,m0,m1,m20,b,b0,b1,b20 FROM t1; +i i0 i1 i20 t t0 t1 t20 s s0 s1 s20 m m0 m1 m20 b b0 b1 b20 +0000000000 0000000000 0 00000000000000000000 000 000 0 00000000000000000000 00000 00000 0 00000000000000000000 00000000 00000000 0 00000000000000000000 00000000000000000000 00000000000000000000 0 00000000000000000000 +0000000000 0000000000 0 00000000000000000000 000 000 0 00000000000000000000 00000 00000 0 00000000000000000000 00000000 00000000 0 00000000000000000000 
00000000000000000000 00000000000000000000 0 00000000000000000000 +0000000000 0000000000 0 00000000000000000000 000 000 0 00000000000000000000 00000 00000 0 00000000000000000000 00000000 00000000 0 00000000000000000000 00000000000000000000 00000000000000000000 0 00000000000000000000 +0000000001 0000000002 3 00000000000000000004 005 006 7 00000000000000000008 00009 00010 11 00000000000000000012 00000013 00000014 15 00000000000000000016 00000000000000000017 00000000000000000018 19 00000000000000000020 +2147483647 2147483647 2147483647 00000000002147483647 127 127 127 00000000000000000127 32767 32767 32767 00000000000000032767 08388607 08388607 8388607 00000000000008388607 09223372036854775807 09223372036854775807 9223372036854775807 09223372036854775807 +4294967295 4294967295 4294967295 00000000004294967295 255 255 255 00000000000000000255 65535 65535 65535 00000000000000065535 16777215 16777215 16777215 00000000000016777215 09223372036854775807 09223372036854775807 9223372036854775807 09223372036854775807 +4294967295 4294967295 4294967295 00000000004294967295 255 255 255 00000000000000000255 65535 65535 65535 00000000000000065535 16777215 16777215 16777215 00000000000016777215 18446744073709551615 18446744073709551615 18446744073709551615 18446744073709551615 +4294967295 4294967295 4294967295 00000000004294967295 255 255 255 00000000000000000255 65535 65535 65535 00000000000000065535 16777215 16777215 16777215 00000000000016777215 18446744073709551615 18446744073709551615 18446744073709551615 18446744073709551615 +4294967295 4294967295 4294967295 00000000004294967295 255 255 255 00000000000000000255 65535 65535 65535 00000000000000065535 16777215 16777215 16777215 00000000000016777215 18446744073709551615 18446744073709551615 18446744073709551615 18446744073709551615 +4294967295 4294967295 4294967295 00000000004294967295 255 255 255 00000000000000000255 65535 65535 65535 00000000000000065535 16777215 16777215 16777215 00000000000016777215 18446744073709551615 
18446744073709551615 18446744073709551615 18446744073709551615 +ALTER TABLE t1 ADD COLUMN i257 INT(257) ZEROFILL; +ERROR 42000: Display width out of range for 'i257' (max = 255) +DROP TABLE t1; +CREATE TABLE t1 ( +t TINYINT ZEROFILL, +s SMALLINT ZEROFILL, +m MEDIUMINT ZEROFILL, +i INT ZEROFILL, +b BIGINT ZEROFILL, +PRIMARY KEY (b) +) ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +t tinyint(3) unsigned zerofill YES NULL +s smallint(5) unsigned zerofill YES NULL +m mediumint(8) unsigned zerofill YES NULL +i int(10) unsigned zerofill YES NULL +b bigint(20) unsigned zerofill NO PRI NULL +INSERT INTO t1 (t,s,m,i,b) VALUES (1,10,100,1000,0); +SELECT t,s,m,i,b FROM t1; +t s m i b +001 00010 00000100 0000001000 00000000000000000000 +DROP TABLE t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/collation.result b/storage/rocksdb/mysql-test/rocksdb/r/collation.result new file mode 100644 index 0000000000000..e372cbe210908 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/collation.result @@ -0,0 +1,130 @@ +SET @start_global_value = @@global.ROCKSDB_STRICT_COLLATION_EXCEPTIONS; +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 (id INT primary key, value varchar(50), value2 varbinary(50), value3 text) engine=rocksdb charset utf8; +DROP TABLE t1; +CREATE TABLE t1 (id INT primary key, value varchar(50), value2 varbinary(50), value3 text, index(value)) engine=rocksdb charset utf8; +ERROR HY000: Unsupported collation on string indexed column test.t1.value Use binary collation (latin1_bin, binary, utf8_bin). +CREATE TABLE t1 (id INT primary key, value varchar(50), value2 varbinary(50), value3 text, index(value3(50))) engine=rocksdb charset utf8; +ERROR HY000: Unsupported collation on string indexed column test.t1.value3 Use binary collation (latin1_bin, binary, utf8_bin). 
+SET GLOBAL rocksdb_strict_collation_check=0; +CREATE TABLE t1 (id INT primary key, value varchar(50), value2 varbinary(50), value3 text, index(value3(50))) engine=rocksdb charset utf8; +DROP TABLE t1; +SET GLOBAL rocksdb_strict_collation_check=1; +CREATE TABLE t1 (id INT primary key, value varchar(50), value2 varbinary(50), value3 text, index(value2)) engine=rocksdb charset utf8; +DROP TABLE t1; +CREATE TABLE t1 (id varchar(20), value varchar(50), value2 varchar(50), value3 text, primary key (id), index(value, value2)) engine=rocksdb charset latin1 collate latin1_bin; +DROP TABLE t1; +CREATE TABLE t1 (id varchar(20), value varchar(50), value2 varchar(50), value3 text, primary key (id), index(value, value2)) engine=rocksdb charset utf8 collate utf8_bin; +DROP TABLE t1; +CREATE TABLE t1 (id varchar(20) collate latin1_bin, value varchar(50) collate utf8_bin, value2 varchar(50) collate latin1_bin, value3 text, primary key (id), index(value, value2)) engine=rocksdb; +DROP TABLE t1; +SET GLOBAL rocksdb_strict_collation_exceptions=t1; +CREATE TABLE t1 (id INT primary key, value varchar(50), index(value)) engine=rocksdb charset utf8; +DROP TABLE t1; +CREATE TABLE t2 (id INT primary key, value varchar(50), index(value)) engine=rocksdb charset utf8; +ERROR HY000: Unsupported collation on string indexed column test.t2.value Use binary collation (latin1_bin, binary, utf8_bin). +SET GLOBAL rocksdb_strict_collation_exceptions="t.*"; +CREATE TABLE t123 (id INT primary key, value varchar(50), index(value)) engine=rocksdb charset utf8; +DROP TABLE t123; +CREATE TABLE s123 (id INT primary key, value varchar(50), index(value)) engine=rocksdb charset utf8; +ERROR HY000: Unsupported collation on string indexed column test.s123.value Use binary collation (latin1_bin, binary, utf8_bin). 
+SET GLOBAL rocksdb_strict_collation_exceptions=".t.*"; +CREATE TABLE xt123 (id INT primary key, value varchar(50), index(value)) engine=rocksdb charset utf8; +DROP TABLE xt123; +CREATE TABLE t123 (id INT primary key, value varchar(50), index(value)) engine=rocksdb charset utf8; +ERROR HY000: Unsupported collation on string indexed column test.t123.value Use binary collation (latin1_bin, binary, utf8_bin). +SET GLOBAL rocksdb_strict_collation_exceptions="s.*,t.*"; +CREATE TABLE s1 (id INT primary key, value varchar(50), index(value)) engine=rocksdb charset utf8; +DROP TABLE s1; +CREATE TABLE t1 (id INT primary key, value varchar(50), index(value)) engine=rocksdb charset utf8; +DROP TABLE t1; +CREATE TABLE u1 (id INT primary key, value varchar(50), index(value)) engine=rocksdb charset utf8; +ERROR HY000: Unsupported collation on string indexed column test.u1.value Use binary collation (latin1_bin, binary, utf8_bin). +SET GLOBAL rocksdb_strict_collation_exceptions="s.*|t.*"; +CREATE TABLE s1 (id INT primary key, value varchar(50), index(value)) engine=rocksdb charset utf8; +DROP TABLE s1; +CREATE TABLE t1 (id INT primary key, value varchar(50), index(value)) engine=rocksdb charset utf8; +DROP TABLE t1; +CREATE TABLE u1 (id INT primary key, value varchar(50), index(value)) engine=rocksdb charset utf8; +ERROR HY000: Unsupported collation on string indexed column test.u1.value Use binary collation (latin1_bin, binary, utf8_bin). +SET GLOBAL rocksdb_strict_collation_exceptions=",s.*,t.*"; +CREATE TABLE s1 (id INT primary key, value varchar(50), index(value)) engine=rocksdb charset utf8; +DROP TABLE s1; +CREATE TABLE t1 (id INT primary key, value varchar(50), index(value)) engine=rocksdb charset utf8; +DROP TABLE t1; +CREATE TABLE u1 (id INT primary key, value varchar(50), index(value)) engine=rocksdb charset utf8; +ERROR HY000: Unsupported collation on string indexed column test.u1.value Use binary collation (latin1_bin, binary, utf8_bin). 
+SET GLOBAL rocksdb_strict_collation_exceptions="|s.*|t.*"; +CREATE TABLE s1 (id INT primary key, value varchar(50), index(value)) engine=rocksdb charset utf8; +DROP TABLE s1; +CREATE TABLE t1 (id INT primary key, value varchar(50), index(value)) engine=rocksdb charset utf8; +DROP TABLE t1; +CREATE TABLE u1 (id INT primary key, value varchar(50), index(value)) engine=rocksdb charset utf8; +ERROR HY000: Unsupported collation on string indexed column test.u1.value Use binary collation (latin1_bin, binary, utf8_bin). +SET GLOBAL rocksdb_strict_collation_exceptions="s.*,,t.*"; +CREATE TABLE s1 (id INT primary key, value varchar(50), index(value)) engine=rocksdb charset utf8; +DROP TABLE s1; +CREATE TABLE t1 (id INT primary key, value varchar(50), index(value)) engine=rocksdb charset utf8; +DROP TABLE t1; +CREATE TABLE u1 (id INT primary key, value varchar(50), index(value)) engine=rocksdb charset utf8; +ERROR HY000: Unsupported collation on string indexed column test.u1.value Use binary collation (latin1_bin, binary, utf8_bin). +SET GLOBAL rocksdb_strict_collation_exceptions="s.*||t.*"; +CREATE TABLE s1 (id INT primary key, value varchar(50), index(value)) engine=rocksdb charset utf8; +DROP TABLE s1; +CREATE TABLE t1 (id INT primary key, value varchar(50), index(value)) engine=rocksdb charset utf8; +DROP TABLE t1; +CREATE TABLE u1 (id INT primary key, value varchar(50), index(value)) engine=rocksdb charset utf8; +ERROR HY000: Unsupported collation on string indexed column test.u1.value Use binary collation (latin1_bin, binary, utf8_bin). 
+SET GLOBAL rocksdb_strict_collation_exceptions="s.*,t.*,"; +CREATE TABLE s1 (id INT primary key, value varchar(50), index(value)) engine=rocksdb charset utf8; +DROP TABLE s1; +CREATE TABLE t1 (id INT primary key, value varchar(50), index(value)) engine=rocksdb charset utf8; +DROP TABLE t1; +CREATE TABLE u1 (id INT primary key, value varchar(50), index(value)) engine=rocksdb charset utf8; +ERROR HY000: Unsupported collation on string indexed column test.u1.value Use binary collation (latin1_bin, binary, utf8_bin). +SET GLOBAL rocksdb_strict_collation_exceptions="s.*|t.*|"; +CREATE TABLE s1 (id INT primary key, value varchar(50), index(value)) engine=rocksdb charset utf8; +DROP TABLE s1; +CREATE TABLE t1 (id INT primary key, value varchar(50), index(value)) engine=rocksdb charset utf8; +DROP TABLE t1; +CREATE TABLE u1 (id INT primary key, value varchar(50), index(value)) engine=rocksdb charset utf8; +ERROR HY000: Unsupported collation on string indexed column test.u1.value Use binary collation (latin1_bin, binary, utf8_bin). +SET GLOBAL rocksdb_strict_collation_exceptions="||||,,,,s.*,,|,,||,t.*,,|||,,,"; +CREATE TABLE s1 (id INT primary key, value varchar(50), index(value)) engine=rocksdb charset utf8; +DROP TABLE s1; +CREATE TABLE t1 (id INT primary key, value varchar(50), index(value)) engine=rocksdb charset utf8; +DROP TABLE t1; +CREATE TABLE u1 (id INT primary key, value varchar(50), index(value)) engine=rocksdb charset utf8; +ERROR HY000: Unsupported collation on string indexed column test.u1.value Use binary collation (latin1_bin, binary, utf8_bin). +SET GLOBAL rocksdb_strict_collation_exceptions='t1'; +CREATE TABLE t1 (id INT primary key, value varchar(50), index(value)) engine=rocksdb; +ALTER TABLE t1 AUTO_INCREMENT=1; +DROP TABLE t1; +CREATE TABLE t2 (id INT primary key, value varchar(50), index(value)) engine=rocksdb; +ERROR HY000: Unsupported collation on string indexed column test.t2.value Use binary collation (latin1_bin, binary, utf8_bin). 
+CREATE TABLE t2 (id INT primary key, value varchar(50)) engine=rocksdb; +ALTER TABLE t2 ADD INDEX(value); +ERROR HY000: Unsupported collation on string indexed column test.t2.value Use binary collation (latin1_bin, binary, utf8_bin). +DROP TABLE t2; +SET GLOBAL rocksdb_strict_collation_exceptions="[a-b"; +FOUND 1 /Invalid pattern in strict_collation_exceptions: \[a-b/ in mysqld.1.err +CREATE TABLE a (id INT PRIMARY KEY, value varchar(50), index(value)) engine=rocksdb charset utf8; +ERROR HY000: Unsupported collation on string indexed column test.a.value Use binary collation (latin1_bin, binary, utf8_bin). +SET GLOBAL rocksdb_strict_collation_exceptions="[a-b]"; +CREATE TABLE a (id INT PRIMARY KEY, value varchar(50), index(value)) engine=rocksdb charset utf8; +CREATE TABLE b (id INT PRIMARY KEY, value varchar(50), index(value)) engine=rocksdb charset utf8; +CREATE TABLE c (id INT PRIMARY KEY, value varchar(50), index(value)) engine=rocksdb charset utf8; +ERROR HY000: Unsupported collation on string indexed column test.c.value Use binary collation (latin1_bin, binary, utf8_bin). +DROP TABLE a, b; +call mtr.add_suppression("Invalid pattern in strict_collation_exceptions:"); +SET GLOBAL rocksdb_strict_collation_exceptions="abc\\"; +FOUND 1 /Invalid pattern in strict_collation_exceptions: abc/ in mysqld.1.err +CREATE TABLE abc (id INT PRIMARY KEY, value varchar(50), index(value)) engine=rocksdb charset utf8; +ERROR HY000: Unsupported collation on string indexed column test.abc.value Use binary collation (latin1_bin, binary, utf8_bin). +SET GLOBAL rocksdb_strict_collation_exceptions="abc"; +CREATE TABLE abc (id INT PRIMARY KEY, value varchar(50), index(value)) engine=rocksdb charset utf8; +CREATE TABLE abcd (id INT PRIMARY KEY, value varchar(50), index(value)) engine=rocksdb charset utf8; +ERROR HY000: Unsupported collation on string indexed column test.abcd.value Use binary collation (latin1_bin, binary, utf8_bin). 
+DROP TABLE abc; +SET GLOBAL rocksdb_strict_collation_exceptions=null; +SET GLOBAL rocksdb_strict_collation_exceptions=@start_global_value; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/collation_exception.result b/storage/rocksdb/mysql-test/rocksdb/r/collation_exception.result new file mode 100644 index 0000000000000..83d72d6c4499d --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/collation_exception.result @@ -0,0 +1,25 @@ +CREATE TABLE `r1.lol` ( +`c1` int(10) NOT NULL DEFAULT '0', +`c2` int(11) NOT NULL DEFAULT '0', +`c3` int(1) NOT NULL DEFAULT '0', +`c4` int(11) NOT NULL DEFAULT '0', +`c5` int(11) NOT NULL DEFAULT '0', +`c6` varchar(100) NOT NULL DEFAULT '', +`c7` varchar(100) NOT NULL DEFAULT '', +`c8` varchar(255) NOT NULL DEFAULT '', +`c9` int(10) NOT NULL DEFAULT '125', +`c10` int(10) NOT NULL DEFAULT '125', +`c11` text NOT NULL, +`c12` int(11) NOT NULL DEFAULT '0', +`c13` int(10) NOT NULL DEFAULT '0', +`c14` text NOT NULL, +`c15` blob NOT NULL, +`c16` int(11) NOT NULL DEFAULT '0', +`c17` int(11) NOT NULL DEFAULT '0', +`c18` int(11) NOT NULL DEFAULT '0', +PRIMARY KEY (`c1`), +KEY i1 (`c4`), +KEY i2 (`c7`), +KEY i3 (`c2`)) ENGINE=RocksDB DEFAULT CHARSET=latin1; +DROP INDEX i1 ON `r1.lol`; +DROP TABLE `r1.lol`; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/commit_in_the_middle_ddl.result b/storage/rocksdb/mysql-test/rocksdb/r/commit_in_the_middle_ddl.result new file mode 100644 index 0000000000000..4d64d12816f6b --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/commit_in_the_middle_ddl.result @@ -0,0 +1,14 @@ +DROP TABLE IF EXISTS a; +create table a (id int, value int, primary key (id) comment 'cf_a') engine=rocksdb; +set rocksdb_bulk_load=1; +set rocksdb_commit_in_the_middle=1; +alter table a add index v (value) COMMENT 'cf_a'; +set rocksdb_bulk_load=0; +set rocksdb_commit_in_the_middle=0; +select count(*) from a force index(primary); +count(*) +100000 +select count(*) from a force index(v); +count(*) +100000 +DROP TABLE a; diff --git 
a/storage/rocksdb/mysql-test/rocksdb/r/compact_deletes.result b/storage/rocksdb/mysql-test/rocksdb/r/compact_deletes.result new file mode 100644 index 0000000000000..5b3cfaf783967 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/compact_deletes.result @@ -0,0 +1,78 @@ +DROP TABLE IF EXISTS r1; +create table r1 ( +id1 int, +id2 int, +type int, +value varchar(100), +value2 int, +value3 int, +primary key (type, id1, id2), +index id1_type (id1, type, value2, value, id2) +) engine=rocksdb collate latin1_bin; +select 'loading data'; +loading data +loading data +set global rocksdb_force_flush_memtable_now=1; +optimize table r1; +Table Op Msg_type Msg_text +test.r1 optimize status OK +Test 1: Do a bunch of updates without setting the compaction sysvar +Expect: no compaction +set global rocksdb_compaction_sequential_deletes_window=0; +set global rocksdb_compaction_sequential_deletes= 0; +set global rocksdb_compaction_sequential_deletes_file_size=0; +set global rocksdb_force_flush_memtable_now=1; +wait_for_delete: 0 +There are deletes left +SET GLOBAL rocksdb_compaction_sequential_deletes= 0; +SET GLOBAL rocksdb_compaction_sequential_deletes_file_size= 0; +SET GLOBAL rocksdb_compaction_sequential_deletes_window= 0; +Test 2: Do a bunch of updates and set the compaction sysvar +Expect: compaction +set global rocksdb_compaction_sequential_deletes_window=1000; +set global rocksdb_compaction_sequential_deletes= 990; +set global rocksdb_compaction_sequential_deletes_file_size=0; +set global rocksdb_force_flush_memtable_now=1; +wait_for_delete: 1 +No more deletes left +SET GLOBAL rocksdb_compaction_sequential_deletes= 0; +SET GLOBAL rocksdb_compaction_sequential_deletes_file_size= 0; +SET GLOBAL rocksdb_compaction_sequential_deletes_window= 0; +Test 3: Do a bunch of updates and set the compaction sysvar and a file size to something large +Expect: no compaction +set global rocksdb_compaction_sequential_deletes_window=1000; +set global rocksdb_compaction_sequential_deletes= 
1000; +set global rocksdb_compaction_sequential_deletes_file_size=1000000; +set global rocksdb_force_flush_memtable_now=1; +wait_for_delete: 0 +There are deletes left +SET GLOBAL rocksdb_compaction_sequential_deletes= 0; +SET GLOBAL rocksdb_compaction_sequential_deletes_file_size= 0; +SET GLOBAL rocksdb_compaction_sequential_deletes_window= 0; +Test 4: Do a bunch of secondary key updates and set the compaction sysvar +Expect: compaction +set global rocksdb_compaction_sequential_deletes_window=1000; +set global rocksdb_compaction_sequential_deletes= 50; +set global rocksdb_compaction_sequential_deletes_file_size=0; +set global rocksdb_force_flush_memtable_now=1; +wait_for_delete: 1 +No more deletes left +SET GLOBAL rocksdb_compaction_sequential_deletes= 0; +SET GLOBAL rocksdb_compaction_sequential_deletes_file_size= 0; +SET GLOBAL rocksdb_compaction_sequential_deletes_window= 0; +Test 5: Do a bunch of secondary key updates and set the compaction sysvar, +and rocksdb_compaction_sequential_deletes_count_sd turned on +Expect: compaction +SET @save_rocksdb_compaction_sequential_deletes_count_sd = @@global.rocksdb_compaction_sequential_deletes_count_sd; +SET GLOBAL rocksdb_compaction_sequential_deletes_count_sd= ON; +set global rocksdb_compaction_sequential_deletes_window=1000; +set global rocksdb_compaction_sequential_deletes= 50; +set global rocksdb_compaction_sequential_deletes_file_size=0; +set global rocksdb_force_flush_memtable_now=1; +wait_for_delete: 1 +No more deletes left +SET GLOBAL rocksdb_compaction_sequential_deletes= 0; +SET GLOBAL rocksdb_compaction_sequential_deletes_file_size= 0; +SET GLOBAL rocksdb_compaction_sequential_deletes_window= 0; +SET GLOBAL rocksdb_compaction_sequential_deletes_count_sd= @save_rocksdb_compaction_sequential_deletes_count_sd; +drop table r1; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/compression_zstd.result b/storage/rocksdb/mysql-test/rocksdb/r/compression_zstd.result new file mode 100644 index 
0000000000000..62a6dbbdaca71 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/compression_zstd.result @@ -0,0 +1,2 @@ +create table t (id int primary key) engine=rocksdb; +drop table t; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/concurrent_alter.result b/storage/rocksdb/mysql-test/rocksdb/r/concurrent_alter.result new file mode 100644 index 0000000000000..396f80a2ecbae --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/concurrent_alter.result @@ -0,0 +1,12 @@ +DROP DATABASE IF EXISTS mysqlslap; +CREATE DATABASE mysqlslap; +use mysqlslap; +CREATE TABLE a1 (a int, b int) ENGINE=ROCKSDB; +INSERT INTO a1 VALUES (1, 1); +SHOW CREATE TABLE a1; +Table Create Table +a1 CREATE TABLE `a1` ( + `a` int(11) DEFAULT NULL, + `b` int(11) DEFAULT NULL +) ENGINE=ROCKSDB DEFAULT CHARSET=latin1 +DROP DATABASE mysqlslap; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/cons_snapshot_read_committed.result b/storage/rocksdb/mysql-test/rocksdb/r/cons_snapshot_read_committed.result new file mode 100644 index 0000000000000..d75a548e6ffc1 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/cons_snapshot_read_committed.result @@ -0,0 +1,151 @@ +DROP TABLE IF EXISTS t1; +connect con1,localhost,root,,; +connect con2,localhost,root,,; +connection con1; +CREATE TABLE t1 (a INT, pk INT AUTO_INCREMENT PRIMARY KEY) ENGINE=ROCKSDB; +SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED; +START TRANSACTION WITH CONSISTENT SNAPSHOT; +ERROR: 1105 +connection con2; +select * from information_schema.rocksdb_dbstats where stat_type='DB_NUM_SNAPSHOTS'; +STAT_TYPE VALUE +DB_NUM_SNAPSHOTS 0 +connection con1; +COMMIT; +connection con2; +select * from information_schema.rocksdb_dbstats where stat_type='DB_NUM_SNAPSHOTS'; +STAT_TYPE VALUE +DB_NUM_SNAPSHOTS 0 +connection con1; +START TRANSACTION WITH CONSISTENT SNAPSHOT; +ERROR: 1105 +connection con2; +INSERT INTO t1 (a) VALUES (1); +connection con1; +# If consistent read works on this isolation level (READ COMMITTED), the following 
SELECT should not return the value we inserted (1) +SELECT a FROM t1; +a +1 +COMMIT; +connection default; +disconnect con1; +disconnect con2; +DROP TABLE t1; +connect con1,localhost,root,,; +connect con2,localhost,root,,; +connection con1; +CREATE TABLE r1 (id int primary key, value int, value2 int) engine=ROCKSDB; +SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED; +insert into r1 values (1,1,1),(2,2,2),(3,3,3),(4,4,4); +BEGIN; +connection con2; +INSERT INTO r1 values (5,5,5); +connection con1; +SELECT * FROM r1; +id value value2 +1 1 1 +2 2 2 +3 3 3 +4 4 4 +5 5 5 +connection con2; +INSERT INTO r1 values (6,6,6); +connection con1; +SELECT * FROM r1; +id value value2 +1 1 1 +2 2 2 +3 3 3 +4 4 4 +5 5 5 +6 6 6 +COMMIT; +SELECT * FROM r1; +id value value2 +1 1 1 +2 2 2 +3 3 3 +4 4 4 +5 5 5 +6 6 6 +START TRANSACTION WITH CONSISTENT SNAPSHOT; +ERROR: 1105 +connection con2; +INSERT INTO r1 values (7,7,7); +connection con1; +SELECT * FROM r1; +id value value2 +1 1 1 +2 2 2 +3 3 3 +4 4 4 +5 5 5 +6 6 6 +7 7 7 +connection con2; +INSERT INTO r1 values (8,8,8); +connection con1; +SELECT * FROM r1; +id value value2 +1 1 1 +2 2 2 +3 3 3 +4 4 4 +5 5 5 +6 6 6 +7 7 7 +8 8 8 +COMMIT; +SELECT * FROM r1; +id value value2 +1 1 1 +2 2 2 +3 3 3 +4 4 4 +5 5 5 +6 6 6 +7 7 7 +8 8 8 +START TRANSACTION WITH CONSISTENT SNAPSHOT; +ERROR: 1105 +connection con2; +INSERT INTO r1 values (9,9,9); +connection con1; +START TRANSACTION WITH CONSISTENT SNAPSHOT; +ERROR: 1105 +connection con2; +INSERT INTO r1 values (10,10,10); +connection con1; +SELECT * FROM r1; +id value value2 +1 1 1 +2 2 2 +3 3 3 +4 4 4 +5 5 5 +6 6 6 +7 7 7 +8 8 8 +9 9 9 +10 10 10 +START TRANSACTION WITH CONSISTENT SNAPSHOT; +ERROR: 1105 +INSERT INTO r1 values (11,11,11); +ERROR: 0 +SELECT * FROM r1; +id value value2 +1 1 1 +2 2 2 +3 3 3 +4 4 4 +5 5 5 +6 6 6 +7 7 7 +8 8 8 +9 9 9 +10 10 10 +11 11 11 +drop table r1; +connection default; +disconnect con1; +disconnect con2; diff --git 
a/storage/rocksdb/mysql-test/rocksdb/r/cons_snapshot_repeatable_read.result b/storage/rocksdb/mysql-test/rocksdb/r/cons_snapshot_repeatable_read.result new file mode 100644 index 0000000000000..7458e6b72c3fe --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/cons_snapshot_repeatable_read.result @@ -0,0 +1,144 @@ +DROP TABLE IF EXISTS t1; +connect con1,localhost,root,,; +connect con2,localhost,root,,; +connection con1; +CREATE TABLE t1 (a INT, pk INT AUTO_INCREMENT PRIMARY KEY) ENGINE=ROCKSDB; +SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ; +START TRANSACTION WITH CONSISTENT SNAPSHOT; +ERROR: 0 +connection con2; +select * from information_schema.rocksdb_dbstats where stat_type='DB_NUM_SNAPSHOTS'; +STAT_TYPE VALUE +DB_NUM_SNAPSHOTS 1 +connection con1; +COMMIT; +connection con2; +select * from information_schema.rocksdb_dbstats where stat_type='DB_NUM_SNAPSHOTS'; +STAT_TYPE VALUE +DB_NUM_SNAPSHOTS 0 +connection con1; +START TRANSACTION WITH CONSISTENT SNAPSHOT; +ERROR: 0 +connection con2; +INSERT INTO t1 (a) VALUES (1); +connection con1; +# If consistent read works on this isolation level (REPEATABLE READ), the following SELECT should not return the value we inserted (1) +SELECT a FROM t1; +a +COMMIT; +connection default; +disconnect con1; +disconnect con2; +DROP TABLE t1; +connect con1,localhost,root,,; +connect con2,localhost,root,,; +connection con1; +CREATE TABLE r1 (id int primary key, value int, value2 int) engine=ROCKSDB; +SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ; +insert into r1 values (1,1,1),(2,2,2),(3,3,3),(4,4,4); +BEGIN; +connection con2; +INSERT INTO r1 values (5,5,5); +connection con1; +SELECT * FROM r1; +id value value2 +1 1 1 +2 2 2 +3 3 3 +4 4 4 +5 5 5 +connection con2; +INSERT INTO r1 values (6,6,6); +connection con1; +SELECT * FROM r1; +id value value2 +1 1 1 +2 2 2 +3 3 3 +4 4 4 +5 5 5 +COMMIT; +SELECT * FROM r1; +id value value2 +1 1 1 +2 2 2 +3 3 3 +4 4 4 +5 5 5 +6 6 6 +START TRANSACTION WITH CONSISTENT SNAPSHOT; 
+ERROR: 0 +connection con2; +INSERT INTO r1 values (7,7,7); +connection con1; +SELECT * FROM r1; +id value value2 +1 1 1 +2 2 2 +3 3 3 +4 4 4 +5 5 5 +6 6 6 +connection con2; +INSERT INTO r1 values (8,8,8); +connection con1; +SELECT * FROM r1; +id value value2 +1 1 1 +2 2 2 +3 3 3 +4 4 4 +5 5 5 +6 6 6 +COMMIT; +SELECT * FROM r1; +id value value2 +1 1 1 +2 2 2 +3 3 3 +4 4 4 +5 5 5 +6 6 6 +7 7 7 +8 8 8 +START TRANSACTION WITH CONSISTENT SNAPSHOT; +ERROR: 0 +connection con2; +INSERT INTO r1 values (9,9,9); +connection con1; +START TRANSACTION WITH CONSISTENT SNAPSHOT; +ERROR: 0 +connection con2; +INSERT INTO r1 values (10,10,10); +connection con1; +SELECT * FROM r1; +id value value2 +1 1 1 +2 2 2 +3 3 3 +4 4 4 +5 5 5 +6 6 6 +7 7 7 +8 8 8 +9 9 9 +START TRANSACTION WITH CONSISTENT SNAPSHOT; +ERROR: 0 +INSERT INTO r1 values (11,11,11); +ERROR: 1105 +SELECT * FROM r1; +id value value2 +1 1 1 +2 2 2 +3 3 3 +4 4 4 +5 5 5 +6 6 6 +7 7 7 +8 8 8 +9 9 9 +10 10 10 +drop table r1; +connection default; +disconnect con1; +disconnect con2; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/cons_snapshot_serializable.result b/storage/rocksdb/mysql-test/rocksdb/r/cons_snapshot_serializable.result new file mode 100644 index 0000000000000..9c55b0dd6891b --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/cons_snapshot_serializable.result @@ -0,0 +1,24 @@ +# -- WARNING ---------------------------------------------------------------- +# According to I_S.ENGINES, does not support transactions. +# If it is true, the test will most likely fail; you can +# either create an rdiff file, or add the test to disabled.def. +# If transactions should be supported, check the data in Information Schema. 
+# --------------------------------------------------------------------------- +DROP TABLE IF EXISTS t1; +connect con1,localhost,root,,; +connect con2,localhost,root,,; +connection con1; +CREATE TABLE t1 (a INT, pk INT AUTO_INCREMENT PRIMARY KEY) ENGINE=ROCKSDB; +SET SESSION TRANSACTION ISOLATION LEVEL SERIALIZABLE; +START TRANSACTION WITH CONSISTENT SNAPSHOT; +connection con2; +INSERT INTO t1 (a) VALUES (1); +connection con1; +# If consistent read works on this isolation level (SERIALIZABLE), the following SELECT should not return the value we inserted (1) +SELECT a FROM t1; +a +COMMIT; +connection default; +disconnect con1; +disconnect con2; +DROP TABLE t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/corrupted_data_reads_debug.result b/storage/rocksdb/mysql-test/rocksdb/r/corrupted_data_reads_debug.result new file mode 100644 index 0000000000000..0c08b09260310 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/corrupted_data_reads_debug.result @@ -0,0 +1,74 @@ +# +# Test how MyRocks handles reading corrupted data from disk. +# Data corruption is simulated at source-code level. 
+# +# +# A test for case when data in the table *record* is longer +# than table DDL expects it to be +# +create table t1 ( +pk int not null primary key, +col1 varchar(10) +) engine=rocksdb; +insert into t1 values (1,1),(2,2),(3,3); +select * from t1; +pk col1 +1 1 +2 2 +3 3 +set @tmp1=@@rocksdb_verify_row_debug_checksums; +set rocksdb_verify_row_debug_checksums=1; +set session debug_dbug= "+d,myrocks_simulate_bad_row_read1"; +select * from t1 where pk=1; +ERROR HY000: Got error 122 "Internal (unspecified) error in handler" from storage engine ROCKSDB +set session debug_dbug= "-d,myrocks_simulate_bad_row_read1"; +set rocksdb_verify_row_debug_checksums=@tmp1; +select * from t1 where pk=1; +pk col1 +1 1 +set session debug_dbug= "+d,myrocks_simulate_bad_row_read2"; +select * from t1 where pk=1; +ERROR HY000: Got error 122 "Internal (unspecified) error in handler" from storage engine ROCKSDB +set session debug_dbug= "-d,myrocks_simulate_bad_row_read2"; +set session debug_dbug= "+d,myrocks_simulate_bad_row_read3"; +select * from t1 where pk=1; +ERROR HY000: Got error 122 "Internal (unspecified) error in handler" from storage engine ROCKSDB +set session debug_dbug= "-d,myrocks_simulate_bad_row_read3"; +insert into t1 values(4,'0123456789'); +select * from t1; +pk col1 +1 1 +2 2 +3 3 +4 0123456789 +drop table t1; +# +# A test for case when index data is longer than table DDL +# expects it to be +# +create table t2 ( +pk varchar(4) not null primary key, +col1 int not null +) engine=rocksdb collate latin1_bin; +insert into t2 values ('ABCD',1); +select * from t2; +pk col1 +ABCD 1 +set session debug_dbug= "+d,myrocks_simulate_bad_pk_read1"; +select * from t2; +ERROR HY000: Got error 122 "Internal (unspecified) error in handler" from storage engine ROCKSDB +set session debug_dbug= "-d,myrocks_simulate_bad_pk_read1"; +drop table t2; +create table t2 ( +pk varchar(4) not null primary key, +col1 int not null +) engine=rocksdb; +insert into t2 values ('ABCD',1); +select * from 
t2; +pk col1 +ABCD 1 +set session debug_dbug= "+d,myrocks_simulate_bad_pk_read1"; +select * from t2; +ERROR HY000: Got error 122 "Internal (unspecified) error in handler" from storage engine ROCKSDB +set session debug_dbug= "-d,myrocks_simulate_bad_pk_read1"; +drop table t2; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/create_table.result b/storage/rocksdb/mysql-test/rocksdb/r/create_table.result new file mode 100644 index 0000000000000..8c879d82611ea --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/create_table.result @@ -0,0 +1,165 @@ +DROP TABLE IF EXISTS t1,t2; +CREATE TABLE t1 (a INT PRIMARY KEY) ENGINE=rocksdb; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` int(11) NOT NULL, + PRIMARY KEY (`a`) +) ENGINE=ROCKSDB DEFAULT CHARSET=latin1 +CREATE TABLE IF NOT EXISTS t1 (a INT PRIMARY KEY) ENGINE=rocksdb; +Warnings: +Note 1050 Table 't1' already exists +CREATE TABLE t2 LIKE t1; +SHOW CREATE TABLE t2; +Table Create Table +t2 CREATE TABLE `t2` ( + `a` int(11) NOT NULL, + PRIMARY KEY (`a`) +) ENGINE=ROCKSDB DEFAULT CHARSET=latin1 +CREATE TEMPORARY TABLE t2 (a INT PRIMARY KEY) ENGINE=rocksdb; +ERROR HY000: Table storage engine 'ROCKSDB' does not support the create option 'TEMPORARY' +DROP TABLE t2; +DROP TABLE IF EXISTS t1; +SET default_storage_engine = rocksdb; +CREATE TABLE t1 (a INT PRIMARY KEY); +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` int(11) NOT NULL, + PRIMARY KEY (`a`) +) ENGINE=ROCKSDB DEFAULT CHARSET=latin1 +DROP TABLE t1; +CREATE TABLE t1 (a INT PRIMARY KEY) AS SELECT 1 AS a UNION SELECT 2 AS a; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` int(11) NOT NULL, + PRIMARY KEY (`a`) +) ENGINE=ROCKSDB DEFAULT CHARSET=latin1 +SELECT * FROM t1; +a +1 +2 +FLUSH LOGS; +DROP TABLE IF EXISTS t1; +CREATE TABLE t1(c1 INT,c2 INT,c3 INT,c4 INT,c5 INT,c6 INT,c7 INT,c8 INT,c9 INT, +c10 INT,c11 INT,c12 INT,c13 INT,c14 INT,c15 INT,c16 INT,c17 INT, +c18 INT,c19 INT,c20 INT,c21 
INT,c22 INT,c23 INT,c24 INT,c25 INT, +c26 INT,c27 INT,c28 INT,c29 INT,c30 INT,c31 INT,c32 INT,c33 INT, +c34 INT,c35 INT,c36 INT,c37 INT,c38 INT,c39 INT,c40 INT,c41 INT, +c42 INT,c43 INT,c44 INT,c45 INT,c46 INT,c47 INT,c48 INT,c49 INT, +c50 INT,c51 INT,c52 INT,c53 INT,c54 INT,c55 INT,c56 INT,c57 INT, +c58 INT,c59 INT,c60 INT,c61 INT,c62 INT,c63 INT,c64 INT,c65 INT, +c66 INT,c67 INT,c68 INT,c69 INT,c70 INT,c71 INT,c72 INT,c73 INT, +c74 INT,c75 INT,c76 INT,c77 INT,c78 INT,c79 INT,c80 INT,c81 INT, +c82 INT,c83 INT,c84 INT,c85 INT,c86 INT,c87 INT,c88 INT,c89 INT, +c90 INT,c91 INT,c92 INT,c93 INT,c94 INT,c95 INT,c96 INT,c97 INT, +c98 INT,c99 INT,c100 INT,c101 INT,c102 INT,c103 INT,c104 INT, +c105 INT,c106 INT,c107 INT,c108 INT,c109 INT,c110 INT,c111 INT, +c112 INT,c113 INT,c114 INT,c115 INT,c116 INT,c117 INT,c118 INT, +c119 INT,c120 INT,c121 INT,c122 INT,c123 INT,c124 INT,c125 INT, +c126 INT,c127 INT,c128 INT,c129 INT,c130 INT,c131 INT,c132 INT, +c133 INT,c134 INT,c135 INT,c136 INT,c137 INT,c138 INT,c139 INT, +c140 INT,c141 INT,c142 INT,c143 INT,c144 INT,c145 INT,c146 INT, +c147 INT,c148 INT,c149 INT,c150 INT,c151 INT,c152 INT,c153 INT, +c154 INT,c155 INT,c156 INT,c157 INT,c158 INT,c159 INT,c160 INT, +c161 INT,c162 INT,c163 INT,c164 INT,c165 INT,c166 INT,c167 INT, +c168 INT,c169 INT,c170 INT,c171 INT,c172 INT,c173 INT,c174 INT, +c175 INT,c176 INT,c177 INT,c178 INT,c179 INT,c180 INT,c181 INT, +c182 INT,c183 INT,c184 INT,c185 INT,c186 INT,c187 INT,c188 INT, +c189 INT,c190 INT,c191 INT,c192 INT,c193 INT,c194 INT,c195 INT, +c196 INT,c197 INT,c198 INT,c199 INT,c200 INT,c201 INT,c202 INT, +c203 INT,c204 INT,c205 INT,c206 INT,c207 INT,c208 INT,c209 INT, +c210 INT,c211 INT,c212 INT,c213 INT,c214 INT,c215 INT,c216 INT, +c217 INT,c218 INT,c219 INT,c220 INT,c221 INT,c222 INT,c223 INT, +c224 INT,c225 INT,c226 INT,c227 INT,c228 INT,c229 INT,c230 INT, +c231 INT,c232 INT,c233 INT,c234 INT,c235 INT,c236 INT,c237 INT, +c238 INT,c239 INT,c240 INT,c241 INT,c242 INT,c243 INT,c244 INT, +c245 
INT,c246 INT,c247 INT,c248 INT,c249 INT,c250 INT,c251 INT, +c252 INT,c253 INT,c254 INT,c255 INT,c256 INT,c257 INT,c258 INT, +c259 INT,c260 INT,c261 INT,c262 INT,c263 INT,c264 INT,c265 INT, +c266 INT,c267 INT,c268 INT,c269 INT,c270 INT,c271 INT,c272 INT, +c273 INT,c274 INT,c275 INT,c276 INT,c277 INT,c278 INT,c279 INT, +c280 INT,c281 INT,c282 INT,c283 INT,c284 INT,c285 INT,c286 INT, +c287 INT,c288 INT,c289 INT,c290 INT,c291 INT,c292 INT,c293 INT, +c294 INT,c295 INT,c296 INT,c297 INT,c298 INT,c299 INT,c300 INT, +c301 INT,c302 INT,c303 INT,c304 INT,c305 INT,c306 INT,c307 INT, +c308 INT,c309 INT,c310 INT,c311 INT,c312 INT,c313 INT,c314 INT, +c315 INT,c316 INT,c317 INT,c318 INT,c319 INT,c320 INT,c321 INT, +c322 INT,c323 INT,c324 INT,c325 INT,c326 INT,c327 INT,c328 INT, +c329 INT,c330 INT,c331 INT,c332 INT,c333 INT,c334 INT,c335 INT, +c336 INT,c337 INT,c338 INT,c339 INT,c340 INT,c341 INT,c342 INT, +c343 INT,c344 INT,c345 INT,c346 INT,c347 INT,c348 INT,c349 INT, +c350 INT,c351 INT,c352 INT,c353 INT,c354 INT,c355 INT,c356 INT, +c357 INT,c358 INT,c359 INT,c360 INT,c361 INT,c362 INT,c363 INT, +c364 INT,c365 INT,c366 INT,c367 INT,c368 INT,c369 INT,c370 INT, +c371 INT,c372 INT,c373 INT,c374 INT,c375 INT,c376 INT,c377 INT, +c378 INT,c379 INT,c380 INT,c381 INT,c382 INT,c383 INT,c384 INT, +c385 INT,c386 INT,c387 INT,c388 INT,c389 INT,c390 INT,c391 INT, +c392 INT,c393 INT,c394 INT,c395 INT,c396 INT,c397 INT,c398 INT, +c399 INT,c400 INT,c401 INT,c402 INT,c403 INT,c404 INT,c405 INT, +c406 INT,c407 INT,c408 INT,c409 INT,c410 INT,c411 INT,c412 INT, +c413 INT,c414 INT,c415 INT,c416 INT,c417 INT,c418 INT,c419 INT, +c420 INT,c421 INT,c422 INT,c423 INT,c424 INT,c425 INT,c426 INT, +c427 INT,c428 INT,c429 INT,c430 INT,c431 INT,c432 INT,c433 INT, +c434 INT,c435 INT,c436 INT,c437 INT,c438 INT,c439 INT,c440 INT, +c441 INT,c442 INT,c443 INT,c444 INT,c445 INT,c446 INT,c447 INT, +c448 INT, +KEY (c1,c2,c3,c4,c5,c6,c7),KEY (c8,c9,c10,c11,c12,c13,c14), +KEY (c15,c16,c17,c18,c19,c20,c21),KEY 
(c22,c23,c24,c25,c26,c27,c28), +KEY (c29,c30,c31,c32,c33,c34,c35),KEY (c36,c37,c38,c39,c40,c41,c42), +KEY (c43,c44,c45,c46,c47,c48,c49),KEY (c50,c51,c52,c53,c54,c55,c56), +KEY (c57,c58,c59,c60,c61,c62,c63),KEY (c64,c65,c66,c67,c68,c69,c70), +KEY (c71,c72,c73,c74,c75,c76,c77),KEY (c78,c79,c80,c81,c82,c83,c84), +KEY (c85,c86,c87,c88,c89,c90,c91),KEY (c92,c93,c94,c95,c96,c97,c98), +KEY (c99,c100,c101,c102,c103,c104,c105), +KEY (c106,c107,c108,c109,c110,c111,c112), +KEY (c113,c114,c115,c116,c117,c118,c119), +KEY (c120,c121,c122,c123,c124,c125,c126), +KEY (c127,c128,c129,c130,c131,c132,c133), +KEY (c134,c135,c136,c137,c138,c139,c140), +KEY (c141,c142,c143,c144,c145,c146,c147), +KEY (c148,c149,c150,c151,c152,c153,c154), +KEY (c155,c156,c157,c158,c159,c160,c161), +KEY (c162,c163,c164,c165,c166,c167,c168), +KEY (c169,c170,c171,c172,c173,c174,c175), +KEY (c176,c177,c178,c179,c180,c181,c182), +KEY (c183,c184,c185,c186,c187,c188,c189), +KEY (c190,c191,c192,c193,c194,c195,c196), +KEY (c197,c198,c199,c200,c201,c202,c203), +KEY (c204,c205,c206,c207,c208,c209,c210), +KEY (c211,c212,c213,c214,c215,c216,c217), +KEY (c218,c219,c220,c221,c222,c223,c224), +KEY (c225,c226,c227,c228,c229,c230,c231), +KEY (c232,c233,c234,c235,c236,c237,c238), +KEY (c239,c240,c241,c242,c243,c244,c245), +KEY (c246,c247,c248,c249,c250,c251,c252), +KEY (c253,c254,c255,c256,c257,c258,c259), +KEY (c260,c261,c262,c263,c264,c265,c266), +KEY (c267,c268,c269,c270,c271,c272,c273), +KEY (c274,c275,c276,c277,c278,c279,c280), +KEY (c281,c282,c283,c284,c285,c286,c287), +KEY (c288,c289,c290,c291,c292,c293,c294), +KEY (c295,c296,c297,c298,c299,c300,c301), +KEY (c302,c303,c304,c305,c306,c307,c308), +KEY (c309,c310,c311,c312,c313,c314,c315), +KEY (c316,c317,c318,c319,c320,c321,c322), +KEY (c323,c324,c325,c326,c327,c328,c329), +KEY (c330,c331,c332,c333,c334,c335,c336), +KEY (c337,c338,c339,c340,c341,c342,c343), +KEY (c344,c345,c346,c347,c348,c349,c350), +KEY (c351,c352,c353,c354,c355,c356,c357), +KEY 
(c358,c359,c360,c361,c362,c363,c364), +KEY (c365,c366,c367,c368,c369,c370,c371), +KEY (c372,c373,c374,c375,c376,c377,c378), +KEY (c379,c380,c381,c382,c383,c384,c385), +KEY (c386,c387,c388,c389,c390,c391,c392), +KEY (c393,c394,c395,c396,c397,c398,c399), +KEY (c400,c401,c402,c403,c404,c405,c406), +KEY (c407,c408,c409,c410,c411,c412,c413), +KEY (c414,c415,c416,c417,c418,c419,c420), +KEY (c421,c422,c423,c424,c425,c426,c427), +KEY (c428,c429,c430,c431,c432,c433,c434), +KEY (c435,c436,c437,c438,c439,c440,c441), +KEY (c442,c443,c444,c445,c446,c447,c448)); +DROP TABLE t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/deadlock.result b/storage/rocksdb/mysql-test/rocksdb/r/deadlock.result new file mode 100644 index 0000000000000..3e2f5709ca052 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/deadlock.result @@ -0,0 +1,37 @@ +# +# Validate that deadlock errors don't occur with a high level of concurrency +# +# Disable for valgrind because this takes too long +DROP DATABASE IF EXISTS mysqlslap; +CREATE DATABASE mysqlslap; +USE mysqlslap; +CREATE TABLE t1(id1 BIGINT, id2 BIGINT, count INT, PRIMARY KEY(id1, id2), KEY(id2)) ENGINE=rocksdb; +CREATE TABLE t1rev(id1 BIGINT, id2 BIGINT, count INT, PRIMARY KEY(id1, id2) COMMENT "rev:cf2", KEY(id2) COMMENT "rev:cf2") ENGINE=rocksdb; +SET @save = @@global.rocksdb_lock_wait_timeout; +SET GLOBAL rocksdb_lock_wait_timeout = 60; +SELECT count from t1; +count +50000 +SELECT count from t1; +count +100000 +SELECT count from t1; +count +150000 +SELECT count from t1; +count +200000 +SELECT count from t1rev; +count +50000 +SELECT count from t1rev; +count +100000 +SELECT count from t1rev; +count +150000 +SELECT count from t1rev; +count +200000 +SET GLOBAL rocksdb_lock_wait_timeout = @save; +DROP DATABASE mysqlslap; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/delete.result b/storage/rocksdb/mysql-test/rocksdb/r/delete.result new file mode 100644 index 0000000000000..8ec3c50f4664e --- /dev/null +++ 
b/storage/rocksdb/mysql-test/rocksdb/r/delete.result @@ -0,0 +1,166 @@ +DROP TABLE IF EXISTS t1,t2; +CREATE TABLE t1 (a INT, b CHAR(8), pk INT AUTO_INCREMENT PRIMARY KEY) ENGINE=rocksdb; +INSERT INTO t1 (a,b) VALUES (10000,'foobar'),(1,'a'),(2,'b'),(3,'c'),(4,'d'),(5,'e'); +INSERT INTO t1 (a,b) SELECT a, b FROM t1; +DELETE FROM t1 WHERE b IN ('c'); +SELECT a,b FROM t1; +a b +1 a +1 a +10000 foobar +10000 foobar +2 b +2 b +4 d +4 d +5 e +5 e +DELETE FROM t1 WHERE a < 0 OR b = 'a'; +SELECT a,b FROM t1; +a b +10000 foobar +10000 foobar +2 b +2 b +4 d +4 d +5 e +5 e +DELETE FROM t1 WHERE a <= 4 ORDER BY b DESC LIMIT 1; +SELECT a,b FROM t1; +a b +10000 foobar +10000 foobar +2 b +2 b +4 d +5 e +5 e +CREATE TABLE t2 (c CHAR(8), d INT, pk INT AUTO_INCREMENT PRIMARY KEY) ENGINE=rocksdb; +INSERT INTO t2 (c,d) SELECT b, a FROM t1; +SELECT c,d FROM t2; +c d +b 2 +b 2 +d 4 +e 5 +e 5 +foobar 10000 +foobar 10000 +DELETE t2.* FROM t1, t2 WHERE c < b AND a + d != 1; +SELECT a,b FROM t1; +a b +10000 foobar +10000 foobar +2 b +2 b +4 d +5 e +5 e +SELECT c,d FROM t2; +c d +foobar 10000 +foobar 10000 +DELETE FROM t2, t1.* USING t2, t1 WHERE c = 'foobar' and b = c; +SELECT a,b FROM t1; +a b +2 b +2 b +4 d +5 e +5 e +SELECT c,d FROM t2; +c d +DELETE FROM t1; +SELECT a,b FROM t1; +a b +DROP TABLE t1, t2; +CREATE TABLE t1 (a INT, b CHAR(8), pk INT AUTO_INCREMENT PRIMARY KEY) ENGINE=rocksdb; +INSERT INTO t1 (a,b) VALUES (1,'a'),(2,'b'),(3,'c'),(4,'d'),(5,'e'),(6,'f'),(7,'g'),(8,'h'),(10000,'foobar'); +INSERT INTO t1 (a,b) SELECT a, b FROM t1; +BEGIN; +DELETE FROM t1 WHERE b IN ('c'); +SELECT a,b FROM t1; +a b +1 a +1 a +10000 foobar +10000 foobar +2 b +2 b +4 d +4 d +5 e +5 e +6 f +6 f +7 g +7 g +8 h +8 h +DELETE FROM t1 WHERE a < 0 OR b = 'a'; +COMMIT; +SELECT a,b FROM t1; +a b +10000 foobar +10000 foobar +2 b +2 b +4 d +4 d +5 e +5 e +6 f +6 f +7 g +7 g +8 h +8 h +BEGIN; +DELETE FROM t1 WHERE a <= 4 ORDER BY b DESC LIMIT 1; +SAVEPOINT spt1; +DELETE FROM t1; +RELEASE SAVEPOINT spt1; 
+ROLLBACK; +SELECT a,b FROM t1; +a b +10000 foobar +10000 foobar +2 b +2 b +4 d +4 d +5 e +5 e +6 f +6 f +7 g +7 g +8 h +8 h +BEGIN; +DELETE FROM t1 WHERE a <= 4 ORDER BY b DESC LIMIT 1; +SAVEPOINT spt1; +DELETE FROM t1; +INSERT INTO t1 (a,b) VALUES (1,'a'); +ROLLBACK TO SAVEPOINT spt1; +ERROR HY000: MyRocks currently does not support ROLLBACK TO SAVEPOINT if modifying rows. +COMMIT; +ERROR HY000: This transaction was rolled back and cannot be committed. Only supported operation is to roll it back, so all pending changes will be discarded. Please restart another transaction. +SELECT a,b FROM t1; +a b +10000 foobar +10000 foobar +2 b +2 b +4 d +4 d +5 e +5 e +6 f +6 f +7 g +7 g +8 h +8 h +DROP TABLE t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/delete_before_lock.result b/storage/rocksdb/mysql-test/rocksdb/r/delete_before_lock.result new file mode 100644 index 0000000000000..a8ea5e1677f8b --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/delete_before_lock.result @@ -0,0 +1,22 @@ +connect con, localhost, root,,; +connection default; +set debug_sync='RESET'; +drop table if exists t1; +create table t1 (id1 int, id2 int, value int, primary key (id1, id2)) engine=rocksdb; +insert into t1 values (1, 1, 1),(1, 2, 1),(1, 3, 1), (2, 2, 2); +connection con; +set debug_sync='rocksdb.get_row_by_rowid SIGNAL parked WAIT_FOR go'; +update t1 set value=100 where id1=1; +connection default; +set debug_sync='now WAIT_FOR parked'; +delete from t1 where id1=1 and id2=1; +set debug_sync='now SIGNAL go'; +connection con; +select * from t1 where id1=1 for update; +id1 id2 value +1 2 100 +1 3 100 +connection default; +disconnect con; +set debug_sync='RESET'; +drop table t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/delete_ignore.result b/storage/rocksdb/mysql-test/rocksdb/r/delete_ignore.result new file mode 100644 index 0000000000000..f8ac42c4e72c5 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/delete_ignore.result @@ -0,0 +1,59 @@ +DROP TABLE IF EXISTS 
t1,t2; +CREATE TABLE t1 (a INT, b CHAR(8), pk INT AUTO_INCREMENT PRIMARY KEY) ENGINE=rocksdb; +INSERT INTO t1 (a,b) VALUES (10000,'foobar'),(1,'a'),(2,'b'),(3,'c'),(4,'d'),(5,'e'); +INSERT INTO t1 (a,b) SELECT a, b FROM t1; +CREATE TABLE t2 (pk INT AUTO_INCREMENT PRIMARY KEY, c CHAR(8), d INT) ENGINE=rocksdb; +INSERT INTO t2 (c,d) SELECT b, a FROM t1; +SELECT a,b FROM t1; +a b +1 a +1 a +10000 foobar +10000 foobar +2 b +2 b +3 c +3 c +4 d +4 d +5 e +5 e +SELECT c,d FROM t2; +c d +a 1 +a 1 +b 2 +b 2 +c 3 +c 3 +d 4 +d 4 +e 5 +e 5 +foobar 10000 +foobar 10000 +DELETE IGNORE FROM t1 WHERE b IS NOT NULL ORDER BY a LIMIT 1; +SELECT a,b FROM t1; +a b +1 a +10000 foobar +10000 foobar +2 b +2 b +3 c +3 c +4 d +4 d +5 e +5 e +DELETE IGNORE t1.*, t2.* FROM t1, t2 WHERE c < b OR a != ( SELECT 1 UNION SELECT 2 ); +Warnings: +Warning 1242 Subquery returns more than 1 row +SELECT a,b FROM t1; +a b +1 a +SELECT c,d FROM t2; +c d +foobar 10000 +foobar 10000 +DROP TABLE t1, t2; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/delete_quick.result b/storage/rocksdb/mysql-test/rocksdb/r/delete_quick.result new file mode 100644 index 0000000000000..4173d875a82e9 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/delete_quick.result @@ -0,0 +1,24 @@ +DROP TABLE IF EXISTS t1,t2; +CREATE TABLE t1 (a INT, b CHAR(8), PRIMARY KEY (a)) ENGINE=rocksdb; +INSERT INTO t1 (a,b) VALUES (1,'a'),(2,'b'),(3,'c'),(4,'d'),(5,'e'); +DELETE QUICK FROM t1 WHERE a = 1 OR b > 'foo'; +SELECT a,b FROM t1; +a b +2 b +3 c +4 d +5 e +CREATE TABLE t2 (c CHAR(8), d INT, PRIMARY KEY (c)) ENGINE=rocksdb; +INSERT INTO t2 (c,d) SELECT b, a FROM t1; +SELECT c,d FROM t2; +c d +b 2 +c 3 +d 4 +e 5 +DELETE QUICK FROM t2, t1.* USING t2, t1 WHERE c IS NULL OR a = d; +SELECT a,b FROM t1; +a b +SELECT c,d FROM t2; +c d +DROP TABLE t1, t2; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/delete_with_keys.result b/storage/rocksdb/mysql-test/rocksdb/r/delete_with_keys.result new file mode 100644 index 
0000000000000..c94708b872ff4 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/delete_with_keys.result @@ -0,0 +1,38 @@ +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 (a INT, b CHAR(8), pk INT AUTO_INCREMENT PRIMARY KEY, KEY(b)) ENGINE=rocksdb; +INSERT INTO t1 (a,b) VALUES (1,'a'),(2,'b'),(3,'c'),(4,'d'),(5,'e'),(6,'x'),(7,'y'),(8,'z'); +DELETE FROM t1 WHERE b > 'y'; +DELETE FROM t1 WHERE a=2; +SELECT a,b FROM t1; +a b +1 a +3 c +4 d +5 e +6 x +7 y +DELETE FROM t1; +DROP TABLE t1; +CREATE TABLE t1 (a INT PRIMARY KEY, b CHAR(8)) ENGINE=rocksdb; +INSERT INTO t1 (a,b) VALUES (1,'a'),(2,'b'),(3,'c'),(4,'d'),(5,'e'),(6,'x'),(7,'y'),(8,'z'); +DELETE FROM t1 WHERE b > 'y'; +DELETE FROM t1 WHERE a=2; +SELECT a,b FROM t1; +a b +1 a +3 c +4 d +5 e +6 x +7 y +DELETE FROM t1; +DROP TABLE t1; +CREATE TABLE t1 (a INT, b INT, c INT, pk INT AUTO_INCREMENT PRIMARY KEY, KEY(a), KEY (b)) ENGINE=rocksdb; +INSERT INTO t1 (a,b,c) VALUES (1,2,3),(4,5,6),(7,8,9); +DELETE FROM t1 WHERE a = 10 OR b = 20 ORDER BY c LIMIT 1; +SELECT a,b,c FROM t1; +a b c +1 2 3 +4 5 6 +7 8 9 +DROP TABLE t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/describe.result b/storage/rocksdb/mysql-test/rocksdb/r/describe.result new file mode 100644 index 0000000000000..6d43f89c9bd4f --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/describe.result @@ -0,0 +1,19 @@ +DROP TABLE IF EXISTS t1, t2, t3; +CREATE TABLE t1 (a INT, b CHAR(8), PRIMARY KEY (a)) ENGINE=rocksdb; +INSERT INTO t1 (a,b) VALUES (100,'foo'),(2, 'b'); +CREATE TABLE t2 (a INT, b CHAR(8), PRIMARY KEY (b)) ENGINE=rocksdb CHARACTER SET utf8; +INSERT INTO t2 (a,b) VALUES (1, 'bar'); +CREATE TABLE t3 (a INT, b CHAR(8), pk INT AUTO_INCREMENT PRIMARY KEY) ENGINE=rocksdb CHARACTER SET utf8; +DESCRIBE t1; +Field Type Null Key Default Extra +a int(11) NO PRI NULL +b char(8) YES NULL +DESC t2 a; +Field Type Null Key Default Extra +a int(11) YES NULL +DESCRIBE t3 '%'; +Field Type Null Key Default Extra +a int(11) YES NULL +b char(8) YES NULL +pk 
int(11) NO PRI NULL auto_increment +DROP TABLE t1, t2, t3; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/drop_database.result b/storage/rocksdb/mysql-test/rocksdb/r/drop_database.result new file mode 100644 index 0000000000000..f220efe9f384f --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/drop_database.result @@ -0,0 +1,6 @@ +DROP DATABASE IF EXISTS test_drop_database; +CREATE DATABASE test_drop_database; +CREATE TABLE t1 (a int, b int, c int, primary key (a), unique key (b)) ENGINE=ROCKSDB; +ALTER TABLE t1 DROP PRIMARY KEY, ADD PRIMARY KEY (a); +DROP TABLE t1; +DROP DATABASE test_drop_database; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/drop_index_inplace.result b/storage/rocksdb/mysql-test/rocksdb/r/drop_index_inplace.result new file mode 100644 index 0000000000000..dfa5c5b2590cf --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/drop_index_inplace.result @@ -0,0 +1,154 @@ +drop table if exists t1; +CREATE TABLE t1 (a INT, b INT AUTO_INCREMENT, KEY ka(a), KEY kb(a,b), PRIMARY KEY(b)) ENGINE=rocksdb; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` int(11) DEFAULT NULL, + `b` int(11) NOT NULL AUTO_INCREMENT, + PRIMARY KEY (`b`), + KEY `ka` (`a`), + KEY `kb` (`a`,`b`) +) ENGINE=ROCKSDB DEFAULT CHARSET=latin1 +INSERT INTO t1 (a) VALUES (1); +INSERT INTO t1 (a) VALUES (3); +INSERT INTO t1 (a) VALUES (5); +ALTER TABLE t1 DROP INDEX ka, ALGORITHM=INPLACE; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` int(11) DEFAULT NULL, + `b` int(11) NOT NULL AUTO_INCREMENT, + PRIMARY KEY (`b`), + KEY `kb` (`a`,`b`) +) ENGINE=ROCKSDB AUTO_INCREMENT=4 DEFAULT CHARSET=latin1 +SELECT * FROM t1 FORCE INDEX(ka) where a > 1; +ERROR 42000: Key 'ka' doesn't exist in table 't1' +SELECT * FROM t1 FORCE INDEX(kb) where a > 1; +a b +3 2 +5 3 +SELECT * FROM t1 where b > 1; +a b +3 2 +5 3 +DROP TABLE t1; +CREATE TABLE t1 (a INT AUTO_INCREMENT, b INT, c INT, KEY kb(b), KEY kbc(b,c), KEY kc(c), PRIMARY KEY(a)) 
ENGINE=rocksdb; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` int(11) NOT NULL AUTO_INCREMENT, + `b` int(11) DEFAULT NULL, + `c` int(11) DEFAULT NULL, + PRIMARY KEY (`a`), + KEY `kb` (`b`), + KEY `kbc` (`b`,`c`), + KEY `kc` (`c`) +) ENGINE=ROCKSDB DEFAULT CHARSET=latin1 +INSERT INTO t1 (b,c) VALUES (1,2); +INSERT INTO t1 (b,c) VALUES (3,4); +INSERT INTO t1 (b,c) VALUES (5,6); +ALTER TABLE t1 DROP INDEX kb, DROP INDEX kbc, ALGORITHM=INPLACE; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` int(11) NOT NULL AUTO_INCREMENT, + `b` int(11) DEFAULT NULL, + `c` int(11) DEFAULT NULL, + PRIMARY KEY (`a`), + KEY `kc` (`c`) +) ENGINE=ROCKSDB AUTO_INCREMENT=4 DEFAULT CHARSET=latin1 +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` int(11) NOT NULL AUTO_INCREMENT, + `b` int(11) DEFAULT NULL, + `c` int(11) DEFAULT NULL, + PRIMARY KEY (`a`), + KEY `kc` (`c`) +) ENGINE=ROCKSDB AUTO_INCREMENT=4 DEFAULT CHARSET=latin1 +INSERT INTO t1 (b,c) VALUES (1,2); +INSERT INTO t1 (b,c) VALUES (3,4); +INSERT INTO t1 (b,c) VALUES (5,6); +SELECT * FROM t1 FORCE INDEX(kc) where c > 3; +a b c +2 3 4 +3 5 6 +5 3 4 +6 5 6 +SELECT * FROM t1 where b > 3; +a b c +3 5 6 +6 5 6 +DROP TABLE t1; +CREATE TABLE t1 (a INT, b INT, c INT, KEY kb(b), KEY kbc(b,c), KEY kc(c), PRIMARY KEY(a)) ENGINE=rocksdb; +SHOW INDEX IN t1; +Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment +t1 0 PRIMARY 1 a A 0 NULL NULL LSMTREE +t1 1 kb 1 b A 0 NULL NULL YES LSMTREE +t1 1 kbc 1 b A 0 NULL NULL YES LSMTREE +t1 1 kbc 2 c A 0 NULL NULL YES LSMTREE +t1 1 kc 1 c A 0 NULL NULL YES LSMTREE +ALTER TABLE t1 DROP INDEX kb, DROP INDEX kbc, ALGORITHM=INPLACE; +SHOW INDEX IN t1; +Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment +t1 0 PRIMARY 1 a A 0 NULL NULL LSMTREE +t1 1 kc 1 c A 0 NULL NULL YES LSMTREE +ALTER 
TABLE t1 DROP PRIMARY KEY; +SHOW INDEX IN t1; +Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment +t1 1 kc 1 c A 0 NULL NULL YES LSMTREE +ALTER TABLE t1 DROP INDEX kc, ALGORITHM=INPLACE; +SHOW INDEX IN t1; +Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment +DROP TABLE t1; +CREATE TABLE t1 (a INT AUTO_INCREMENT, b INT, c INT, PRIMARY KEY(a)) ENGINE=rocksdb; +ALTER TABLE t1 ADD UNIQUE INDEX kb(b); +ALTER TABLE t1 ADD UNIQUE INDEX kbc(b,c); +ALTER TABLE t1 ADD UNIQUE INDEX kc(c); +SHOW INDEX IN t1; +Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment +t1 0 PRIMARY 1 a A 0 NULL NULL LSMTREE +t1 0 kb 1 b A 0 NULL NULL YES LSMTREE +t1 0 kbc 1 b A 0 NULL NULL YES LSMTREE +t1 0 kbc 2 c A 0 NULL NULL YES LSMTREE +t1 0 kc 1 c A 0 NULL NULL YES LSMTREE +ALTER TABLE t1 DROP INDEX kb, DROP INDEX kbc; +SHOW INDEX IN t1; +Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment +t1 0 PRIMARY 1 a A 0 NULL NULL LSMTREE +t1 0 kc 1 c A 0 NULL NULL YES LSMTREE +INSERT INTO t1 (b,c) VALUES (1,2); +INSERT INTO t1 (b,c) VALUES (3,4); +INSERT INTO t1 (b,c) VALUES (5,6); +SELECT * FROM t1 FORCE INDEX(kc) where c > 3; +a b c +2 3 4 +3 5 6 +ALTER TABLE t1 DROP INDEX kc, ALGORITHM=INPLACE; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` int(11) NOT NULL AUTO_INCREMENT, + `b` int(11) DEFAULT NULL, + `c` int(11) DEFAULT NULL, + PRIMARY KEY (`a`) +) ENGINE=ROCKSDB AUTO_INCREMENT=4 DEFAULT CHARSET=latin1 +DROP TABLE t1; +CREATE TABLE IF NOT EXISTS t1 (col1 INT, col2 INT, col3 INT); +INSERT INTO t1 (col1,col2,col3) VALUES (1,2,3); +ALTER TABLE t1 ADD KEY idx ( col1, col2 ); +ANALYZE TABLE t1; +Table Op Msg_type Msg_text +test.t1 analyze status OK +ALTER TABLE t1 DROP COLUMN col2; 
+ALTER TABLE t1 DROP COLUMN col3; +DROP TABLE t1; +CREATE TABLE IF NOT EXISTS t1 (col1 INT, col2 INT, col3 INT); +INSERT INTO t1 (col1,col2,col3) VALUES (1,2,3); +ALTER TABLE t1 ADD KEY idx ( col1, col2 ); +ANALYZE TABLE t1; +Table Op Msg_type Msg_text +test.t1 analyze status OK +ALTER TABLE t1 DROP COLUMN col2; +ALTER TABLE t1 DROP COLUMN col3; +DROP TABLE t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/drop_table.result b/storage/rocksdb/mysql-test/rocksdb/r/drop_table.result new file mode 100644 index 0000000000000..fbe6f35126de6 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/drop_table.result @@ -0,0 +1,73 @@ +call mtr.add_suppression("Column family 'cf1' not found"); +call mtr.add_suppression("Column family 'rev:cf2' not found"); +DROP TABLE IF EXISTS t1; +DROP TABLE IF EXISTS t2; +DROP TABLE IF EXISTS t3; +DROP TABLE IF EXISTS t4; +DROP TABLE IF EXISTS t5; +set global rocksdb_compact_cf = 'cf1'; +set global rocksdb_compact_cf = 'rev:cf2'; +set global rocksdb_signal_drop_index_thread = 1; +CREATE TABLE t1 ( +a int not null, +b int not null, +primary key (a,b) comment 'cf1', +key (b) comment 'rev:cf2' +) ENGINE=RocksDB; +CREATE TABLE t2 ( +a int not null, +b int not null, +primary key (a,b) comment 'cf1', +key (b) comment 'rev:cf2' +) ENGINE=RocksDB; +CREATE TABLE t3 ( +a int not null, +b int not null, +primary key (a,b) comment 'cf1', +key (b) comment 'rev:cf2' +) ENGINE=RocksDB; +CREATE TABLE t4 ( +a int not null, +b int not null, +primary key (a,b) comment 'cf1', +key (b) comment 'rev:cf2' +) ENGINE=RocksDB; +DELETE FROM t1; +DELETE FROM t2; +DELETE FROM t3; +DELETE FROM t4; +drop table t2; +DELETE FROM t1; +DELETE FROM t4; +drop table t3; +DELETE FROM t1; +DELETE FROM t4; +drop table t4; +CREATE TABLE t5 ( +a int not null, +b int not null, +primary key (a,b) comment 'cf1', +key (b) comment 'rev:cf2' +) ENGINE=RocksDB; +DELETE FROM t5; +drop table t5; +set global rocksdb_compact_cf = 'cf1'; +set global rocksdb_compact_cf = 'rev:cf2'; +set 
global rocksdb_signal_drop_index_thread = 1; +Begin filtering dropped index+ 0 +Begin filtering dropped index+ 1 +Begin filtering dropped index+ 1 +Begin filtering dropped index+ 1 +Begin filtering dropped index+ 1 +Begin filtering dropped index+ 1 +Begin filtering dropped index+ 1 +Begin filtering dropped index+ 1 +Finished filtering dropped index+ 0 +Finished filtering dropped index+ 1 +Finished filtering dropped index+ 1 +Finished filtering dropped index+ 1 +Finished filtering dropped index+ 1 +Finished filtering dropped index+ 1 +Finished filtering dropped index+ 1 +Finished filtering dropped index+ 1 +drop table t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/drop_table2.result b/storage/rocksdb/mysql-test/rocksdb/r/drop_table2.result new file mode 100644 index 0000000000000..83d9fd9493fa6 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/drop_table2.result @@ -0,0 +1,55 @@ +call mtr.add_suppression("Column family 'cf1' not found"); +call mtr.add_suppression("Column family 'rev:cf2' not found"); +DROP TABLE IF EXISTS t1; +DROP TABLE IF EXISTS t2; +DROP TABLE IF EXISTS t3; +DROP TABLE IF EXISTS t4; +DROP TABLE IF EXISTS t5; +set global rocksdb_compact_cf = 'cf1'; +set global rocksdb_compact_cf = 'rev:cf2'; +set global rocksdb_signal_drop_index_thread = 1; +CREATE TABLE t1 ( +a int not null, +b int not null, +primary key (a,b) comment 'cf1', +key (b) comment 'rev:cf2' +) ENGINE=RocksDB; +CREATE TABLE t2 ( +a int not null, +b int not null, +primary key (a,b) comment 'cf1', +key (b) comment 'rev:cf2' +) ENGINE=RocksDB; +CREATE TABLE t3 ( +a int not null, +b int not null, +primary key (a,b) comment 'cf1', +key (b) comment 'rev:cf2' +) ENGINE=RocksDB; +CREATE TABLE t4 ( +a int not null, +b int not null, +primary key (a,b) comment 'cf1', +key (b) comment 'rev:cf2' +) ENGINE=RocksDB; +DELETE FROM t1; +DELETE FROM t2; +DELETE FROM t3; +DELETE FROM t4; +DELETE FROM t1; +DELETE FROM t4; +DELETE FROM t1; +DELETE FROM t4; +CREATE TABLE t5 ( +a int not null, +b 
int not null, +primary key (a,b) comment 'cf1', +key (b) comment 'rev:cf2' +) ENGINE=RocksDB; +DELETE FROM t5; +drop table t1; +drop table t2; +drop table t3; +drop table t4; +drop table t5; +Compacted diff --git a/storage/rocksdb/mysql-test/rocksdb/r/drop_table3.result b/storage/rocksdb/mysql-test/rocksdb/r/drop_table3.result new file mode 100644 index 0000000000000..e5237fe9b1e7b --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/drop_table3.result @@ -0,0 +1,22 @@ +call mtr.add_suppression("Column family 'cf1' not found"); +call mtr.add_suppression("Column family 'rev:cf2' not found"); +DROP TABLE IF EXISTS t1; +set global rocksdb_compact_cf = 'cf1'; +set global rocksdb_compact_cf = 'rev:cf2'; +set global rocksdb_signal_drop_index_thread = 1; +CREATE TABLE t1 ( +a int not null, +b int not null, +c varchar(500) not null, +primary key (a,b) comment 'cf1', +key (b) comment 'rev:cf2' +) ENGINE=RocksDB; +DELETE FROM t1; +select variable_value into @a from information_schema.global_status where variable_name='rocksdb_compact_read_bytes'; +drop table t1; +select case when variable_value-@a < 500000 then 'true' else 'false' end from information_schema.global_status where variable_name='rocksdb_compact_read_bytes'; +case when variable_value-@a < 500000 then 'true' else 'false' end +true +DROP TABLE IF EXISTS t1; +Warnings: +Note 1051 Unknown table 'test.t1' diff --git a/storage/rocksdb/mysql-test/rocksdb/r/dup_key_update.result b/storage/rocksdb/mysql-test/rocksdb/r/dup_key_update.result new file mode 100644 index 0000000000000..954335debf28e --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/dup_key_update.result @@ -0,0 +1,362 @@ +DROP TABLE IF EXISTS t1; +DROP TABLE IF EXISTS t2; +CREATE TABLE t1 (id1 INT, id2 INT, id3 INT, +PRIMARY KEY (id1, id2, id3), +UNIQUE KEY (id3, id1)) ENGINE=ROCKSDB; +CREATE TABLE t2 (id1 INT, id2 INT, id3 INT, +PRIMARY KEY (id1, id2, id3), +UNIQUE KEY (id3, id1) COMMENT 'rev:cf') ENGINE=ROCKSDB; +INSERT INTO t1 VALUES (1, 1, 1) 
ON DUPLICATE KEY UPDATE id2 = 9; +SELECT * FROM t1 WHERE id1 = 1; +id1 id2 id3 +1 1 1 +SELECT * FROM t1 FORCE INDEX (id3) WHERE id3 = 1; +id1 id2 id3 +1 1 1 +INSERT INTO t1 VALUES (1, 1, 1) ON DUPLICATE KEY UPDATE id2 = 10; +SELECT * FROM t1 WHERE id1 = 1; +id1 id2 id3 +1 10 1 +SELECT * FROM t1 FORCE INDEX (id3) WHERE id3 = 1; +id1 id2 id3 +1 10 1 +INSERT INTO t1 VALUES (1, 1, 1) ON DUPLICATE KEY UPDATE id2 = 11; +SELECT * FROM t1 WHERE id1 = 1; +id1 id2 id3 +1 11 1 +SELECT * FROM t1 FORCE INDEX (id3) WHERE id3 = 1; +id1 id2 id3 +1 11 1 +INSERT INTO t1 VALUES (5, 5, 5) ON DUPLICATE KEY UPDATE id2 = 12; +SELECT * FROM t1 WHERE id1 = 5; +id1 id2 id3 +5 12 5 +SELECT * FROM t1 FORCE INDEX (id3) WHERE id3 = 5; +id1 id2 id3 +5 12 5 +INSERT INTO t1 VALUES (5, 5, 5) ON DUPLICATE KEY UPDATE id2 = 13; +SELECT * FROM t1 WHERE id1 = 5; +id1 id2 id3 +5 13 5 +SELECT * FROM t1 FORCE INDEX (id3) WHERE id3 = 5; +id1 id2 id3 +5 13 5 +INSERT INTO t1 VALUES (5, 5, 5) ON DUPLICATE KEY UPDATE id2 = 14; +SELECT * FROM t1 WHERE id1 = 5; +id1 id2 id3 +5 14 5 +SELECT * FROM t1 FORCE INDEX (id3) WHERE id3 = 5; +id1 id2 id3 +5 14 5 +INSERT INTO t1 VALUES (9, 9, 9) ON DUPLICATE KEY UPDATE id2 = 15; +SELECT * FROM t1 WHERE id1 = 9; +id1 id2 id3 +9 15 9 +SELECT * FROM t1 FORCE INDEX (id3) WHERE id3 = 9; +id1 id2 id3 +9 15 9 +INSERT INTO t1 VALUES (9, 9, 9) ON DUPLICATE KEY UPDATE id2 = 16; +SELECT * FROM t1 WHERE id1 = 9; +id1 id2 id3 +9 16 9 +SELECT * FROM t1 FORCE INDEX (id3) WHERE id3 = 9; +id1 id2 id3 +9 16 9 +INSERT INTO t1 VALUES (9, 9, 9) ON DUPLICATE KEY UPDATE id2 = 17; +SELECT * FROM t1 WHERE id1 = 9; +id1 id2 id3 +9 17 9 +SELECT * FROM t1 FORCE INDEX (id3) WHERE id3 = 9; +id1 id2 id3 +9 17 9 +SELECT * FROM t1; +id1 id2 id3 +1 11 1 +2 2 2 +3 3 3 +4 4 4 +5 14 5 +6 6 6 +7 7 7 +8 8 8 +9 17 9 +SELECT * FROM t1 FORCE INDEX (id3); +id1 id2 id3 +1 11 1 +2 2 2 +3 3 3 +4 4 4 +5 14 5 +6 6 6 +7 7 7 +8 8 8 +9 17 9 +INSERT INTO t2 VALUES (1, 1, 1) ON DUPLICATE KEY UPDATE id2 = 9; +SELECT * FROM t2 
WHERE id1 = 1; +id1 id2 id3 +1 1 1 +SELECT * FROM t2 FORCE INDEX (id3) WHERE id3 = 1; +id1 id2 id3 +1 1 1 +INSERT INTO t2 VALUES (1, 1, 1) ON DUPLICATE KEY UPDATE id2 = 10; +SELECT * FROM t2 WHERE id1 = 1; +id1 id2 id3 +1 10 1 +SELECT * FROM t2 FORCE INDEX (id3) WHERE id3 = 1; +id1 id2 id3 +1 10 1 +INSERT INTO t2 VALUES (1, 1, 1) ON DUPLICATE KEY UPDATE id2 = 11; +SELECT * FROM t2 WHERE id1 = 1; +id1 id2 id3 +1 11 1 +SELECT * FROM t2 FORCE INDEX (id3) WHERE id3 = 1; +id1 id2 id3 +1 11 1 +INSERT INTO t2 VALUES (5, 5, 5) ON DUPLICATE KEY UPDATE id2 = 12; +SELECT * FROM t2 WHERE id1 = 5; +id1 id2 id3 +5 12 5 +SELECT * FROM t2 FORCE INDEX (id3) WHERE id3 = 5; +id1 id2 id3 +5 12 5 +INSERT INTO t2 VALUES (5, 5, 5) ON DUPLICATE KEY UPDATE id2 = 13; +SELECT * FROM t2 WHERE id1 = 5; +id1 id2 id3 +5 13 5 +SELECT * FROM t2 FORCE INDEX (id3) WHERE id3 = 5; +id1 id2 id3 +5 13 5 +INSERT INTO t2 VALUES (5, 5, 5) ON DUPLICATE KEY UPDATE id2 = 14; +SELECT * FROM t2 WHERE id1 = 5; +id1 id2 id3 +5 14 5 +SELECT * FROM t2 FORCE INDEX (id3) WHERE id3 = 5; +id1 id2 id3 +5 14 5 +INSERT INTO t2 VALUES (9, 9, 9) ON DUPLICATE KEY UPDATE id2 = 15; +SELECT * FROM t2 WHERE id1 = 9; +id1 id2 id3 +9 15 9 +SELECT * FROM t2 FORCE INDEX (id3) WHERE id3 = 9; +id1 id2 id3 +9 15 9 +INSERT INTO t2 VALUES (9, 9, 9) ON DUPLICATE KEY UPDATE id2 = 16; +SELECT * FROM t2 WHERE id1 = 9; +id1 id2 id3 +9 16 9 +SELECT * FROM t2 FORCE INDEX (id3) WHERE id3 = 9; +id1 id2 id3 +9 16 9 +INSERT INTO t2 VALUES (9, 9, 9) ON DUPLICATE KEY UPDATE id2 = 17; +SELECT * FROM t2 WHERE id1 = 9; +id1 id2 id3 +9 17 9 +SELECT * FROM t2 FORCE INDEX (id3) WHERE id3 = 9; +id1 id2 id3 +9 17 9 +SELECT * FROM t2; +id1 id2 id3 +1 11 1 +2 2 2 +3 3 3 +4 4 4 +5 14 5 +6 6 6 +7 7 7 +8 8 8 +9 17 9 +SELECT * FROM t2 FORCE INDEX (id3); +id1 id2 id3 +1 11 1 +2 2 2 +3 3 3 +4 4 4 +5 14 5 +6 6 6 +7 7 7 +8 8 8 +9 17 9 +DROP TABLE t1; +DROP TABLE t2; +CREATE TABLE t1 (id1 varchar(128) CHARACTER SET latin1 COLLATE latin1_bin, +id2 varchar(256) CHARACTER 
SET utf8 COLLATE utf8_bin, +id3 varchar(200) CHARACTER SET latin1 COLLATE latin1_swedish_ci, +PRIMARY KEY (id1, id2, id3), +UNIQUE KEY (id3, id1)) ENGINE=ROCKSDB; +CREATE TABLE t2 (id1 varchar(128) CHARACTER SET latin1 COLLATE latin1_bin, +id2 varchar(256) CHARACTER SET utf8 COLLATE utf8_bin, +id3 varchar(200) CHARACTER SET latin1 COLLATE latin1_swedish_ci, +PRIMARY KEY (id1, id2, id3), +UNIQUE KEY (id3, id1) COMMENT 'rev:cf') ENGINE=ROCKSDB; +INSERT INTO t1 VALUES (1, 1, 1) ON DUPLICATE KEY UPDATE id2 = 9; +SELECT * FROM t1 WHERE id1 = 1; +id1 id2 id3 +1 1 1 +SELECT * FROM t1 FORCE INDEX (id3) WHERE id3 = 1; +id1 id2 id3 +1 1 1 +INSERT INTO t1 VALUES (1, 1, 1) ON DUPLICATE KEY UPDATE id2 = 10; +SELECT * FROM t1 WHERE id1 = 1; +id1 id2 id3 +1 10 1 +SELECT * FROM t1 FORCE INDEX (id3) WHERE id3 = 1; +id1 id2 id3 +1 10 1 +INSERT INTO t1 VALUES (1, 1, 1) ON DUPLICATE KEY UPDATE id2 = 11; +SELECT * FROM t1 WHERE id1 = 1; +id1 id2 id3 +1 11 1 +SELECT * FROM t1 FORCE INDEX (id3) WHERE id3 = 1; +id1 id2 id3 +1 11 1 +INSERT INTO t1 VALUES (5, 5, 5) ON DUPLICATE KEY UPDATE id2 = 12; +SELECT * FROM t1 WHERE id1 = 5; +id1 id2 id3 +5 12 5 +SELECT * FROM t1 FORCE INDEX (id3) WHERE id3 = 5; +id1 id2 id3 +5 12 5 +INSERT INTO t1 VALUES (5, 5, 5) ON DUPLICATE KEY UPDATE id2 = 13; +SELECT * FROM t1 WHERE id1 = 5; +id1 id2 id3 +5 13 5 +SELECT * FROM t1 FORCE INDEX (id3) WHERE id3 = 5; +id1 id2 id3 +5 13 5 +INSERT INTO t1 VALUES (5, 5, 5) ON DUPLICATE KEY UPDATE id2 = 14; +SELECT * FROM t1 WHERE id1 = 5; +id1 id2 id3 +5 14 5 +SELECT * FROM t1 FORCE INDEX (id3) WHERE id3 = 5; +id1 id2 id3 +5 14 5 +INSERT INTO t1 VALUES (9, 9, 9) ON DUPLICATE KEY UPDATE id2 = 15; +SELECT * FROM t1 WHERE id1 = 9; +id1 id2 id3 +9 15 9 +SELECT * FROM t1 FORCE INDEX (id3) WHERE id3 = 9; +id1 id2 id3 +9 15 9 +INSERT INTO t1 VALUES (9, 9, 9) ON DUPLICATE KEY UPDATE id2 = 16; +SELECT * FROM t1 WHERE id1 = 9; +id1 id2 id3 +9 16 9 +SELECT * FROM t1 FORCE INDEX (id3) WHERE id3 = 9; +id1 id2 id3 +9 16 9 +INSERT 
INTO t1 VALUES (9, 9, 9) ON DUPLICATE KEY UPDATE id2 = 17; +SELECT * FROM t1 WHERE id1 = 9; +id1 id2 id3 +9 17 9 +SELECT * FROM t1 FORCE INDEX (id3) WHERE id3 = 9; +id1 id2 id3 +9 17 9 +SELECT * FROM t1; +id1 id2 id3 +1 11 1 +2 2 2 +3 3 3 +4 4 4 +5 14 5 +6 6 6 +7 7 7 +8 8 8 +9 17 9 +SELECT * FROM t1 FORCE INDEX (id3); +id1 id2 id3 +1 11 1 +2 2 2 +3 3 3 +4 4 4 +5 14 5 +6 6 6 +7 7 7 +8 8 8 +9 17 9 +INSERT INTO t2 VALUES (1, 1, 1) ON DUPLICATE KEY UPDATE id2 = 9; +SELECT * FROM t2 WHERE id1 = 1; +id1 id2 id3 +1 1 1 +SELECT * FROM t2 FORCE INDEX (id3) WHERE id3 = 1; +id1 id2 id3 +1 1 1 +INSERT INTO t2 VALUES (1, 1, 1) ON DUPLICATE KEY UPDATE id2 = 10; +SELECT * FROM t2 WHERE id1 = 1; +id1 id2 id3 +1 10 1 +SELECT * FROM t2 FORCE INDEX (id3) WHERE id3 = 1; +id1 id2 id3 +1 10 1 +INSERT INTO t2 VALUES (1, 1, 1) ON DUPLICATE KEY UPDATE id2 = 11; +SELECT * FROM t2 WHERE id1 = 1; +id1 id2 id3 +1 11 1 +SELECT * FROM t2 FORCE INDEX (id3) WHERE id3 = 1; +id1 id2 id3 +1 11 1 +INSERT INTO t2 VALUES (5, 5, 5) ON DUPLICATE KEY UPDATE id2 = 12; +SELECT * FROM t2 WHERE id1 = 5; +id1 id2 id3 +5 12 5 +SELECT * FROM t2 FORCE INDEX (id3) WHERE id3 = 5; +id1 id2 id3 +5 12 5 +INSERT INTO t2 VALUES (5, 5, 5) ON DUPLICATE KEY UPDATE id2 = 13; +SELECT * FROM t2 WHERE id1 = 5; +id1 id2 id3 +5 13 5 +SELECT * FROM t2 FORCE INDEX (id3) WHERE id3 = 5; +id1 id2 id3 +5 13 5 +INSERT INTO t2 VALUES (5, 5, 5) ON DUPLICATE KEY UPDATE id2 = 14; +SELECT * FROM t2 WHERE id1 = 5; +id1 id2 id3 +5 14 5 +SELECT * FROM t2 FORCE INDEX (id3) WHERE id3 = 5; +id1 id2 id3 +5 14 5 +INSERT INTO t2 VALUES (9, 9, 9) ON DUPLICATE KEY UPDATE id2 = 15; +SELECT * FROM t2 WHERE id1 = 9; +id1 id2 id3 +9 15 9 +SELECT * FROM t2 FORCE INDEX (id3) WHERE id3 = 9; +id1 id2 id3 +9 15 9 +INSERT INTO t2 VALUES (9, 9, 9) ON DUPLICATE KEY UPDATE id2 = 16; +SELECT * FROM t2 WHERE id1 = 9; +id1 id2 id3 +9 16 9 +SELECT * FROM t2 FORCE INDEX (id3) WHERE id3 = 9; +id1 id2 id3 +9 16 9 +INSERT INTO t2 VALUES (9, 9, 9) ON DUPLICATE KEY UPDATE 
id2 = 17; +SELECT * FROM t2 WHERE id1 = 9; +id1 id2 id3 +9 17 9 +SELECT * FROM t2 FORCE INDEX (id3) WHERE id3 = 9; +id1 id2 id3 +9 17 9 +SELECT * FROM t2; +id1 id2 id3 +1 11 1 +2 2 2 +3 3 3 +4 4 4 +5 14 5 +6 6 6 +7 7 7 +8 8 8 +9 17 9 +SELECT * FROM t2 FORCE INDEX (id3); +id1 id2 id3 +1 11 1 +2 2 2 +3 3 3 +4 4 4 +5 14 5 +6 6 6 +7 7 7 +8 8 8 +9 17 9 +DROP TABLE t1; +DROP TABLE t2; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/duplicate_table.result b/storage/rocksdb/mysql-test/rocksdb/r/duplicate_table.result new file mode 100644 index 0000000000000..ba16aaa6d352d --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/duplicate_table.result @@ -0,0 +1,15 @@ +DROP TABLE IF EXISTS t; +CREATE TABLE t(id int primary key) engine=rocksdb; +INSERT INTO t values (1), (2), (3); +CREATE TABLE t(id int primary key) engine=rocksdb; +ERROR 42S01: Table 't' already exists +FLUSH TABLES; +CREATE TABLE t(id int primary key) engine=rocksdb; +ERROR HY000: Table 'test.t' does not exist, but metadata information exists inside MyRocks. This is a sign of data inconsistency. Please check if './test/t.frm' exists, and try to restore it if it does not exist. +FLUSH TABLES; +SELECT * FROM t; +id +1 +2 +3 +DROP TABLE t; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/fail_system_cf.result b/storage/rocksdb/mysql-test/rocksdb/r/fail_system_cf.result new file mode 100644 index 0000000000000..df90f2b3670e4 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/fail_system_cf.result @@ -0,0 +1,4 @@ +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 (i INT, PRIMARY KEY (i) COMMENT '__system__') ENGINE = ROCKSDB; +ERROR HY000: Incorrect arguments to column family not valid for storing index data. 
+DROP TABLE IF EXISTS t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/foreign_key.result b/storage/rocksdb/mysql-test/rocksdb/r/foreign_key.result new file mode 100644 index 0000000000000..483be726bb321 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/foreign_key.result @@ -0,0 +1,25 @@ +DROP TABLE IF EXISTS t1, t2; +CREATE TABLE t1 (b INT PRIMARY KEY); +CREATE TABLE t2 (a INT NOT NULL, b INT NOT NULL, FOREIGN KEY (b) REFERENCES t1(b)); +ERROR 42000: MyRocks does not currently support foreign key constraints +CREATE TABLE t2 (a INT NOT NULL, bforeign INT NOT NULL); +DROP TABLE t2; +CREATE TABLE t2 (a INT NOT NULL, foreignkey INT NOT NULL); +DROP TABLE t2; +CREATE TABLE t2 (a INT NOT NULL, bforeign INT not null, FOREIGN KEY (bforeign) REFERENCES t1(b)); +ERROR 42000: MyRocks does not currently support foreign key constraints +CREATE TABLE t2 (a INT NOT NULL, b INT NOT NULL); +ALTER TABLE t2 ADD FOREIGN KEY (b) REFERENCES t1(b); +ERROR 42000: MyRocks does not currently support foreign key constraints +DROP TABLE t2; +CREATE TABLE t2 (a INT NOT NULL); +ALTER TABLE t2 ADD bforeign INT NOT NULL; +DROP TABLE t2; +CREATE TABLE t2 (a INT NOT NULL); +ALTER TABLE t2 ADD foreignkey INT NOT NULL; +DROP TABLE t2; +CREATE TABLE t2 (a INT NOT NULL); +ALTER TABLE t2 ADD bforeign INT NOT NULL, ADD FOREIGN KEY (bforeign) REFERENCES t1(b); +ERROR 42000: MyRocks does not currently support foreign key constraints +DROP TABLE t2; +DROP TABLE t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/gap_lock_issue254.result b/storage/rocksdb/mysql-test/rocksdb/r/gap_lock_issue254.result new file mode 100644 index 0000000000000..d42041183c8ed --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/gap_lock_issue254.result @@ -0,0 +1,9 @@ +create table t (id int primary key, value int); +begin; +update t set value=100 where id in (1, 2); +commit; +begin; +select * from t for update; +ERROR HY000: Using Gap Lock without full unique key in multi-table or multi-statement 
transactions is not allowed. You need either 1: Execute 'SET SESSION gap_lock_raise_error=0' if you are sure that your application does not rely on Gap Lock. 2: Rewrite queries to use all unique key columns in WHERE equal conditions. 3: Rewrite to single-table, single-statement transaction. Query: select * from t for update +commit; +drop table t; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/gap_lock_raise_error.result b/storage/rocksdb/mysql-test/rocksdb/r/gap_lock_raise_error.result new file mode 100644 index 0000000000000..c1cf1e77ecf83 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/gap_lock_raise_error.result @@ -0,0 +1,504 @@ +drop table if exists gap1,gap2,gap3; +CREATE DATABASE mysqlslap; +CREATE TABLE gap1 (id1 INT, id2 INT, id3 INT, c1 INT, value INT, +PRIMARY KEY (id1, id2, id3), +INDEX i (c1)) ENGINE=rocksdb; +CREATE TABLE gap2 like gap1; +CREATE TABLE gap3 (id INT, value INT, +PRIMARY KEY (id), +UNIQUE KEY ui(value)) ENGINE=rocksdb; +insert into gap3 values (1,1), (2,2),(3,3),(4,4),(5,5); +create table gap4 ( +pk int primary key, +a int, +b int, +key(a) +) ENGINE=rocksdb; +insert into gap4 values (1,1,1), (2,2,2), (3,3,3), (4,4,4); +create table gap5 like gap4; +insert into gap5 values (1,1,1), (2,2,2), (3,3,3), (4,4,4); +set session gap_lock_raise_error=1; +set session gap_lock_write_log=1; +set @save_gap_lock_write_log = @@gap_lock_write_log; +set @save_gap_lock_raise_error = @@gap_lock_raise_error; +set gap_lock_write_log = 1; +set gap_lock_raise_error = 0; +begin; +update gap4 set a= (select 1+max(a) from gap5 where gap5.pk between 1 and 3 and gap5.b=gap4.b); +1 +update gap4 set a= (select 2+max(a) from gap5 where gap5.pk between 1 and 3 and gap5.b=gap4.b); +update gap4 set a= (select 3+max(a) from gap5 where gap5.pk between 1 and 3 and gap5.b=gap4.b); +1 +1 +0 +flush logs; +0 +rollback; +set gap_lock_write_log = @save_gap_lock_write_log; +set gap_lock_raise_error = @save_gap_lock_raise_error; +set global gap_lock_write_log = 1; +set 
global gap_lock_write_log = 0; +1000 +set session autocommit=0; +select * from gap1 limit 1 for update; +ERROR HY000: Using Gap Lock without full unique key in multi-table or multi-statement transactions is not allowed. You need either 1: Execute 'SET SESSION gap_lock_raise_error=0' if you are sure that your application does not rely on Gap Lock. 2: Rewrite queries to use all unique key columns in WHERE equal conditions. 3: Rewrite to single-table, single-statement transaction. Query: select * from gap1 limit 1 for update +select * from gap1 where value != 100 limit 1 for update; +ERROR HY000: Using Gap Lock without full unique key in multi-table or multi-statement transactions is not allowed. You need either 1: Execute 'SET SESSION gap_lock_raise_error=0' if you are sure that your application does not rely on Gap Lock. 2: Rewrite queries to use all unique key columns in WHERE equal conditions. 3: Rewrite to single-table, single-statement transaction. Query: select * from gap1 where value != 100 limit 1 for update +select * from gap1 where id1=1 for update; +ERROR HY000: Using Gap Lock without full unique key in multi-table or multi-statement transactions is not allowed. You need either 1: Execute 'SET SESSION gap_lock_raise_error=0' if you are sure that your application does not rely on Gap Lock. 2: Rewrite queries to use all unique key columns in WHERE equal conditions. 3: Rewrite to single-table, single-statement transaction. Query: select * from gap1 where id1=1 for update +select * from gap1 where id1=1 and id2= 1 for update; +ERROR HY000: Using Gap Lock without full unique key in multi-table or multi-statement transactions is not allowed. You need either 1: Execute 'SET SESSION gap_lock_raise_error=0' if you are sure that your application does not rely on Gap Lock. 2: Rewrite queries to use all unique key columns in WHERE equal conditions. 3: Rewrite to single-table, single-statement transaction. 
Query: select * from gap1 where id1=1 and id2= 1 for update +select * from gap1 where id1=1 and id2= 1 and id3 != 1 for update; +ERROR HY000: Using Gap Lock without full unique key in multi-table or multi-statement transactions is not allowed. You need either 1: Execute 'SET SESSION gap_lock_raise_error=0' if you are sure that your application does not rely on Gap Lock. 2: Rewrite queries to use all unique key columns in WHERE equal conditions. 3: Rewrite to single-table, single-statement transaction. Query: select * from gap1 where id1=1 and id2= 1 and id3 != 1 for update +select * from gap1 where id1=1 and id2= 1 and id3 +between 1 and 3 for update; +ERROR HY000: Using Gap Lock without full unique key in multi-table or multi-statement transactions is not allowed. You need either 1: Execute 'SET SESSION gap_lock_raise_error=0' if you are sure that your application does not rely on Gap Lock. 2: Rewrite queries to use all unique key columns in WHERE equal conditions. 3: Rewrite to single-table, single-statement transaction. Query: select * from gap1 where id1=1 and id2= 1 and id3 +between 1 and 3 for update +select * from gap1 where id1=1 and id2= 1 order by id3 asc +limit 1 for update; +ERROR HY000: Using Gap Lock without full unique key in multi-table or multi-statement transactions is not allowed. You need either 1: Execute 'SET SESSION gap_lock_raise_error=0' if you are sure that your application does not rely on Gap Lock. 2: Rewrite queries to use all unique key columns in WHERE equal conditions. 3: Rewrite to single-table, single-statement transaction. Query: select * from gap1 where id1=1 and id2= 1 order by id3 asc +limit 1 for update +select * from gap1 where id1=1 and id2= 1 order by id3 desc +limit 1 for update; +ERROR HY000: Using Gap Lock without full unique key in multi-table or multi-statement transactions is not allowed. You need either 1: Execute 'SET SESSION gap_lock_raise_error=0' if you are sure that your application does not rely on Gap Lock. 
2: Rewrite queries to use all unique key columns in WHERE equal conditions. 3: Rewrite to single-table, single-statement transaction. Query: select * from gap1 where id1=1 and id2= 1 order by id3 desc +limit 1 for update +select * from gap1 order by id1 asc limit 1 for update; +ERROR HY000: Using Gap Lock without full unique key in multi-table or multi-statement transactions is not allowed. You need either 1: Execute 'SET SESSION gap_lock_raise_error=0' if you are sure that your application does not rely on Gap Lock. 2: Rewrite queries to use all unique key columns in WHERE equal conditions. 3: Rewrite to single-table, single-statement transaction. Query: select * from gap1 order by id1 asc limit 1 for update +select * from gap1 order by id1 asc, id2 asc, id3 asc limit 1 for update; +ERROR HY000: Using Gap Lock without full unique key in multi-table or multi-statement transactions is not allowed. You need either 1: Execute 'SET SESSION gap_lock_raise_error=0' if you are sure that your application does not rely on Gap Lock. 2: Rewrite queries to use all unique key columns in WHERE equal conditions. 3: Rewrite to single-table, single-statement transaction. Query: select * from gap1 order by id1 asc, id2 asc, id3 asc limit 1 for update +select * from gap1 order by id1 desc limit 1 for update; +ERROR HY000: Using Gap Lock without full unique key in multi-table or multi-statement transactions is not allowed. You need either 1: Execute 'SET SESSION gap_lock_raise_error=0' if you are sure that your application does not rely on Gap Lock. 2: Rewrite queries to use all unique key columns in WHERE equal conditions. 3: Rewrite to single-table, single-statement transaction. Query: select * from gap1 order by id1 desc limit 1 for update +select * from gap1 order by id1 desc, id2 desc, id3 desc +limit 1 for update; +ERROR HY000: Using Gap Lock without full unique key in multi-table or multi-statement transactions is not allowed. 
You need either 1: Execute 'SET SESSION gap_lock_raise_error=0' if you are sure that your application does not rely on Gap Lock. 2: Rewrite queries to use all unique key columns in WHERE equal conditions. 3: Rewrite to single-table, single-statement transaction. Query: select * from gap1 order by id1 desc, id2 desc, id3 desc +limit 1 for update +select * from gap1 force index(i) where c1=1 for update; +ERROR HY000: Using Gap Lock without full unique key in multi-table or multi-statement transactions is not allowed. You need either 1: Execute 'SET SESSION gap_lock_raise_error=0' if you are sure that your application does not rely on Gap Lock. 2: Rewrite queries to use all unique key columns in WHERE equal conditions. 3: Rewrite to single-table, single-statement transaction. Query: select * from gap1 force index(i) where c1=1 for update +select * from gap3 force index(ui) where value=1 for update; +id value +1 1 +select * from gap1 where id1=1 and id2=1 and id3=1 for update; +id1 id2 id3 c1 value +select * from gap1 where id1=1 and id2=1 and id3 in (1, 2, 3) for update; +id1 id2 id3 c1 value +select * from gap1 where id1=1 and id2=1 and id3=1 and value=1 +order by c1 for update; +id1 id2 id3 c1 value +select * from gap3 where id=1 for update; +id value +1 1 +set session autocommit=1; +select * from gap1 limit 1 for update; +id1 id2 id3 c1 value +0 0 1 1 1 +select * from gap1 where value != 100 limit 1 for update; +id1 id2 id3 c1 value +0 0 1 1 1 +select * from gap1 where id1=1 for update; +id1 id2 id3 c1 value +1 0 2 2 2 +1 0 3 3 3 +select * from gap1 where id1=1 and id2= 1 for update; +id1 id2 id3 c1 value +select * from gap1 where id1=1 and id2= 1 and id3 != 1 for update; +id1 id2 id3 c1 value +select * from gap1 where id1=1 and id2= 1 and id3 +between 1 and 3 for update; +id1 id2 id3 c1 value +select * from gap1 where id1=1 and id2= 1 order by id3 asc +limit 1 for update; +id1 id2 id3 c1 value +select * from gap1 where id1=1 and id2= 1 order by id3 desc +limit 1 
for update; +id1 id2 id3 c1 value +select * from gap1 order by id1 asc limit 1 for update; +id1 id2 id3 c1 value +0 0 1 1 1 +select * from gap1 order by id1 asc, id2 asc, id3 asc limit 1 for update; +id1 id2 id3 c1 value +0 0 1 1 1 +select * from gap1 order by id1 desc limit 1 for update; +id1 id2 id3 c1 value +500 100 1000 1000 1000 +select * from gap1 order by id1 desc, id2 desc, id3 desc +limit 1 for update; +id1 id2 id3 c1 value +500 100 1000 1000 1000 +select * from gap1 force index(i) where c1=1 for update; +id1 id2 id3 c1 value +0 0 1 1 1 +select * from gap3 force index(ui) where value=1 for update; +id value +1 1 +select * from gap1 where id1=1 and id2=1 and id3=1 for update; +id1 id2 id3 c1 value +select * from gap1 where id1=1 and id2=1 and id3 in (1, 2, 3) for update; +id1 id2 id3 c1 value +select * from gap1 where id1=1 and id2=1 and id3=1 and value=1 +order by c1 for update; +id1 id2 id3 c1 value +select * from gap3 where id=1 for update; +id value +1 1 +set session autocommit=0; +select * from gap1 limit 1 lock in share mode; +ERROR HY000: Using Gap Lock without full unique key in multi-table or multi-statement transactions is not allowed. You need either 1: Execute 'SET SESSION gap_lock_raise_error=0' if you are sure that your application does not rely on Gap Lock. 2: Rewrite queries to use all unique key columns in WHERE equal conditions. 3: Rewrite to single-table, single-statement transaction. Query: select * from gap1 limit 1 lock in share mode +select * from gap1 where value != 100 limit 1 lock in share mode; +ERROR HY000: Using Gap Lock without full unique key in multi-table or multi-statement transactions is not allowed. You need either 1: Execute 'SET SESSION gap_lock_raise_error=0' if you are sure that your application does not rely on Gap Lock. 2: Rewrite queries to use all unique key columns in WHERE equal conditions. 3: Rewrite to single-table, single-statement transaction. 
Query: select * from gap1 where value != 100 limit 1 lock in share mode +select * from gap1 where id1=1 lock in share mode; +ERROR HY000: Using Gap Lock without full unique key in multi-table or multi-statement transactions is not allowed. You need either 1: Execute 'SET SESSION gap_lock_raise_error=0' if you are sure that your application does not rely on Gap Lock. 2: Rewrite queries to use all unique key columns in WHERE equal conditions. 3: Rewrite to single-table, single-statement transaction. Query: select * from gap1 where id1=1 lock in share mode +select * from gap1 where id1=1 and id2= 1 lock in share mode; +ERROR HY000: Using Gap Lock without full unique key in multi-table or multi-statement transactions is not allowed. You need either 1: Execute 'SET SESSION gap_lock_raise_error=0' if you are sure that your application does not rely on Gap Lock. 2: Rewrite queries to use all unique key columns in WHERE equal conditions. 3: Rewrite to single-table, single-statement transaction. Query: select * from gap1 where id1=1 and id2= 1 lock in share mode +select * from gap1 where id1=1 and id2= 1 and id3 != 1 lock in share mode; +ERROR HY000: Using Gap Lock without full unique key in multi-table or multi-statement transactions is not allowed. You need either 1: Execute 'SET SESSION gap_lock_raise_error=0' if you are sure that your application does not rely on Gap Lock. 2: Rewrite queries to use all unique key columns in WHERE equal conditions. 3: Rewrite to single-table, single-statement transaction. Query: select * from gap1 where id1=1 and id2= 1 and id3 != 1 lock in share mode +select * from gap1 where id1=1 and id2= 1 and id3 +between 1 and 3 lock in share mode; +ERROR HY000: Using Gap Lock without full unique key in multi-table or multi-statement transactions is not allowed. You need either 1: Execute 'SET SESSION gap_lock_raise_error=0' if you are sure that your application does not rely on Gap Lock. 
2: Rewrite queries to use all unique key columns in WHERE equal conditions. 3: Rewrite to single-table, single-statement transaction. Query: select * from gap1 where id1=1 and id2= 1 and id3 +between 1 and 3 lock in share mode +select * from gap1 where id1=1 and id2= 1 order by id3 asc +limit 1 lock in share mode; +ERROR HY000: Using Gap Lock without full unique key in multi-table or multi-statement transactions is not allowed. You need either 1: Execute 'SET SESSION gap_lock_raise_error=0' if you are sure that your application does not rely on Gap Lock. 2: Rewrite queries to use all unique key columns in WHERE equal conditions. 3: Rewrite to single-table, single-statement transaction. Query: select * from gap1 where id1=1 and id2= 1 order by id3 asc +limit 1 lock in share mode +select * from gap1 where id1=1 and id2= 1 order by id3 desc +limit 1 lock in share mode; +ERROR HY000: Using Gap Lock without full unique key in multi-table or multi-statement transactions is not allowed. You need either 1: Execute 'SET SESSION gap_lock_raise_error=0' if you are sure that your application does not rely on Gap Lock. 2: Rewrite queries to use all unique key columns in WHERE equal conditions. 3: Rewrite to single-table, single-statement transaction. Query: select * from gap1 where id1=1 and id2= 1 order by id3 desc +limit 1 lock in share mode +select * from gap1 order by id1 asc limit 1 lock in share mode; +ERROR HY000: Using Gap Lock without full unique key in multi-table or multi-statement transactions is not allowed. You need either 1: Execute 'SET SESSION gap_lock_raise_error=0' if you are sure that your application does not rely on Gap Lock. 2: Rewrite queries to use all unique key columns in WHERE equal conditions. 3: Rewrite to single-table, single-statement transaction. 
Query: select * from gap1 order by id1 asc limit 1 lock in share mode +select * from gap1 order by id1 asc, id2 asc, id3 asc limit 1 lock in share mode; +ERROR HY000: Using Gap Lock without full unique key in multi-table or multi-statement transactions is not allowed. You need either 1: Execute 'SET SESSION gap_lock_raise_error=0' if you are sure that your application does not rely on Gap Lock. 2: Rewrite queries to use all unique key columns in WHERE equal conditions. 3: Rewrite to single-table, single-statement transaction. Query: select * from gap1 order by id1 asc, id2 asc, id3 asc limit 1 lock in share mode +select * from gap1 order by id1 desc limit 1 lock in share mode; +ERROR HY000: Using Gap Lock without full unique key in multi-table or multi-statement transactions is not allowed. You need either 1: Execute 'SET SESSION gap_lock_raise_error=0' if you are sure that your application does not rely on Gap Lock. 2: Rewrite queries to use all unique key columns in WHERE equal conditions. 3: Rewrite to single-table, single-statement transaction. Query: select * from gap1 order by id1 desc limit 1 lock in share mode +select * from gap1 order by id1 desc, id2 desc, id3 desc +limit 1 lock in share mode; +ERROR HY000: Using Gap Lock without full unique key in multi-table or multi-statement transactions is not allowed. You need either 1: Execute 'SET SESSION gap_lock_raise_error=0' if you are sure that your application does not rely on Gap Lock. 2: Rewrite queries to use all unique key columns in WHERE equal conditions. 3: Rewrite to single-table, single-statement transaction. Query: select * from gap1 order by id1 desc, id2 desc, id3 desc +limit 1 lock in share mode +select * from gap1 force index(i) where c1=1 lock in share mode; +ERROR HY000: Using Gap Lock without full unique key in multi-table or multi-statement transactions is not allowed. 
You need either 1: Execute 'SET SESSION gap_lock_raise_error=0' if you are sure that your application does not rely on Gap Lock. 2: Rewrite queries to use all unique key columns in WHERE equal conditions. 3: Rewrite to single-table, single-statement transaction. Query: select * from gap1 force index(i) where c1=1 lock in share mode +select * from gap3 force index(ui) where value=1 lock in share mode; +id value +1 1 +select * from gap1 where id1=1 and id2=1 and id3=1 lock in share mode; +id1 id2 id3 c1 value +select * from gap1 where id1=1 and id2=1 and id3 in (1, 2, 3) lock in share mode; +id1 id2 id3 c1 value +select * from gap1 where id1=1 and id2=1 and id3=1 and value=1 +order by c1 lock in share mode; +id1 id2 id3 c1 value +select * from gap3 where id=1 lock in share mode; +id value +1 1 +set session autocommit=1; +select * from gap1 limit 1 lock in share mode; +id1 id2 id3 c1 value +0 0 1 1 1 +select * from gap1 where value != 100 limit 1 lock in share mode; +id1 id2 id3 c1 value +0 0 1 1 1 +select * from gap1 where id1=1 lock in share mode; +id1 id2 id3 c1 value +1 0 2 2 2 +1 0 3 3 3 +select * from gap1 where id1=1 and id2= 1 lock in share mode; +id1 id2 id3 c1 value +select * from gap1 where id1=1 and id2= 1 and id3 != 1 lock in share mode; +id1 id2 id3 c1 value +select * from gap1 where id1=1 and id2= 1 and id3 +between 1 and 3 lock in share mode; +id1 id2 id3 c1 value +select * from gap1 where id1=1 and id2= 1 order by id3 asc +limit 1 lock in share mode; +id1 id2 id3 c1 value +select * from gap1 where id1=1 and id2= 1 order by id3 desc +limit 1 lock in share mode; +id1 id2 id3 c1 value +select * from gap1 order by id1 asc limit 1 lock in share mode; +id1 id2 id3 c1 value +0 0 1 1 1 +select * from gap1 order by id1 asc, id2 asc, id3 asc limit 1 lock in share mode; +id1 id2 id3 c1 value +0 0 1 1 1 +select * from gap1 order by id1 desc limit 1 lock in share mode; +id1 id2 id3 c1 value +500 100 1000 1000 1000 +select * from gap1 order by id1 desc, id2 desc, 
id3 desc +limit 1 lock in share mode; +id1 id2 id3 c1 value +500 100 1000 1000 1000 +select * from gap1 force index(i) where c1=1 lock in share mode; +id1 id2 id3 c1 value +0 0 1 1 1 +select * from gap3 force index(ui) where value=1 lock in share mode; +id value +1 1 +select * from gap1 where id1=1 and id2=1 and id3=1 lock in share mode; +id1 id2 id3 c1 value +select * from gap1 where id1=1 and id2=1 and id3 in (1, 2, 3) lock in share mode; +id1 id2 id3 c1 value +select * from gap1 where id1=1 and id2=1 and id3=1 and value=1 +order by c1 lock in share mode; +id1 id2 id3 c1 value +select * from gap3 where id=1 lock in share mode; +id value +1 1 +set session autocommit=0; +select * from gap1 limit 1 ; +id1 id2 id3 c1 value +0 0 1 1 1 +select * from gap1 where value != 100 limit 1 ; +id1 id2 id3 c1 value +0 0 1 1 1 +select * from gap1 where id1=1 ; +id1 id2 id3 c1 value +1 0 2 2 2 +1 0 3 3 3 +select * from gap1 where id1=1 and id2= 1 ; +id1 id2 id3 c1 value +select * from gap1 where id1=1 and id2= 1 and id3 != 1 ; +id1 id2 id3 c1 value +select * from gap1 where id1=1 and id2= 1 and id3 +between 1 and 3 ; +id1 id2 id3 c1 value +select * from gap1 where id1=1 and id2= 1 order by id3 asc +limit 1 ; +id1 id2 id3 c1 value +select * from gap1 where id1=1 and id2= 1 order by id3 desc +limit 1 ; +id1 id2 id3 c1 value +select * from gap1 order by id1 asc limit 1 ; +id1 id2 id3 c1 value +0 0 1 1 1 +select * from gap1 order by id1 asc, id2 asc, id3 asc limit 1 ; +id1 id2 id3 c1 value +0 0 1 1 1 +select * from gap1 order by id1 desc limit 1 ; +id1 id2 id3 c1 value +500 100 1000 1000 1000 +select * from gap1 order by id1 desc, id2 desc, id3 desc +limit 1 ; +id1 id2 id3 c1 value +500 100 1000 1000 1000 +select * from gap1 force index(i) where c1=1 ; +id1 id2 id3 c1 value +0 0 1 1 1 +select * from gap3 force index(ui) where value=1 ; +id value +1 1 +select * from gap1 where id1=1 and id2=1 and id3=1 ; +id1 id2 id3 c1 value +select * from gap1 where id1=1 and id2=1 and id3 in (1, 2, 
3) ; +id1 id2 id3 c1 value +select * from gap1 where id1=1 and id2=1 and id3=1 and value=1 +order by c1 ; +id1 id2 id3 c1 value +select * from gap3 where id=1 ; +id value +1 1 +set session autocommit=1; +select * from gap1 limit 1 ; +id1 id2 id3 c1 value +0 0 1 1 1 +select * from gap1 where value != 100 limit 1 ; +id1 id2 id3 c1 value +0 0 1 1 1 +select * from gap1 where id1=1 ; +id1 id2 id3 c1 value +1 0 2 2 2 +1 0 3 3 3 +select * from gap1 where id1=1 and id2= 1 ; +id1 id2 id3 c1 value +select * from gap1 where id1=1 and id2= 1 and id3 != 1 ; +id1 id2 id3 c1 value +select * from gap1 where id1=1 and id2= 1 and id3 +between 1 and 3 ; +id1 id2 id3 c1 value +select * from gap1 where id1=1 and id2= 1 order by id3 asc +limit 1 ; +id1 id2 id3 c1 value +select * from gap1 where id1=1 and id2= 1 order by id3 desc +limit 1 ; +id1 id2 id3 c1 value +select * from gap1 order by id1 asc limit 1 ; +id1 id2 id3 c1 value +0 0 1 1 1 +select * from gap1 order by id1 asc, id2 asc, id3 asc limit 1 ; +id1 id2 id3 c1 value +0 0 1 1 1 +select * from gap1 order by id1 desc limit 1 ; +id1 id2 id3 c1 value +500 100 1000 1000 1000 +select * from gap1 order by id1 desc, id2 desc, id3 desc +limit 1 ; +id1 id2 id3 c1 value +500 100 1000 1000 1000 +select * from gap1 force index(i) where c1=1 ; +id1 id2 id3 c1 value +0 0 1 1 1 +select * from gap3 force index(ui) where value=1 ; +id value +1 1 +select * from gap1 where id1=1 and id2=1 and id3=1 ; +id1 id2 id3 c1 value +select * from gap1 where id1=1 and id2=1 and id3 in (1, 2, 3) ; +id1 id2 id3 c1 value +select * from gap1 where id1=1 and id2=1 and id3=1 and value=1 +order by c1 ; +id1 id2 id3 c1 value +select * from gap3 where id=1 ; +id value +1 1 +set session autocommit=0; +insert into gap1 (id1, id2, id3) values (-1,-1,-1); +insert into gap1 (id1, id2, id3) values (-1,-1,-1) +on duplicate key update value=100; +update gap1 set value=100 where id1=1; +ERROR HY000: Using Gap Lock without full unique key in multi-table or multi-statement 
transactions is not allowed. You need either 1: Execute 'SET SESSION gap_lock_raise_error=0' if you are sure that your application does not rely on Gap Lock. 2: Rewrite queries to use all unique key columns in WHERE equal conditions. 3: Rewrite to single-table, single-statement transaction. Query: update gap1 set value=100 where id1=1 +update gap1 set value=100 where id1=1 and id2=1 and id3=1; +delete from gap1 where id1=2; +ERROR HY000: Using Gap Lock without full unique key in multi-table or multi-statement transactions is not allowed. You need either 1: Execute 'SET SESSION gap_lock_raise_error=0' if you are sure that your application does not rely on Gap Lock. 2: Rewrite queries to use all unique key columns in WHERE equal conditions. 3: Rewrite to single-table, single-statement transaction. Query: delete from gap1 where id1=2 +delete from gap1 where id1=-1 and id2=-1 and id3=-1; +commit; +set session autocommit=1; +insert into gap1 (id1, id2, id3) values (-1,-1,-1); +insert into gap1 (id1, id2, id3) values (-1,-1,-1) +on duplicate key update value=100; +update gap1 set value=100 where id1=1; +update gap1 set value=100 where id1=1 and id2=1 and id3=1; +delete from gap1 where id1=2; +delete from gap1 where id1=-1 and id2=-1 and id3=-1; +commit; +set session autocommit=1; +insert into gap2 select * from gap1; +ERROR HY000: Using Gap Lock without full unique key in multi-table or multi-statement transactions is not allowed. You need either 1: Execute 'SET SESSION gap_lock_raise_error=0' if you are sure that your application does not rely on Gap Lock. 2: Rewrite queries to use all unique key columns in WHERE equal conditions. 3: Rewrite to single-table, single-statement transaction. Query: insert into gap2 select * from gap1 +insert into gap2 select * from gap1 where id1=1; +ERROR HY000: Using Gap Lock without full unique key in multi-table or multi-statement transactions is not allowed. 
You need either 1: Execute 'SET SESSION gap_lock_raise_error=0' if you are sure that your application does not rely on Gap Lock. 2: Rewrite queries to use all unique key columns in WHERE equal conditions. 3: Rewrite to single-table, single-statement transaction. Query: insert into gap2 select * from gap1 where id1=1 +insert into gap2 select * from gap1 where id1=1 and id2=1 and id3=1; +create table t4 select * from gap1 where id1=1 and id2=1 and id3=1; +drop table t4; +create table t4 select * from gap1; +ERROR HY000: Using Gap Lock without full unique key in multi-table or multi-statement transactions is not allowed. You need either 1: Execute 'SET SESSION gap_lock_raise_error=0' if you are sure that your application does not rely on Gap Lock. 2: Rewrite queries to use all unique key columns in WHERE equal conditions. 3: Rewrite to single-table, single-statement transaction. Query: create table t4 select * from gap1 +create table t4 select * from gap1 where id1=1; +ERROR HY000: Using Gap Lock without full unique key in multi-table or multi-statement transactions is not allowed. You need either 1: Execute 'SET SESSION gap_lock_raise_error=0' if you are sure that your application does not rely on Gap Lock. 2: Rewrite queries to use all unique key columns in WHERE equal conditions. 3: Rewrite to single-table, single-statement transaction. Query: create table t4 select * from gap1 where id1=1 +update gap1 join gap2 on gap1.id1 and gap1.id2=gap2.id2 set gap1.value=100 where gap2.id1=3 +and gap2.id2=3 and gap2.id3=3; +update gap1 join gap2 on gap1.id1 and gap1.id2=gap2.id2 set gap1.value=100 where gap2.id1=3; +ERROR HY000: Using Gap Lock without full unique key in multi-table or multi-statement transactions is not allowed. You need either 1: Execute 'SET SESSION gap_lock_raise_error=0' if you are sure that your application does not rely on Gap Lock. 2: Rewrite queries to use all unique key columns in WHERE equal conditions. 
3: Rewrite to single-table, single-statement transaction. Query: update gap1 join gap2 on gap1.id1 and gap1.id2=gap2.id2 set gap1.value=100 where gap2.id1=3 +update gap1 join gap2 on gap1.id1 and gap1.id2=gap2.id2 join gap3 on gap1.id1=gap3.id +set gap1.value=100 where gap2.id1=3; +ERROR HY000: Using Gap Lock without full unique key in multi-table or multi-statement transactions is not allowed. You need either 1: Execute 'SET SESSION gap_lock_raise_error=0' if you are sure that your application does not rely on Gap Lock. 2: Rewrite queries to use all unique key columns in WHERE equal conditions. 3: Rewrite to single-table, single-statement transaction. Query: update gap1 join gap2 on gap1.id1 and gap1.id2=gap2.id2 join gap3 on gap1.id1=gap3.id +set gap1.value=100 where gap2.id1=3 +update gap1 set gap1.value= (select count(*) from gap2); +ERROR HY000: Using Gap Lock without full unique key in multi-table or multi-statement transactions is not allowed. You need either 1: Execute 'SET SESSION gap_lock_raise_error=0' if you are sure that your application does not rely on Gap Lock. 2: Rewrite queries to use all unique key columns in WHERE equal conditions. 3: Rewrite to single-table, single-statement transaction. Query: update gap1 set gap1.value= (select count(*) from gap2) +delete gap1 from gap1 join gap2 on gap1.id1 and gap1.id2=gap2.id2 where gap2.id1=3 +and gap2.id2=3 and gap2.id3=3; +delete gap1 from gap1 join gap2 on gap1.id1 and gap1.id2=gap2.id2 where gap2.id1=3; +ERROR HY000: Using Gap Lock without full unique key in multi-table or multi-statement transactions is not allowed. You need either 1: Execute 'SET SESSION gap_lock_raise_error=0' if you are sure that your application does not rely on Gap Lock. 2: Rewrite queries to use all unique key columns in WHERE equal conditions. 3: Rewrite to single-table, single-statement transaction. 
Query: delete gap1 from gap1 join gap2 on gap1.id1 and gap1.id2=gap2.id2 where gap2.id1=3 +select * from gap1, gap2 limit 1 for update; +ERROR HY000: Using Gap Lock without full unique key in multi-table or multi-statement transactions is not allowed. You need either 1: Execute 'SET SESSION gap_lock_raise_error=0' if you are sure that your application does not rely on Gap Lock. 2: Rewrite queries to use all unique key columns in WHERE equal conditions. 3: Rewrite to single-table, single-statement transaction. Query: select * from gap1, gap2 limit 1 for update +select * from gap1 a, gap1 b limit 1 for update; +ERROR HY000: Using Gap Lock without full unique key in multi-table or multi-statement transactions is not allowed. You need either 1: Execute 'SET SESSION gap_lock_raise_error=0' if you are sure that your application does not rely on Gap Lock. 2: Rewrite queries to use all unique key columns in WHERE equal conditions. 3: Rewrite to single-table, single-statement transaction. Query: select * from gap1 a, gap1 b limit 1 for update +create table u1( +c1 int, +c2 int, +c3 int, +c4 int, +primary key (c1, c2, c3), +unique key (c3, c1) +); +set session gap_lock_raise_error=1; +begin; +insert into u1 values (1,1,1,1); +commit; +begin; +insert into u1 values (1,2,1,1) on duplicate key update c4=10; +commit; +begin; +select * from u1 where c3=1 and c1 = 1 for update; +c1 c2 c3 c4 +1 1 1 10 +select * from u1 where c3=1 for update; +ERROR HY000: Using Gap Lock without full unique key in multi-table or multi-statement transactions is not allowed. You need either 1: Execute 'SET SESSION gap_lock_raise_error=0' if you are sure that your application does not rely on Gap Lock. 2: Rewrite queries to use all unique key columns in WHERE equal conditions. 3: Rewrite to single-table, single-statement transaction. 
Query: select * from u1 where c3=1 for update +commit; +drop table u1; +set global gap_lock_write_log= 0; +set global gap_lock_raise_error= 0; +drop table if exists gap1, gap2, gap3, gap4, gap5; +DROP DATABASE mysqlslap; +0 +SET GLOBAL gap_lock_log_file=''; +SET GLOBAL gap_lock_log_file=''; +flush general logs; +SET @save_gap_lock_exceptions = @@global.gap_lock_exceptions; +SET GLOBAL gap_lock_exceptions="t.*"; +drop table if exists gap1,gap2,gap3; +CREATE DATABASE mysqlslap; +CREATE TABLE gap1 (id1 INT, id2 INT, id3 INT, c1 INT, value INT, +PRIMARY KEY (id1, id2, id3), +INDEX i (c1)) ENGINE=rocksdb; +CREATE TABLE gap2 like gap1; +CREATE TABLE gap3 (id INT, value INT, +PRIMARY KEY (id), +UNIQUE KEY ui(value)) ENGINE=rocksdb; +insert into gap3 values (1,1), (2,2),(3,3),(4,4),(5,5); +create table gap4 ( +pk int primary key, +a int, +b int, +key(a) +) ENGINE=rocksdb; +insert into gap4 values (1,1,1), (2,2,2), (3,3,3), (4,4,4); +create table gap5 like gap4; +insert into gap5 values (1,1,1), (2,2,2), (3,3,3), (4,4,4); +set session gap_lock_raise_error=1; +set session gap_lock_write_log=1; +set session autocommit=0; +select * from gap1 limit 1 for update; +ERROR HY000: Using Gap Lock without full unique key in multi-table or multi-statement transactions is not allowed. You need either 1: Execute 'SET SESSION gap_lock_raise_error=0' if you are sure that your application does not rely on Gap Lock. 2: Rewrite queries to use all unique key columns in WHERE equal conditions. 3: Rewrite to single-table, single-statement transaction. Query: select * from gap1 limit 1 for update +select * from gap1 where value != 100 limit 1 for update; +ERROR HY000: Using Gap Lock without full unique key in multi-table or multi-statement transactions is not allowed. You need either 1: Execute 'SET SESSION gap_lock_raise_error=0' if you are sure that your application does not rely on Gap Lock. 2: Rewrite queries to use all unique key columns in WHERE equal conditions. 
3: Rewrite to single-table, single-statement transaction. Query: select * from gap1 where value != 100 limit 1 for update +set global gap_lock_write_log= 0; +set global gap_lock_raise_error= 0; +drop table if exists gap1, gap2, gap3, gap4, gap5; +DROP DATABASE mysqlslap; +0 +SET GLOBAL gap_lock_log_file=''; +SET GLOBAL gap_lock_log_file=''; +flush general logs; +SET GLOBAL gap_lock_exceptions="gap.*"; +drop table if exists gap1,gap2,gap3; +CREATE DATABASE mysqlslap; +CREATE TABLE gap1 (id1 INT, id2 INT, id3 INT, c1 INT, value INT, +PRIMARY KEY (id1, id2, id3), +INDEX i (c1)) ENGINE=rocksdb; +CREATE TABLE gap2 like gap1; +CREATE TABLE gap3 (id INT, value INT, +PRIMARY KEY (id), +UNIQUE KEY ui(value)) ENGINE=rocksdb; +insert into gap3 values (1,1), (2,2),(3,3),(4,4),(5,5); +create table gap4 ( +pk int primary key, +a int, +b int, +key(a) +) ENGINE=rocksdb; +insert into gap4 values (1,1,1), (2,2,2), (3,3,3), (4,4,4); +create table gap5 like gap4; +insert into gap5 values (1,1,1), (2,2,2), (3,3,3), (4,4,4); +set session gap_lock_raise_error=1; +set session gap_lock_write_log=1; +set session autocommit=0; +select * from gap1 limit 1 for update; +id1 id2 id3 c1 value +0 0 1 1 1 +select * from gap1 where value != 100 limit 1 for update; +id1 id2 id3 c1 value +0 0 1 1 1 +set global gap_lock_write_log= 0; +set global gap_lock_raise_error= 0; +drop table if exists gap1, gap2, gap3, gap4, gap5; +DROP DATABASE mysqlslap; +0 +SET GLOBAL gap_lock_log_file=''; +SET GLOBAL gap_lock_log_file=''; +flush general logs; +SET GLOBAL gap_lock_exceptions=@save_gap_lock_exceptions; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/get_error_message.result b/storage/rocksdb/mysql-test/rocksdb/r/get_error_message.result new file mode 100644 index 0000000000000..04dcac1fcb44c --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/get_error_message.result @@ -0,0 +1,8 @@ +DROP TABLE IF EXISTS t1; +CREATE TABLE t1(a INT,b INT,KEY (a)) PARTITION BY HASH (a) PARTITIONS 3; +SHOW TABLES; 
+Tables_in_test +t1 +ALTER TABLE t1 ADD PARTITION(PARTITION p3 DATA DIRECTORY='G:/mysqltest/p3Data' INDEX DIRECTORY='H:/mysqltest/p3Index'); +ERROR 42000: Incorrect table name 'H:/mysqltest/p3Index' +DROP TABLE t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/handler_basic.result b/storage/rocksdb/mysql-test/rocksdb/r/handler_basic.result new file mode 100644 index 0000000000000..efc6ccde50090 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/handler_basic.result @@ -0,0 +1,127 @@ +DROP TABLE IF EXISTS t1; +FLUSH STATUS; +CREATE TABLE t1 (id INT PRIMARY KEY, a VARCHAR(100), b INT, +INDEX b(b)) ENGINE=rocksdb; +INSERT INTO t1 (id,a,b) VALUES (1,'foobar',100),(2,'z',0),(3,'bar',50); +SHOW SESSION STATUS LIKE 'Handler_write%'; +Variable_name Value +Handler_write 3 +UPDATE t1 SET b=1000 WHERE id=1; +SHOW SESSION STATUS LIKE 'Handler_update%'; +Variable_name Value +Handler_update 1 +DELETE FROM t1 WHERE id=2; +SHOW SESSION STATUS LIKE 'Handler_delete%'; +Variable_name Value +Handler_delete 1 +INSERT INTO t1 (id,b) VALUES(4,4),(5,5),(6,6),(7,7),(8,8),(9,9),(10,10); +SHOW SESSION STATUS LIKE 'Handler_write%'; +Variable_name Value +Handler_write 10 +FLUSH STATUS; +SELECT * FROM t1 WHERE id=8; +id a b +8 NULL 8 +SHOW SESSION STATUS LIKE 'Handler_read%'; +Variable_name Value +Handler_read_first 0 +Handler_read_key 1 +Handler_read_last 0 +Handler_read_next 0 +Handler_read_prev 0 +Handler_read_retry 0 +Handler_read_rnd 0 +Handler_read_rnd_deleted 0 +Handler_read_rnd_next 0 +FLUSH STATUS; +SELECT * FROM t1 WHERE b=6; +id a b +6 NULL 6 +SHOW SESSION STATUS LIKE 'Handler_read%'; +Variable_name Value +Handler_read_first 0 +Handler_read_key 1 +Handler_read_last 0 +Handler_read_next 1 +Handler_read_prev 0 +Handler_read_retry 0 +Handler_read_rnd 0 +Handler_read_rnd_deleted 0 +Handler_read_rnd_next 0 +FLUSH STATUS; +SELECT * FROM t1; +id a b +1 foobar 1000 +10 NULL 10 +3 bar 50 +4 NULL 4 +5 NULL 5 +6 NULL 6 +7 NULL 7 +8 NULL 8 +9 NULL 9 +SHOW SESSION STATUS LIKE 
'Handler_read%'; +Variable_name Value +Handler_read_first 0 +Handler_read_key 0 +Handler_read_last 0 +Handler_read_next 0 +Handler_read_prev 0 +Handler_read_retry 0 +Handler_read_rnd 0 +Handler_read_rnd_deleted 0 +Handler_read_rnd_next 10 +FLUSH STATUS; +SELECT * FROM t1 FORCE INDEX(b) WHERE b <=5 ORDER BY b; +id a b +4 NULL 4 +5 NULL 5 +SHOW SESSION STATUS LIKE 'Handler_read%'; +Variable_name Value +Handler_read_first 0 +Handler_read_key 1 +Handler_read_last 0 +Handler_read_next 2 +Handler_read_prev 0 +Handler_read_retry 0 +Handler_read_rnd 0 +Handler_read_rnd_deleted 0 +Handler_read_rnd_next 0 +FLUSH STATUS; +SELECT * FROM t1 WHERE id >=8 ORDER BY id; +id a b +8 NULL 8 +9 NULL 9 +10 NULL 10 +SHOW SESSION STATUS LIKE 'Handler_read%'; +Variable_name Value +Handler_read_first 0 +Handler_read_key 1 +Handler_read_last 0 +Handler_read_next 3 +Handler_read_prev 0 +Handler_read_retry 0 +Handler_read_rnd 0 +Handler_read_rnd_deleted 0 +Handler_read_rnd_next 0 +FLUSH STATUS; +SELECT * FROM t1 WHERE id < 8 ORDER BY id; +id a b +1 foobar 1000 +3 bar 50 +4 NULL 4 +5 NULL 5 +6 NULL 6 +7 NULL 7 +SHOW SESSION STATUS LIKE 'Handler_read%'; +Variable_name Value +Handler_read_first 1 +Handler_read_key 0 +Handler_read_last 0 +Handler_read_next 6 +Handler_read_prev 0 +Handler_read_retry 0 +Handler_read_rnd 0 +Handler_read_rnd_deleted 0 +Handler_read_rnd_next 0 +DROP TABLE t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/hermitage.result b/storage/rocksdb/mysql-test/rocksdb/r/hermitage.result new file mode 100644 index 0000000000000..e4d080289dc99 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/hermitage.result @@ -0,0 +1,648 @@ +DROP TABLE IF EXISTS test; +connect con1,localhost,root,,; +SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED; +connect con2,localhost,root,,; +SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED; +connect con3,localhost,root,,; +SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED; +connection con1; +create table test (id int primary 
key, value int) engine=rocksdb; +connection con1; +truncate table test; +insert into test (id, value) values (1, 10), (2, 20); +begin; +connection con2; +begin; +connection con3; +begin; +connection con1; +select * from test; +id value +1 10 +2 20 +update test set value = 101 where id = 1; +connection con2; +select * from test; +id value +1 10 +2 20 +connection con1; +rollback; +connection con2; +select * from test; +id value +1 10 +2 20 +commit; +connection con1; +truncate table test; +insert into test (id, value) values (1, 10), (2, 20); +begin; +connection con2; +begin; +connection con3; +begin; +connection con1; +update test set value = 101 where id = 1; +connection con2; +select * from test; +id value +1 10 +2 20 +connection con1; +update test set value = 11 where id = 1; +commit; +connection con2; +select * from test; +id value +1 11 +2 20 +commit; +connection con1; +truncate table test; +insert into test (id, value) values (1, 10), (2, 20); +begin; +connection con2; +begin; +connection con3; +begin; +connection con1; +update test set value = 11 where id = 1; +connection con2; +update test set value = 22 where id = 2; +connection con1; +select * from test where id = 2; +id value +2 20 +connection con2; +select * from test where id = 1; +id value +1 10 +connection con1; +commit; +connection con2; +commit; +connection con1; +truncate table test; +insert into test (id, value) values (1, 10), (2, 20); +begin; +connection con2; +begin; +connection con3; +begin; +connection con1; +update test set value = 11 where id = 1; +update test set value = 19 where id = 2; +connection con2; +update test set value = 12 where id = 1; +connection con1; +commit; +connection con2; +connection con3; +select * from test; +id value +1 11 +2 19 +connection con2; +update test set value = 18 where id = 2; +connection con3; +select * from test; +id value +1 11 +2 19 +connection con2; +commit; +connection con3; +select * from test; +id value +1 12 +2 18 +commit; +connection con1; 
+truncate table test; +insert into test (id, value) values (1, 10), (2, 20); +begin; +connection con2; +begin; +connection con3; +begin; +connection con1; +select * from test where value = 30; +id value +connection con2; +insert into test (id, value) values(3, 30); +commit; +connection con1; +select * from test where value % 3 = 0; +id value +3 30 +commit; +connection con1; +truncate table test; +insert into test (id, value) values (1, 10), (2, 20); +begin; +connection con2; +begin; +connection con3; +begin; +connection con1; +update test set value = value + 10; +connection con2; +select variable_value into @a from information_schema.global_status where variable_name='rocksdb_snapshot_conflict_errors'; +select * from test; +id value +1 10 +2 20 +delete from test where value = 20; +connection con1; +commit; +connection con2; +select * from test; +id value +2 30 +commit; +connection con1; +truncate table test; +insert into test (id, value) values (1, 10), (2, 20); +begin; +connection con2; +begin; +connection con3; +begin; +connection con1; +select * from test where id = 1; +id value +1 10 +connection con2; +select * from test where id = 1; +id value +1 10 +connection con1; +update test set value = 11 where id = 1; +connection con2; +update test set value = 12 where id = 1; +connection con1; +commit; +connection con2; +select * from test; +id value +1 12 +2 20 +commit; +connection con1; +truncate table test; +insert into test (id, value) values (1, 10), (2, 20); +begin; +connection con2; +begin; +connection con3; +begin; +connection con1; +select * from test where id = 1; +id value +1 10 +connection con2; +select * from test where id = 1; +id value +1 10 +select * from test where id = 2; +id value +2 20 +update test set value = 12 where id = 1; +update test set value = 18 where id = 2; +commit; +connection con1; +select * from test where id = 2; +id value +2 18 +commit; +connection con1; +truncate table test; +insert into test (id, value) values (1, 10), (2, 20); 
+begin; +connection con2; +begin; +connection con3; +begin; +connection con1; +select * from test where value % 5 = 0; +id value +1 10 +2 20 +connection con2; +update test set value = 12 where value = 10; +commit; +connection con1; +select * from test where value % 3 = 0; +id value +1 12 +commit; +connection con1; +truncate table test; +insert into test (id, value) values (1, 10), (2, 20); +begin; +connection con2; +begin; +connection con3; +begin; +connection con1; +select * from test where id = 1; +id value +1 10 +connection con2; +select * from test; +id value +1 10 +2 20 +update test set value = 12 where id = 1; +update test set value = 18 where id = 2; +commit; +connection con1; +delete from test where value = 20; +select * from test where id = 2; +id value +2 18 +commit; +connection con1; +truncate table test; +insert into test (id, value) values (1, 10), (2, 20); +begin; +connection con2; +begin; +connection con3; +begin; +connection con1; +select * from test where id in (1,2); +id value +1 10 +2 20 +connection con2; +select * from test where id in (1,2); +id value +1 10 +2 20 +connection con1; +update test set value = 11 where id = 1; +connection con2; +update test set value = 21 where id = 2; +connection con1; +commit; +connection con2; +commit; +connection con1; +truncate table test; +insert into test (id, value) values (1, 10), (2, 20); +begin; +connection con2; +begin; +connection con3; +begin; +connection con1; +select * from test where value % 3 = 0; +id value +connection con2; +select * from test where value % 3 = 0; +id value +connection con1; +insert into test (id, value) values(3, 30); +connection con2; +insert into test (id, value) values(4, 42); +connection con1; +commit; +connection con2; +commit; +select * from test where value % 3 = 0; +id value +3 30 +4 42 +connection con1; +select * from test where value % 3 = 0; +id value +3 30 +4 42 +connection default; +drop table test; +disconnect con1; +disconnect con2; +disconnect con3; +DROP TABLE IF 
EXISTS test; +connect con1,localhost,root,,; +SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ; +connect con2,localhost,root,,; +SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ; +connect con3,localhost,root,,; +SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ; +connection con1; +create table test (id int primary key, value int) engine=rocksdb; +connection con1; +truncate table test; +insert into test (id, value) values (1, 10), (2, 20); +begin; +connection con2; +begin; +connection con3; +begin; +connection con1; +select * from test; +id value +1 10 +2 20 +update test set value = 101 where id = 1; +connection con2; +select * from test; +id value +1 10 +2 20 +connection con1; +rollback; +connection con2; +select * from test; +id value +1 10 +2 20 +commit; +connection con1; +truncate table test; +insert into test (id, value) values (1, 10), (2, 20); +begin; +connection con2; +begin; +connection con3; +begin; +connection con1; +update test set value = 101 where id = 1; +connection con2; +select * from test; +id value +1 10 +2 20 +connection con1; +update test set value = 11 where id = 1; +commit; +connection con2; +select * from test; +id value +1 10 +2 20 +commit; +connection con1; +truncate table test; +insert into test (id, value) values (1, 10), (2, 20); +begin; +connection con2; +begin; +connection con3; +begin; +connection con1; +update test set value = 11 where id = 1; +connection con2; +update test set value = 22 where id = 2; +connection con1; +select * from test where id = 2; +id value +2 20 +connection con2; +select * from test where id = 1; +id value +1 10 +connection con1; +commit; +connection con2; +commit; +connection con1; +truncate table test; +insert into test (id, value) values (1, 10), (2, 20); +begin; +connection con2; +begin; +connection con3; +begin; +connection con1; +update test set value = 11 where id = 1; +update test set value = 19 where id = 2; +connection con2; +update test set value = 12 where id = 1; +connection 
con1; +commit; +connection con2; +connection con3; +select * from test; +id value +1 11 +2 19 +connection con2; +update test set value = 18 where id = 2; +connection con3; +select * from test; +id value +1 11 +2 19 +connection con2; +commit; +connection con3; +select * from test; +id value +1 11 +2 19 +commit; +connection con1; +truncate table test; +insert into test (id, value) values (1, 10), (2, 20); +begin; +connection con2; +begin; +connection con3; +begin; +connection con1; +select * from test where value = 30; +id value +connection con2; +insert into test (id, value) values(3, 30); +commit; +connection con1; +select * from test where value % 3 = 0; +id value +commit; +connection con1; +truncate table test; +insert into test (id, value) values (1, 10), (2, 20); +begin; +connection con2; +begin; +connection con3; +begin; +connection con1; +update test set value = value + 10; +connection con2; +select variable_value into @a from information_schema.global_status where variable_name='rocksdb_snapshot_conflict_errors'; +select * from test; +id value +1 10 +2 20 +delete from test where value = 20; +connection con1; +commit; +connection con2; +ERROR 40001: Deadlock found when trying to get lock; try restarting transaction +select variable_value-@a from information_schema.global_status where variable_name='rocksdb_snapshot_conflict_errors'; +variable_value-@a +1 +commit; +connection con1; +truncate table test; +insert into test (id, value) values (1, 10), (2, 20); +begin; +connection con2; +begin; +connection con3; +begin; +connection con1; +select * from test where id = 1; +id value +1 10 +connection con2; +select * from test where id = 1; +id value +1 10 +connection con1; +update test set value = 11 where id = 1; +connection con2; +update test set value = 12 where id = 1; +connection con1; +commit; +connection con2; +ERROR 40001: Deadlock found when trying to get lock; try restarting transaction +commit; +connection con1; +truncate table test; +insert into test 
(id, value) values (1, 10), (2, 20); +begin; +connection con2; +begin; +connection con3; +begin; +connection con1; +select * from test where id = 1; +id value +1 10 +connection con2; +select * from test where id = 1; +id value +1 10 +select * from test where id = 2; +id value +2 20 +update test set value = 12 where id = 1; +update test set value = 18 where id = 2; +commit; +connection con1; +select * from test where id = 2; +id value +2 20 +commit; +connection con1; +truncate table test; +insert into test (id, value) values (1, 10), (2, 20); +begin; +connection con2; +begin; +connection con3; +begin; +connection con1; +select * from test where value % 5 = 0; +id value +1 10 +2 20 +connection con2; +update test set value = 12 where value = 10; +commit; +connection con1; +select * from test where value % 3 = 0; +id value +commit; +connection con1; +truncate table test; +insert into test (id, value) values (1, 10), (2, 20); +begin; +connection con2; +begin; +connection con3; +begin; +connection con1; +select * from test where id = 1; +id value +1 10 +connection con2; +select * from test; +id value +1 10 +2 20 +update test set value = 12 where id = 1; +update test set value = 18 where id = 2; +commit; +connection con1; +delete from test where value = 20; +ERROR 40001: Deadlock found when trying to get lock; try restarting transaction +commit; +connection con1; +truncate table test; +insert into test (id, value) values (1, 10), (2, 20); +begin; +connection con2; +begin; +connection con3; +begin; +connection con1; +select * from test where id in (1,2); +id value +1 10 +2 20 +connection con2; +select * from test where id in (1,2); +id value +1 10 +2 20 +connection con1; +update test set value = 11 where id = 1; +connection con2; +update test set value = 21 where id = 2; +connection con1; +commit; +connection con2; +commit; +connection con1; +truncate table test; +insert into test (id, value) values (1, 10), (2, 20); +begin; +connection con2; +begin; +connection con3; 
+begin; +connection con1; +select * from test where value % 3 = 0; +id value +connection con2; +select * from test where value % 3 = 0; +id value +connection con1; +insert into test (id, value) values(3, 30); +connection con2; +insert into test (id, value) values(4, 42); +connection con1; +commit; +connection con2; +commit; +select * from test where value % 3 = 0; +id value +3 30 +4 42 +connection con1; +select * from test where value % 3 = 0; +id value +3 30 +4 42 +connection default; +drop table test; +disconnect con1; +disconnect con2; +disconnect con3; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/i_s_ddl.result b/storage/rocksdb/mysql-test/rocksdb/r/i_s_ddl.result new file mode 100644 index 0000000000000..b37b0d0b72dac --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/i_s_ddl.result @@ -0,0 +1,17 @@ +DROP TABLE IF EXISTS is_ddl_t1; +DROP TABLE IF EXISTS is_ddl_t2; +CREATE TABLE is_ddl_t1 (i INT, j INT, k INT, l INT, +PRIMARY KEY (i), KEY (j), KEY (k, l) COMMENT 'kl_cf') +ENGINE = ROCKSDB; +CREATE TABLE is_ddl_t2 (x INT, y INT, z INT, +PRIMARY KEY (z, y) COMMENT 'zy_cf', +KEY (x)) ENGINE = ROCKSDB; +SELECT TABLE_SCHEMA,TABLE_NAME,PARTITION_NAME,INDEX_NAME,INDEX_TYPE,KV_FORMAT_VERSION,CF FROM INFORMATION_SCHEMA.ROCKSDB_DDL WHERE TABLE_NAME like 'is_ddl_t%'; +TABLE_SCHEMA TABLE_NAME PARTITION_NAME INDEX_NAME INDEX_TYPE KV_FORMAT_VERSION CF +test is_ddl_t1 NULL PRIMARY 1 11 default +test is_ddl_t1 NULL j 2 11 default +test is_ddl_t1 NULL k 2 11 kl_cf +test is_ddl_t2 NULL PRIMARY 1 11 zy_cf +test is_ddl_t2 NULL x 2 11 default +DROP TABLE is_ddl_t1; +DROP TABLE is_ddl_t2; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/index.result b/storage/rocksdb/mysql-test/rocksdb/r/index.result new file mode 100644 index 0000000000000..99390c8ceb21b --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/index.result @@ -0,0 +1,62 @@ +CREATE TABLE t1 (a INT, +b CHAR(8), +pk INT PRIMARY KEY, +KEY (a) +) ENGINE=rocksdb; +SHOW KEYS IN t1; +Table Non_unique Key_name 
Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment +t1 0 PRIMARY 1 pk A # NULL NULL LSMTREE +t1 1 a 1 a A # NULL NULL YES LSMTREE +DROP TABLE t1; +CREATE TABLE t1 (a INT, +b CHAR(8), +pk INT PRIMARY KEY, +KEY a_b (a,b) COMMENT 'a_b index' +) ENGINE=rocksdb; +SHOW KEYS IN t1; +Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment +t1 0 PRIMARY 1 pk A # NULL NULL LSMTREE +t1 1 a_b 1 a A # NULL NULL YES LSMTREE a_b index +t1 1 a_b 2 b A # NULL NULL YES LSMTREE a_b index +DROP TABLE t1; +CREATE TABLE t1 (a INT, +b CHAR(8), +pk INT PRIMARY KEY, +KEY (a), +KEY (b) +) ENGINE=rocksdb; +SHOW KEYS IN t1; +Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment +t1 0 PRIMARY 1 pk A # NULL NULL LSMTREE +t1 1 a 1 a A # NULL NULL YES LSMTREE +t1 1 b 1 b A # NULL NULL YES LSMTREE +DROP TABLE t1; +CREATE TABLE t1 (a INT, b CHAR(8), pk INT AUTO_INCREMENT PRIMARY KEY) ENGINE=rocksdb; +INSERT INTO t1 (a,b) VALUES (100,'z'); +ALTER TABLE t1 ADD KEY (a) COMMENT 'simple index on a'; +SHOW INDEX FROM t1; +Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment +t1 0 PRIMARY 1 pk A # NULL NULL LSMTREE +t1 1 a 1 a A # NULL NULL YES LSMTREE simple index on a +ALTER TABLE t1 DROP KEY a; +DROP TABLE t1; +# +# Issue #376: MyRocks: ORDER BY optimizer is unable to use the index extension +# +create table t0 (a int); +insert into t0 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9); +create table t1(a int); +insert into t1 select A.a + B.a* 10 + C.a * 100 from t0 A, t0 B, t0 C; +create table t2 ( +pk int not null, +a int not null, +b int not null, +primary key(pk), +key(a) +) engine=rocksdb; +insert into t2 select A.a, FLOOR(A.a/10), A.a from t1 A; +# This must have type=range, index=a, and must not have 'Using filesort': +explain select * 
from t2 force index (a) where a=0 and pk>=3 order by pk; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t2 range a a 8 NULL # Using index condition +drop table t0,t1,t2; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/index_file_map.result b/storage/rocksdb/mysql-test/rocksdb/r/index_file_map.result new file mode 100644 index 0000000000000..2c7d37c053f76 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/index_file_map.result @@ -0,0 +1,28 @@ +DROP TABLE IF EXISTS t1; +DROP TABLE IF EXISTS t2; +CREATE TABLE t1 (i INT PRIMARY KEY, j INT, INDEX(j)) ENGINE = ROCKSDB; +CREATE TABLE t2 (k INT PRIMARY KEY, l INT REFERENCES t1.i) ENGINE = ROCKSDB; +INSERT INTO t1 VALUES (1,2), (2,4), (3,6), (4,8), (5,10); +INSERT INTO t2 VALUES (100,1), (200,2), (300,3), (400,4); +COMMIT; +SET GLOBAL rocksdb_force_flush_memtable_now = 1; +SELECT * FROM INFORMATION_SCHEMA.ROCKSDB_INDEX_FILE_MAP +WHERE INDEX_NUMBER = +(SELECT INDEX_NUMBER FROM INFORMATION_SCHEMA.ROCKSDB_DDL +WHERE TABLE_NAME = 't1' AND INDEX_NAME = "PRIMARY"); +COLUMN_FAMILY INDEX_NUMBER SST_NAME NUM_ROWS DATA_SIZE ENTRY_DELETES ENTRY_SINGLEDELETES ENTRY_MERGES ENTRY_OTHERS DISTINCT_KEYS_PREFIX +# # SSTNAME 5 # # # # # 5 +SELECT * FROM INFORMATION_SCHEMA.ROCKSDB_INDEX_FILE_MAP +WHERE INDEX_NUMBER = +(SELECT INDEX_NUMBER FROM INFORMATION_SCHEMA.ROCKSDB_DDL +WHERE TABLE_NAME = 't1' AND INDEX_NAME = "j"); +COLUMN_FAMILY INDEX_NUMBER SST_NAME NUM_ROWS DATA_SIZE ENTRY_DELETES ENTRY_SINGLEDELETES ENTRY_MERGES ENTRY_OTHERS DISTINCT_KEYS_PREFIX +# # SSTNAME 5 # # # # # 5,5 +SELECT * FROM INFORMATION_SCHEMA.ROCKSDB_INDEX_FILE_MAP +WHERE INDEX_NUMBER = +(SELECT INDEX_NUMBER FROM INFORMATION_SCHEMA.ROCKSDB_DDL +WHERE TABLE_NAME = 't2' AND INDEX_NAME = "PRIMARY"); +COLUMN_FAMILY INDEX_NUMBER SST_NAME NUM_ROWS DATA_SIZE ENTRY_DELETES ENTRY_SINGLEDELETES ENTRY_MERGES ENTRY_OTHERS DISTINCT_KEYS_PREFIX +# # SSTNAME 4 # # # # # 4 +DROP TABLE t1; +DROP TABLE t2; diff --git 
a/storage/rocksdb/mysql-test/rocksdb/r/index_key_block_size.result b/storage/rocksdb/mysql-test/rocksdb/r/index_key_block_size.result new file mode 100644 index 0000000000000..b0113d79bb23d --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/index_key_block_size.result @@ -0,0 +1,51 @@ +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 (a INT, +b CHAR(8), +pk INT PRIMARY KEY, +KEY (a) KEY_BLOCK_SIZE=8 +) ENGINE=rocksdb; +SHOW KEYS IN t1; +Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment +t1 0 PRIMARY 1 pk A # NULL NULL LSMTREE +t1 1 a 1 a A # NULL NULL YES LSMTREE +DROP TABLE t1; +CREATE TABLE t1 (a INT, +b CHAR(8), +pk INT PRIMARY KEY, +KEY ind1(b ASC) KEY_BLOCK_SIZE=0 +) ENGINE=rocksdb; +SHOW INDEX IN t1; +Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment +t1 0 PRIMARY 1 pk A # NULL NULL LSMTREE +t1 1 ind1 1 b A # NULL NULL YES LSMTREE +DROP TABLE t1; +CREATE TABLE t1 (a INT, +b CHAR(8), +PRIMARY KEY ind2(b(1) DESC) KEY_BLOCK_SIZE=32768 COMMENT 'big key_block_size value' +) ENGINE=rocksdb; +SHOW INDEX IN t1; +Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment +t1 0 PRIMARY 1 b A # 1 NULL LSMTREE big key_block_size value +DROP TABLE t1; +CREATE TABLE t1 (a INT, +b CHAR(8), +pk INT AUTO_INCREMENT PRIMARY KEY, +KEY a_b(a,b) KEY_BLOCK_SIZE=8192 +) ENGINE=rocksdb; +SHOW INDEX IN t1; +Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment +t1 0 PRIMARY 1 pk A # NULL NULL LSMTREE +t1 1 a_b 1 a A # NULL NULL YES LSMTREE +t1 1 a_b 2 b A # NULL NULL YES LSMTREE +DROP TABLE t1; +CREATE TABLE t1 (a INT, +b CHAR(8), +PRIMARY KEY (b) +) ENGINE=rocksdb; +INSERT INTO t1 (a,b) VALUES (100,'z'); +ALTER TABLE t1 ADD KEY(a) KEY_BLOCK_SIZE 8192; +SHOW INDEX FROM t1; +Table Non_unique 
Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment +t1 0 PRIMARY 1 b A # NULL NULL LSMTREE +t1 1 a 1 a A # NULL NULL YES LSMTREE +DROP TABLE t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/index_primary.result b/storage/rocksdb/mysql-test/rocksdb/r/index_primary.result new file mode 100644 index 0000000000000..66481f81c67d9 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/index_primary.result @@ -0,0 +1,48 @@ +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 (a INT PRIMARY KEY, +b CHAR(8) +) ENGINE=rocksdb; +SHOW KEYS IN t1; +Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment +t1 0 PRIMARY 1 a A # NULL NULL LSMTREE +INSERT INTO t1 (a,b) VALUES (1,'a'),(2,'b'); +INSERT INTO t1 (a,b) VALUES (1,'c'); +ERROR 23000: Duplicate entry '1' for key 'PRIMARY' +DROP TABLE t1; +CREATE TABLE t1 (a INT PRIMARY KEY, +b CHAR(8) PRIMARY KEY +) ENGINE=rocksdb; +ERROR 42000: Multiple primary key defined +CREATE TABLE t1 (a INT, +b CHAR(8), +PRIMARY KEY (a,b) +) ENGINE=rocksdb; +SHOW INDEX IN t1; +Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment +t1 0 PRIMARY 1 a A # NULL NULL LSMTREE +t1 0 PRIMARY 2 b A # NULL NULL LSMTREE +INSERT INTO t1 (a,b) VALUES (1,'a'),(1,'b'),(2,'a'),(2,'b'); +INSERT INTO t1 (a,b) VALUES (1,'b'); +ERROR 23000: Duplicate entry '1-b' for key 'PRIMARY' +DROP TABLE t1; +CREATE TABLE t1 (a INT KEY, +b CHAR(8), +KEY (b) +) ENGINE=rocksdb; +SHOW INDEX IN t1; +Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment +t1 0 PRIMARY 1 a A # NULL NULL LSMTREE +t1 1 b 1 b A # NULL NULL YES LSMTREE +DROP TABLE t1; +CREATE TABLE t1 (a INT, +b CHAR(8) PRIMARY KEY +) ENGINE=rocksdb; +SHOW INDEX IN t1; +Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed 
Null Index_type Comment Index_comment +t1 0 PRIMARY 1 b A # NULL NULL LSMTREE +ALTER TABLE t1 ADD CONSTRAINT PRIMARY KEY pk (a); +ERROR 42000: Multiple primary key defined +SHOW KEYS IN t1; +Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment +t1 0 PRIMARY 1 b A # NULL NULL LSMTREE +DROP TABLE t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/index_type_btree.result b/storage/rocksdb/mysql-test/rocksdb/r/index_type_btree.result new file mode 100644 index 0000000000000..a604663954b7f --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/index_type_btree.result @@ -0,0 +1,42 @@ +CREATE TABLE t1 (a INT, +b CHAR(8), +pk INT PRIMARY KEY, +KEY USING BTREE (a) +) ENGINE=rocksdb; +SHOW KEYS IN t1; +Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment +t1 0 PRIMARY 1 pk A # NULL NULL LSMTREE +t1 1 a 1 a A # NULL NULL YES LSMTREE +DROP TABLE t1; +CREATE TABLE t1 (a INT, +b CHAR(8), +pk INT PRIMARY KEY, +KEY a_b USING BTREE (a,b) COMMENT 'a_b index' +) ENGINE=rocksdb; +SHOW KEYS IN t1; +Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment +t1 0 PRIMARY 1 pk A # NULL NULL LSMTREE +t1 1 a_b 1 a A # NULL NULL YES LSMTREE a_b index +t1 1 a_b 2 b A # NULL NULL YES LSMTREE a_b index +DROP TABLE t1; +CREATE TABLE t1 (a INT, +b CHAR(8), +pk INT PRIMARY KEY, +KEY USING BTREE (a), +KEY USING BTREE (b) +) ENGINE=rocksdb; +SHOW KEYS IN t1; +Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment +t1 0 PRIMARY 1 pk A # NULL NULL LSMTREE +t1 1 a 1 a A # NULL NULL YES LSMTREE +t1 1 b 1 b A # NULL NULL YES LSMTREE +DROP TABLE t1; +CREATE TABLE t1 (a INT, b CHAR(8), pk INT AUTO_INCREMENT PRIMARY KEY) ENGINE=rocksdb; +INSERT INTO t1 (a,b) VALUES (100,'z'); +ALTER TABLE t1 ADD KEY (a) USING BTREE 
COMMENT 'simple index on a'; +SHOW INDEX FROM t1; +Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment +t1 0 PRIMARY 1 pk A # NULL NULL LSMTREE +t1 1 a 1 a A # NULL NULL YES LSMTREE simple index on a +ALTER TABLE t1 DROP KEY a; +DROP TABLE t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/index_type_hash.result b/storage/rocksdb/mysql-test/rocksdb/r/index_type_hash.result new file mode 100644 index 0000000000000..ae99badff1407 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/index_type_hash.result @@ -0,0 +1,42 @@ +CREATE TABLE t1 (a INT, +b CHAR(8), +pk INT PRIMARY KEY, +KEY USING HASH (a) +) ENGINE=rocksdb; +SHOW KEYS IN t1; +Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment +t1 0 PRIMARY 1 pk A # NULL NULL LSMTREE +t1 1 a 1 a A # NULL NULL YES LSMTREE +DROP TABLE t1; +CREATE TABLE t1 (a INT, +b CHAR(8), +pk INT PRIMARY KEY, +KEY a_b USING HASH (a,b) COMMENT 'a_b index' +) ENGINE=rocksdb; +SHOW KEYS IN t1; +Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment +t1 0 PRIMARY 1 pk A # NULL NULL LSMTREE +t1 1 a_b 1 a A # NULL NULL YES LSMTREE a_b index +t1 1 a_b 2 b A # NULL NULL YES LSMTREE a_b index +DROP TABLE t1; +CREATE TABLE t1 (a INT, +b CHAR(8), +pk INT PRIMARY KEY, +KEY USING HASH (a), +KEY USING HASH (b) +) ENGINE=rocksdb; +SHOW KEYS IN t1; +Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment +t1 0 PRIMARY 1 pk A # NULL NULL LSMTREE +t1 1 a 1 a A # NULL NULL YES LSMTREE +t1 1 b 1 b A # NULL NULL YES LSMTREE +DROP TABLE t1; +CREATE TABLE t1 (a INT, b CHAR(8), pk INT AUTO_INCREMENT PRIMARY KEY) ENGINE=rocksdb; +INSERT INTO t1 (a,b) VALUES (100,'z'); +ALTER TABLE t1 ADD KEY (a) USING HASH COMMENT 'simple index on a'; +SHOW INDEX FROM t1; +Table 
Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment +t1 0 PRIMARY 1 pk A # NULL NULL LSMTREE +t1 1 a 1 a A # NULL NULL YES LSMTREE simple index on a +ALTER TABLE t1 DROP KEY a; +DROP TABLE t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/information_schema.result b/storage/rocksdb/mysql-test/rocksdb/r/information_schema.result new file mode 100644 index 0000000000000..f55662183ca81 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/information_schema.result @@ -0,0 +1,84 @@ +DROP TABLE IF EXISTS t1; +DROP TABLE IF EXISTS t2; +DROP TABLE IF EXISTS t3; +select * from INFORMATION_SCHEMA.ROCKSDB_GLOBAL_INFO; +TYPE NAME VALUE +MAX_INDEX_ID MAX_INDEX_ID max_index_id +CF_FLAGS 0 default [0] +CF_FLAGS 1 __system__ [0] +select count(*) from INFORMATION_SCHEMA.ROCKSDB_GLOBAL_INFO; +count(*) +3 +select VALUE into @keysIn from INFORMATION_SCHEMA.ROCKSDB_COMPACTION_STATS where CF_NAME = 'default' and LEVEL = 'Sum' and TYPE = 'KeyIn'; +CREATE TABLE t1 (i1 INT, i2 INT, PRIMARY KEY (i1)) ENGINE = ROCKSDB; +INSERT INTO t1 VALUES (1, 1), (2, 2), (3, 3); +select * from INFORMATION_SCHEMA.ROCKSDB_GLOBAL_INFO; +TYPE NAME VALUE +BINLOG FILE master-bin.000001 +BINLOG POS 1066 +BINLOG GTID uuid:5 +MAX_INDEX_ID MAX_INDEX_ID max_index_id +CF_FLAGS 0 default [0] +CF_FLAGS 1 __system__ [0] +select count(*) from INFORMATION_SCHEMA.ROCKSDB_GLOBAL_INFO; +count(*) +6 +set global rocksdb_force_flush_memtable_now = true; +set global rocksdb_compact_cf='default'; +select case when VALUE-@keysIn >= 3 then 'true' else 'false' end from INFORMATION_SCHEMA.ROCKSDB_COMPACTION_STATS where CF_NAME = 'default' and LEVEL = 'Sum' and TYPE = 'KeyIn'; +case when VALUE-@keysIn >= 3 then 'true' else 'false' end +true +CREATE INDEX tindex1 on t1 (i1); +CREATE INDEX tindex2 on t1 (i2); +select * from INFORMATION_SCHEMA.ROCKSDB_GLOBAL_INFO where TYPE = 'CF_FLAGS'; +TYPE NAME VALUE +CF_FLAGS 0 default [0] +CF_FLAGS 1 __system__ [0] +CREATE 
TABLE t2 ( +a int, +b int, +c int, +d int, +e int, +PRIMARY KEY (a) COMMENT "cf_a", +KEY (b) COMMENT "cf_b", +KEY (c) COMMENT "cf_c", +KEY (d) COMMENT "$per_index_cf", +KEY (e) COMMENT "rev:cf_d") ENGINE=ROCKSDB; +select * from INFORMATION_SCHEMA.ROCKSDB_GLOBAL_INFO where TYPE = 'CF_FLAGS'; +TYPE NAME VALUE +CF_FLAGS 0 default [0] +CF_FLAGS 1 __system__ [0] +CF_FLAGS 2 cf_a [0] +CF_FLAGS 3 cf_b [0] +CF_FLAGS 4 cf_c [0] +CF_FLAGS 5 test.t2.d [2] +CF_FLAGS 6 rev:cf_d [1] +CREATE TABLE t3 (a INT, PRIMARY KEY (a)) ENGINE=ROCKSDB; +insert into t3 (a) values (1), (2), (3); +SET @ORIG_ROCKSDB_PAUSE_BACKGROUND_WORK = @@GLOBAL.ROCKSDB_PAUSE_BACKGROUND_WORK; +SHOW GLOBAL VARIABLES LIKE 'ROCKSDB_PAUSE_BACKGROUND_WORK'; +Variable_name Value +rocksdb_pause_background_work OFF +SET GLOBAL ROCKSDB_PAUSE_BACKGROUND_WORK=1; +SHOW GLOBAL VARIABLES LIKE 'ROCKSDB_PAUSE_BACKGROUND_WORK'; +Variable_name Value +rocksdb_pause_background_work ON +SET GLOBAL ROCKSDB_PAUSE_BACKGROUND_WORK=1; +SHOW GLOBAL VARIABLES LIKE 'ROCKSDB_PAUSE_BACKGROUND_WORK'; +Variable_name Value +rocksdb_pause_background_work ON +DROP TABLE t3; +cf_id:0,index_id:268 +SET GLOBAL ROCKSDB_PAUSE_BACKGROUND_WORK=0; +SHOW GLOBAL VARIABLES LIKE 'ROCKSDB_PAUSE_BACKGROUND_WORK'; +Variable_name Value +rocksdb_pause_background_work OFF +next line shouldn't cause assertion to fail +SET GLOBAL ROCKSDB_PAUSE_BACKGROUND_WORK=0; +SHOW GLOBAL VARIABLES LIKE 'ROCKSDB_PAUSE_BACKGROUND_WORK'; +Variable_name Value +rocksdb_pause_background_work OFF +SET GLOBAL ROCKSDB_PAUSE_BACKGROUND_WORK = @ORIG_ROCKSDB_PAUSE_BACKGROUND_WORK; +DROP TABLE t1; +DROP TABLE t2; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/innodb_i_s_tables_disabled.result b/storage/rocksdb/mysql-test/rocksdb/r/innodb_i_s_tables_disabled.result new file mode 100644 index 0000000000000..c49dbba751abb --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/innodb_i_s_tables_disabled.result @@ -0,0 +1,344 @@ +SELECT * FROM INFORMATION_SCHEMA.INNODB_TRX; +trx_id 
trx_state trx_started trx_requested_lock_id trx_wait_started trx_weight trx_mysql_thread_id trx_query trx_operation_state trx_tables_in_use trx_tables_locked trx_lock_structs trx_lock_memory_bytes trx_rows_locked trx_rows_modified trx_concurrency_tickets trx_isolation_level trx_unique_checks trx_foreign_key_checks trx_last_foreign_key_error trx_adaptive_hash_latched trx_is_read_only trx_autocommit_non_locking +SELECT * FROM INFORMATION_SCHEMA.INNODB_LOCKS; +lock_id lock_trx_id lock_mode lock_type lock_table lock_index lock_space lock_page lock_rec lock_data +SELECT * FROM INFORMATION_SCHEMA.INNODB_LOCK_WAITS; +requesting_trx_id requested_lock_id blocking_trx_id blocking_lock_id +SELECT * FROM INFORMATION_SCHEMA.INNODB_CMP; +page_size compress_ops compress_ops_ok compress_time uncompress_ops uncompress_time +SELECT * FROM INFORMATION_SCHEMA.INNODB_CMP_RESET; +page_size compress_ops compress_ops_ok compress_time uncompress_ops uncompress_time +SELECT * FROM INFORMATION_SCHEMA.INNODB_CMP_PER_INDEX; +database_name table_name index_name compress_ops compress_ops_ok compress_time uncompress_ops uncompress_time +SELECT * FROM INFORMATION_SCHEMA.INNODB_CMP_PER_INDEX_RESET; +database_name table_name index_name compress_ops compress_ops_ok compress_time uncompress_ops uncompress_time +SELECT * FROM INFORMATION_SCHEMA.INNODB_CMPMEM; +page_size buffer_pool_instance pages_used pages_free relocation_ops relocation_time +SELECT * FROM INFORMATION_SCHEMA.INNODB_CMPMEM_RESET; +page_size buffer_pool_instance pages_used pages_free relocation_ops relocation_time +SELECT * FROM INFORMATION_SCHEMA.INNODB_METRICS; +NAME SUBSYSTEM COUNT MAX_COUNT MIN_COUNT AVG_COUNT COUNT_RESET MAX_COUNT_RESET MIN_COUNT_RESET AVG_COUNT_RESET TIME_ENABLED TIME_DISABLED TIME_ELAPSED TIME_RESET STATUS TYPE COMMENT +metadata_table_handles_opened metadata 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Number of table handles opened +metadata_table_handles_closed metadata 0 NULL NULL 
NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Number of table handles closed +metadata_table_reference_count metadata 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Table reference counter +lock_deadlocks lock 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Number of deadlocks +lock_timeouts lock 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Number of lock timeouts +lock_rec_lock_waits lock 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Number of times enqueued into record lock wait queue +lock_table_lock_waits lock 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Number of times enqueued into table lock wait queue +lock_rec_lock_requests lock 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Number of record locks requested +lock_rec_lock_created lock 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Number of record locks created +lock_rec_lock_removed lock 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Number of record locks removed from the lock queue +lock_rec_locks lock 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Current number of record locks on tables +lock_table_lock_created lock 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Number of table locks created +lock_table_lock_removed lock 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Number of table locks removed from the lock queue +lock_table_locks lock 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Current number of table locks on tables +lock_row_lock_current_waits lock 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled status_counter Number of row locks currently being waited for (innodb_row_lock_current_waits) +lock_row_lock_time lock 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled 
status_counter Time spent in acquiring row locks, in milliseconds (innodb_row_lock_time) +lock_row_lock_time_max lock 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled value The maximum time to acquire a row lock, in milliseconds (innodb_row_lock_time_max) +lock_row_lock_waits lock 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled status_counter Number of times a row lock had to be waited for (innodb_row_lock_waits) +lock_row_lock_time_avg lock 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled value The average time to acquire a row lock, in milliseconds (innodb_row_lock_time_avg) +buffer_pool_size server 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled value Server buffer pool size (all buffer pools) in bytes +buffer_pool_reads buffer 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled status_counter Number of reads directly from disk (innodb_buffer_pool_reads) +buffer_pool_read_requests buffer 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled status_counter Number of logical read requests (innodb_buffer_pool_read_requests) +buffer_pool_write_requests buffer 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled status_counter Number of write requests (innodb_buffer_pool_write_requests) +buffer_pool_wait_free buffer 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled status_counter Number of times waited for free buffer (innodb_buffer_pool_wait_free) +buffer_pool_read_ahead buffer 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled status_counter Number of pages read as read ahead (innodb_buffer_pool_read_ahead) +buffer_pool_read_ahead_evicted buffer 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled status_counter Read-ahead pages evicted without being accessed (innodb_buffer_pool_read_ahead_evicted) +buffer_pool_pages_total buffer 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled value Total buffer pool size in pages 
(innodb_buffer_pool_pages_total) +buffer_pool_pages_misc buffer 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled value Buffer pages for misc use such as row locks or the adaptive hash index (innodb_buffer_pool_pages_misc) +buffer_pool_pages_data buffer 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled value Buffer pages containing data (innodb_buffer_pool_pages_data) +buffer_pool_bytes_data buffer 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled value Buffer bytes containing data (innodb_buffer_pool_bytes_data) +buffer_pool_pages_dirty buffer 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled value Buffer pages currently dirty (innodb_buffer_pool_pages_dirty) +buffer_pool_bytes_dirty buffer 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled value Buffer bytes currently dirty (innodb_buffer_pool_bytes_dirty) +buffer_pool_pages_free buffer 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled value Buffer pages currently free (innodb_buffer_pool_pages_free) +buffer_pages_created buffer 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled status_counter Number of pages created (innodb_pages_created) +buffer_pages_written buffer 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled status_counter Number of pages written (innodb_pages_written) +buffer_index_pages_written buffer 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled status_counter Number of index pages written (innodb_index_pages_written) +buffer_non_index_pages_written buffer 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled status_counter Number of non index pages written (innodb_non_index_pages_written) +buffer_pages_read buffer 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled status_counter Number of pages read (innodb_pages_read) +buffer_pages0_read buffer 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled status_counter Number of page 0 read 
(innodb_pages0_read) +buffer_index_sec_rec_cluster_reads buffer 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled status_counter Number of secondary record reads triggered cluster read +buffer_index_sec_rec_cluster_reads_avoided buffer 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled status_counter Number of secondary record reads avoided triggering cluster read +buffer_data_reads buffer 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled status_counter Amount of data read in bytes (innodb_data_reads) +buffer_data_written buffer 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled status_counter Amount of data written in bytes (innodb_data_written) +buffer_flush_batch_scanned buffer 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled set_owner Total pages scanned as part of flush batch +buffer_flush_batch_num_scan buffer 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled set_member Number of times buffer flush list flush is called +buffer_flush_batch_scanned_per_call buffer 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled set_member Pages scanned per flush batch scan +buffer_flush_batch_total_pages buffer 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled set_owner Total pages flushed as part of flush batch +buffer_flush_batches buffer 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled set_member Number of flush batches +buffer_flush_batch_pages buffer 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled set_member Pages queued as a flush batch +buffer_flush_neighbor_total_pages buffer 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled set_owner Total neighbors flushed as part of neighbor flush +buffer_flush_neighbor buffer 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled set_member Number of times neighbors flushing is invoked +buffer_flush_neighbor_pages buffer 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL 
disabled set_member Pages queued as a neighbor batch +buffer_flush_n_to_flush_requested buffer 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Number of pages requested for flushing. +buffer_flush_n_to_flush_by_age buffer 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Number of pages target by LSN Age for flushing. +buffer_flush_adaptive_avg_time_slot buffer 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Avg time (ms) spent for adaptive flushing recently per slot. +buffer_LRU_batch_flush_avg_time_slot buffer 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Avg time (ms) spent for LRU batch flushing recently per slot. +buffer_flush_adaptive_avg_time_thread buffer 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Avg time (ms) spent for adaptive flushing recently per thread. +buffer_LRU_batch_flush_avg_time_thread buffer 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Avg time (ms) spent for LRU batch flushing recently per thread. +buffer_flush_adaptive_avg_time_est buffer 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Estimated time (ms) spent for adaptive flushing recently. +buffer_LRU_batch_flush_avg_time_est buffer 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Estimated time (ms) spent for LRU batch flushing recently. +buffer_flush_avg_time buffer 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Avg time (ms) spent for flushing recently. +buffer_flush_adaptive_avg_pass buffer 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Numner of adaptive flushes passed during the recent Avg period. +buffer_LRU_batch_flush_avg_pass buffer 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Number of LRU batch flushes passed during the recent Avg period. 
+buffer_flush_avg_pass buffer 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Number of flushes passed during the recent Avg period. +buffer_LRU_get_free_loops buffer 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Total loops in LRU get free. +buffer_LRU_get_free_waits buffer 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Total sleep waits in LRU get free. +buffer_flush_avg_page_rate buffer 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Average number of pages at which flushing is happening +buffer_flush_lsn_avg_rate buffer 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Average redo generation rate +buffer_flush_pct_for_dirty buffer 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Percent of IO capacity used to avoid max dirty page limit +buffer_flush_pct_for_lsn buffer 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Percent of IO capacity used to avoid reusable redo space limit +buffer_flush_sync_waits buffer 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Number of times a wait happens due to sync flushing +buffer_flush_adaptive_total_pages buffer 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled set_owner Total pages flushed as part of adaptive flushing +buffer_flush_adaptive buffer 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled set_member Number of adaptive batches +buffer_flush_adaptive_pages buffer 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled set_member Pages queued as an adaptive batch +buffer_flush_sync_total_pages buffer 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled set_owner Total pages flushed as part of sync batches +buffer_flush_sync buffer 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled set_member Number of sync batches +buffer_flush_sync_pages buffer 0 NULL NULL NULL 0 NULL NULL NULL NULL 
NULL NULL NULL disabled set_member Pages queued as a sync batch +buffer_flush_background_total_pages buffer 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled set_owner Total pages flushed as part of background batches +buffer_flush_background buffer 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled set_member Number of background batches +buffer_flush_background_pages buffer 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled set_member Pages queued as a background batch +buffer_LRU_batch_scanned buffer 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled set_owner Total pages scanned as part of LRU batch +buffer_LRU_batch_num_scan buffer 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled set_member Number of times LRU batch is called +buffer_LRU_batch_scanned_per_call buffer 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled set_member Pages scanned per LRU batch call +buffer_LRU_batch_flush_total_pages buffer 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled set_owner Total pages flushed as part of LRU batches +buffer_LRU_batches_flush buffer 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled set_member Number of LRU batches +buffer_LRU_batch_flush_pages buffer 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled set_member Pages queued as an LRU batch +buffer_LRU_batch_evict_total_pages buffer 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled set_owner Total pages evicted as part of LRU batches +buffer_LRU_batches_evict buffer 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled set_member Number of LRU batches +buffer_LRU_batch_evict_pages buffer 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled set_member Pages queued as an LRU batch +buffer_LRU_single_flush_scanned buffer 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled set_owner Total pages scanned as part of single page LRU flush 
+buffer_LRU_single_flush_num_scan buffer 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled set_member Number of times single page LRU flush is called +buffer_LRU_single_flush_scanned_per_call buffer 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled set_member Page scanned per single LRU flush +buffer_LRU_single_flush_failure_count Buffer 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Number of times attempt to flush a single page from LRU failed +buffer_LRU_get_free_search Buffer 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Number of searches performed for a clean page +buffer_LRU_search_scanned buffer 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled set_owner Total pages scanned as part of LRU search +buffer_LRU_search_num_scan buffer 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled set_member Number of times LRU search is performed +buffer_LRU_search_scanned_per_call buffer 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled set_member Page scanned per single LRU search +buffer_LRU_unzip_search_scanned buffer 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled set_owner Total pages scanned as part of LRU unzip search +buffer_LRU_unzip_search_num_scan buffer 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled set_member Number of times LRU unzip search is performed +buffer_LRU_unzip_search_scanned_per_call buffer 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled set_member Page scanned per single LRU unzip search +buffer_page_read_index_leaf buffer_page_io 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Number of Index Leaf Pages read +buffer_page_read_index_non_leaf buffer_page_io 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Number of Index Non-leaf Pages read +buffer_page_read_index_ibuf_leaf buffer_page_io 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL 
disabled counter Number of Insert Buffer Index Leaf Pages read +buffer_page_read_index_ibuf_non_leaf buffer_page_io 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Number of Insert Buffer Index Non-Leaf Pages read +buffer_page_read_undo_log buffer_page_io 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Number of Undo Log Pages read +buffer_page_read_index_inode buffer_page_io 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Number of Index Inode Pages read +buffer_page_read_ibuf_free_list buffer_page_io 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Number of Insert Buffer Free List Pages read +buffer_page_read_ibuf_bitmap buffer_page_io 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Number of Insert Buffer Bitmap Pages read +buffer_page_read_system_page buffer_page_io 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Number of System Pages read +buffer_page_read_trx_system buffer_page_io 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Number of Transaction System Pages read +buffer_page_read_fsp_hdr buffer_page_io 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Number of File Space Header Pages read +buffer_page_read_xdes buffer_page_io 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Number of Extent Descriptor Pages read +buffer_page_read_blob buffer_page_io 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Number of Uncompressed BLOB Pages read +buffer_page_read_zblob buffer_page_io 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Number of First Compressed BLOB Pages read +buffer_page_read_zblob2 buffer_page_io 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Number of Subsequent Compressed BLOB Pages read +buffer_page_read_other buffer_page_io 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL 
NULL disabled counter Number of other/unknown (old version of InnoDB) Pages read +buffer_page_written_index_leaf buffer_page_io 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Number of Index Leaf Pages written +buffer_page_written_index_non_leaf buffer_page_io 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Number of Index Non-leaf Pages written +buffer_page_written_index_ibuf_leaf buffer_page_io 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Number of Insert Buffer Index Leaf Pages written +buffer_page_written_index_ibuf_non_leaf buffer_page_io 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Number of Insert Buffer Index Non-Leaf Pages written +buffer_page_written_undo_log buffer_page_io 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Number of Undo Log Pages written +buffer_page_written_index_inode buffer_page_io 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Number of Index Inode Pages written +buffer_page_written_ibuf_free_list buffer_page_io 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Number of Insert Buffer Free List Pages written +buffer_page_written_ibuf_bitmap buffer_page_io 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Number of Insert Buffer Bitmap Pages written +buffer_page_written_system_page buffer_page_io 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Number of System Pages written +buffer_page_written_trx_system buffer_page_io 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Number of Transaction System Pages written +buffer_page_written_fsp_hdr buffer_page_io 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Number of File Space Header Pages written +buffer_page_written_xdes buffer_page_io 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Number of Extent Descriptor Pages 
written +buffer_page_written_blob buffer_page_io 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Number of Uncompressed BLOB Pages written +buffer_page_written_zblob buffer_page_io 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Number of First Compressed BLOB Pages written +buffer_page_written_zblob2 buffer_page_io 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Number of Subsequent Compressed BLOB Pages written +buffer_page_written_other buffer_page_io 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Number of other/unknown (old version InnoDB) Pages written +os_data_reads os 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled status_counter Number of reads initiated (innodb_data_reads) +os_data_writes os 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled status_counter Number of writes initiated (innodb_data_writes) +os_data_fsyncs os 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled status_counter Number of fsync() calls (innodb_data_fsyncs) +os_pending_reads os 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Number of reads pending +os_pending_writes os 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Number of writes pending +os_log_bytes_written os 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled status_counter Bytes of log written (innodb_os_log_written) +os_log_fsyncs os 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled status_counter Number of fsync log writes (innodb_os_log_fsyncs) +os_log_pending_fsyncs os 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled status_counter Number of pending fsync write (innodb_os_log_pending_fsyncs) +os_log_pending_writes os 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled status_counter Number of pending log file writes (innodb_os_log_pending_writes) +trx_rw_commits transaction 0 NULL NULL NULL 0 
NULL NULL NULL NULL NULL NULL NULL disabled counter Number of read-write transactions committed +trx_ro_commits transaction 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Number of read-only transactions committed +trx_nl_ro_commits transaction 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Number of non-locking auto-commit read-only transactions committed +trx_commits_insert_update transaction 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Number of transactions committed with inserts and updates +trx_rollbacks transaction 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Number of transactions rolled back +trx_rollbacks_savepoint transaction 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Number of transactions rolled back to savepoint +trx_rollback_active transaction 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Number of resurrected active transactions rolled back +trx_active_transactions transaction 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Number of active transactions +trx_rseg_history_len transaction 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled value Length of the TRX_RSEG_HISTORY list +trx_undo_slots_used transaction 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Number of undo slots used +trx_undo_slots_cached transaction 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Number of undo slots cached +trx_rseg_current_size transaction 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled value Current rollback segment size in pages +purge_del_mark_records purge 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Number of delete-marked rows purged +purge_upd_exist_or_extern_records purge 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Number of purges on updates of existing 
records and updates on delete marked record with externally stored field +purge_invoked purge 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Number of times purge was invoked +purge_undo_log_pages purge 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Number of undo log pages handled by the purge +purge_dml_delay_usec purge 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled value Microseconds DML to be delayed due to purge lagging +purge_stop_count purge 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled value Number of times purge was stopped +purge_resume_count purge 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled value Number of times purge was resumed +log_checkpoints recovery 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Number of checkpoints +log_lsn_last_flush recovery 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled value LSN of Last flush +log_lsn_last_checkpoint recovery 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled value LSN at last checkpoint +log_lsn_current recovery 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled value Current LSN value +log_lsn_checkpoint_age recovery 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Current LSN value minus LSN at last checkpoint +log_lsn_buf_pool_oldest recovery 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled value The oldest modified block LSN in the buffer pool +log_max_modified_age_async recovery 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled value Maximum LSN difference; when exceeded, start asynchronous preflush +log_max_modified_age_sync recovery 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled value Maximum LSN difference; when exceeded, start synchronous preflush +log_pending_log_flushes recovery 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Pending log flushes 
+log_pending_checkpoint_writes recovery 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Pending checkpoints +log_num_log_io recovery 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Number of log I/Os +log_waits recovery 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled status_counter Number of log waits due to small log buffer (innodb_log_waits) +log_write_requests recovery 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled status_counter Number of log write requests (innodb_log_write_requests) +log_writes recovery 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled status_counter Number of log writes (innodb_log_writes) +log_padded recovery 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled status_counter Bytes of log padded for log write ahead +compress_pages_compressed compression 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Number of pages compressed +compress_pages_decompressed compression 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Number of pages decompressed +compression_pad_increments compression 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Number of times padding is incremented to avoid compression failures +compression_pad_decrements compression 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Number of times padding is decremented due to good compressibility +compress_saved compression 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Number of bytes saved by page compression +compress_pages_page_compressed compression 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Number of pages compressed by page compression +compress_page_compressed_trim_op compression 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Number of TRIM operation performed by page compression +compress_pages_page_decompressed 
compression 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Number of pages decompressed by page compression +compress_pages_page_compression_error compression 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Number of page compression errors +compress_pages_encrypted compression 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Number of pages encrypted +compress_pages_decrypted compression 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Number of pages decrypted +index_page_splits index 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Number of index page splits +index_page_merge_attempts index 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Number of index page merge attempts +index_page_merge_successful index 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Number of successful index page merges +index_page_reorg_attempts index 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Number of index page reorganization attempts +index_page_reorg_successful index 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Number of successful index page reorganizations +index_page_discards index 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Number of index pages discarded +adaptive_hash_searches adaptive_hash_index 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled status_counter Number of successful searches using Adaptive Hash Index +adaptive_hash_searches_btree adaptive_hash_index 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled status_counter Number of searches using B-tree on an index search +adaptive_hash_pages_added adaptive_hash_index 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Number of index pages on which the Adaptive Hash Index is built +adaptive_hash_pages_removed adaptive_hash_index 0 NULL NULL 
NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Number of index pages whose corresponding Adaptive Hash Index entries were removed +adaptive_hash_rows_added adaptive_hash_index 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Number of Adaptive Hash Index rows added +adaptive_hash_rows_removed adaptive_hash_index 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Number of Adaptive Hash Index rows removed +adaptive_hash_rows_deleted_no_hash_entry adaptive_hash_index 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Number of rows deleted that did not have corresponding Adaptive Hash Index entries +adaptive_hash_rows_updated adaptive_hash_index 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Number of Adaptive Hash Index rows updated +file_num_open_files file_system 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled value Number of files currently open (innodb_num_open_files) +ibuf_merges_insert change_buffer 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled status_counter Number of inserted records merged by change buffering +ibuf_merges_delete_mark change_buffer 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled status_counter Number of deleted records merged by change buffering +ibuf_merges_delete change_buffer 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled status_counter Number of purge records merged by change buffering +ibuf_merges_discard_insert change_buffer 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled status_counter Number of insert merged operations discarded +ibuf_merges_discard_delete_mark change_buffer 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled status_counter Number of deleted merged operations discarded +ibuf_merges_discard_delete change_buffer 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled status_counter Number of purge merged operations discarded 
+ibuf_merges change_buffer 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled status_counter Number of change buffer merges +ibuf_size change_buffer 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled status_counter Change buffer size in pages +innodb_master_thread_sleeps server 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Number of times (seconds) master thread sleeps +innodb_activity_count server 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled status_counter Current server activity count +innodb_master_active_loops server 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Number of times master thread performs its tasks when server is active +innodb_master_idle_loops server 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Number of times master thread performs its tasks when server is idle +innodb_background_drop_table_usec server 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Time (in microseconds) spent to process drop table list +innodb_ibuf_merge_usec server 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Time (in microseconds) spent to process change buffer merge +innodb_log_flush_usec server 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Time (in microseconds) spent to flush log records +innodb_mem_validate_usec server 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Time (in microseconds) spent to do memory validation +innodb_master_purge_usec server 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Time (in microseconds) spent by master thread to purge records +innodb_dict_lru_usec server 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Time (in microseconds) spent to process DICT LRU list +innodb_dict_lru_count_active server 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Number of tables 
evicted from DICT LRU list in the active loop +innodb_dict_lru_count_idle server 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Number of tables evicted from DICT LRU list in the idle loop +innodb_checkpoint_usec server 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Time (in microseconds) spent by master thread to do checkpoint +innodb_dblwr_writes server 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled status_counter Number of doublewrite operations that have been performed (innodb_dblwr_writes) +innodb_dblwr_pages_written server 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled status_counter Number of pages that have been written for doublewrite operations (innodb_dblwr_pages_written) +innodb_page_size server 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled value InnoDB page size in bytes (innodb_page_size) +innodb_rwlock_s_spin_waits server 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled status_counter Number of rwlock spin waits due to shared latch request +innodb_rwlock_x_spin_waits server 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled status_counter Number of rwlock spin waits due to exclusive latch request +innodb_rwlock_sx_spin_waits server 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled status_counter Number of rwlock spin waits due to sx latch request +innodb_rwlock_s_spin_rounds server 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled status_counter Number of rwlock spin loop rounds due to shared latch request +innodb_rwlock_x_spin_rounds server 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled status_counter Number of rwlock spin loop rounds due to exclusive latch request +innodb_rwlock_sx_spin_rounds server 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled status_counter Number of rwlock spin loop rounds due to sx latch request +innodb_rwlock_s_os_waits server 0 NULL NULL NULL 0 
NULL NULL NULL NULL NULL NULL NULL disabled status_counter Number of OS waits due to shared latch request +innodb_rwlock_x_os_waits server 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled status_counter Number of OS waits due to exclusive latch request +innodb_rwlock_sx_os_waits server 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled status_counter Number of OS waits due to sx latch request +dml_reads dml 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled status_counter Number of rows read +dml_inserts dml 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled status_counter Number of rows inserted +dml_deletes dml 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled status_counter Number of rows deleted +dml_updates dml 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled status_counter Number of rows updated +dml_system_reads dml 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled status_counter Number of system rows read +dml_system_inserts dml 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled status_counter Number of system rows inserted +dml_system_deletes dml 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled status_counter Number of system rows deleted +dml_system_updates dml 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled status_counter Number of system rows updated +ddl_background_drop_indexes ddl 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Number of indexes waiting to be dropped after failed index creation +ddl_background_drop_tables ddl 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Number of tables in background drop table list +ddl_online_create_index ddl 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Number of indexes being created online +ddl_pending_alter_table ddl 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Number of ALTER TABLE, 
CREATE INDEX, DROP INDEX in progress +ddl_sort_file_alter_table ddl 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Number of sort files created during alter table +ddl_log_file_alter_table ddl 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Number of log files created during alter table +icp_attempts icp 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Number of attempts for index push-down condition checks +icp_no_match icp 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Index push-down condition does not match +icp_out_of_range icp 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Index push-down condition out of range +icp_match icp 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Index push-down condition matches +SELECT * FROM INFORMATION_SCHEMA.INNODB_FT_DEFAULT_STOPWORD; +value +a +about +an +are +as +at +be +by +com +de +en +for +from +how +i +in +is +it +la +of +on +or +that +the +this +to +was +what +when +where +who +will +with +und +the +www +SELECT * FROM INFORMATION_SCHEMA.INNODB_FT_DELETED; +DOC_ID +SELECT * FROM INFORMATION_SCHEMA.INNODB_FT_BEING_DELETED; +DOC_ID +SELECT * FROM INFORMATION_SCHEMA.INNODB_FT_INDEX_CACHE; +WORD FIRST_DOC_ID LAST_DOC_ID DOC_COUNT DOC_ID POSITION +SELECT * FROM INFORMATION_SCHEMA.INNODB_FT_INDEX_TABLE; +WORD FIRST_DOC_ID LAST_DOC_ID DOC_COUNT DOC_ID POSITION +SELECT * FROM INFORMATION_SCHEMA.INNODB_FT_CONFIG; +KEY VALUE +SELECT * FROM INFORMATION_SCHEMA.INNODB_BUFFER_POOL_STATS; +POOL_ID POOL_SIZE FREE_BUFFERS DATABASE_PAGES OLD_DATABASE_PAGES MODIFIED_DATABASE_PAGES PENDING_DECOMPRESS PENDING_READS PENDING_FLUSH_LRU PENDING_FLUSH_LIST PAGES_MADE_YOUNG PAGES_NOT_MADE_YOUNG PAGES_MADE_YOUNG_RATE PAGES_MADE_NOT_YOUNG_RATE NUMBER_PAGES_READ NUMBER_PAGES_CREATED NUMBER_PAGES_WRITTEN PAGES_READ_RATE PAGES_CREATE_RATE PAGES_WRITTEN_RATE NUMBER_PAGES_GET HIT_RATE YOUNG_MAKE_PER_THOUSAND_GETS 
NOT_YOUNG_MAKE_PER_THOUSAND_GETS NUMBER_PAGES_READ_AHEAD NUMBER_READ_AHEAD_EVICTED READ_AHEAD_RATE READ_AHEAD_EVICTED_RATE LRU_IO_TOTAL LRU_IO_CURRENT UNCOMPRESS_TOTAL UNCOMPRESS_CURRENT +SELECT * FROM INFORMATION_SCHEMA.INNODB_BUFFER_PAGE; +POOL_ID BLOCK_ID SPACE PAGE_NUMBER PAGE_TYPE FLUSH_TYPE FIX_COUNT IS_HASHED NEWEST_MODIFICATION OLDEST_MODIFICATION ACCESS_TIME TABLE_NAME INDEX_NAME NUMBER_RECORDS DATA_SIZE COMPRESSED_SIZE PAGE_STATE IO_FIX IS_OLD FREE_PAGE_CLOCK +SELECT * FROM INFORMATION_SCHEMA.INNODB_BUFFER_PAGE_LRU; +POOL_ID LRU_POSITION SPACE PAGE_NUMBER PAGE_TYPE FLUSH_TYPE FIX_COUNT IS_HASHED NEWEST_MODIFICATION OLDEST_MODIFICATION ACCESS_TIME TABLE_NAME INDEX_NAME NUMBER_RECORDS DATA_SIZE COMPRESSED_SIZE COMPRESSED IO_FIX IS_OLD FREE_PAGE_CLOCK +SELECT * FROM INFORMATION_SCHEMA.INNODB_SYS_TABLES; +TABLE_ID NAME FLAG N_COLS SPACE FILE_FORMAT ROW_FORMAT ZIP_PAGE_SIZE SPACE_TYPE +SELECT * FROM INFORMATION_SCHEMA.INNODB_SYS_TABLESTATS; +TABLE_ID NAME STATS_INITIALIZED NUM_ROWS CLUST_INDEX_SIZE OTHER_INDEX_SIZE MODIFIED_COUNTER AUTOINC REF_COUNT +SELECT * FROM INFORMATION_SCHEMA.INNODB_SYS_INDEXES; +INDEX_ID NAME TABLE_ID TYPE N_FIELDS PAGE_NO SPACE MERGE_THRESHOLD +SELECT * FROM INFORMATION_SCHEMA.INNODB_SYS_COLUMNS; +TABLE_ID NAME POS MTYPE PRTYPE LEN +SELECT * FROM INFORMATION_SCHEMA.INNODB_SYS_FIELDS; +INDEX_ID NAME POS +SELECT * FROM INFORMATION_SCHEMA.INNODB_SYS_FOREIGN; +ID FOR_NAME REF_NAME N_COLS TYPE +SELECT * FROM INFORMATION_SCHEMA.INNODB_SYS_FOREIGN_COLS; +ID FOR_COL_NAME REF_COL_NAME POS +SELECT * FROM INFORMATION_SCHEMA.INNODB_SYS_TABLESPACES; +SPACE NAME FLAG FILE_FORMAT ROW_FORMAT PAGE_SIZE ZIP_PAGE_SIZE SPACE_TYPE FS_BLOCK_SIZE FILE_SIZE ALLOCATED_SIZE +SELECT * FROM INFORMATION_SCHEMA.INNODB_SYS_DATAFILES; +SPACE PATH diff --git a/storage/rocksdb/mysql-test/rocksdb/r/insert.result b/storage/rocksdb/mysql-test/rocksdb/r/insert.result new file mode 100644 index 0000000000000..a1fb3ae90be07 --- /dev/null +++ 
b/storage/rocksdb/mysql-test/rocksdb/r/insert.result @@ -0,0 +1,202 @@ +DROP TABLE IF EXISTS t1, t2; +CREATE TABLE t1 (a INT, b CHAR(8), pk INT AUTO_INCREMENT PRIMARY KEY) ENGINE=rocksdb; +INSERT INTO t1 VALUES (100,'foobar',100),(1,'a',101),(2,'b',103),(3,'c',104),(4,'d',105),(5,'e',106); +SELECT a,b FROM t1; +a b +1 a +100 foobar +2 b +3 c +4 d +5 e +INSERT t1 VALUE (10,'foo',107),(11,'abc',108); +SELECT a,b FROM t1; +a b +1 a +10 foo +100 foobar +11 abc +2 b +3 c +4 d +5 e +INSERT INTO t1 (b,a) VALUES ('test',0); +SELECT a,b FROM t1; +a b +0 test +1 a +10 foo +100 foobar +11 abc +2 b +3 c +4 d +5 e +INSERT INTO t1 VALUES (DEFAULT,DEFAULT,NULL); +SELECT a,b FROM t1; +a b +0 test +1 a +10 foo +100 foobar +11 abc +2 b +3 c +4 d +5 e +NULL NULL +INSERT t1 (a) VALUE (10),(20); +SELECT a,b FROM t1; +a b +0 test +1 a +10 NULL +10 foo +100 foobar +11 abc +2 b +20 NULL +3 c +4 d +5 e +NULL NULL +INSERT INTO t1 SET a = 11, b = 'f'; +SELECT a,b FROM t1; +a b +0 test +1 a +10 NULL +10 foo +100 foobar +11 abc +11 f +2 b +20 NULL +3 c +4 d +5 e +NULL NULL +INSERT t1 SET b = DEFAULT; +SELECT a,b FROM t1; +a b +0 test +1 a +10 NULL +10 foo +100 foobar +11 abc +11 f +2 b +20 NULL +3 c +4 d +5 e +NULL NULL +NULL NULL +CREATE TABLE t2 (a INT, b CHAR(8), pk INT AUTO_INCREMENT PRIMARY KEY) ENGINE=rocksdb; +INSERT INTO t2 SELECT a,b,pk FROM t1; +INSERT INTO t1 (a) SELECT a FROM t2 WHERE b = 'foo'; +SELECT a,b FROM t1; +a b +0 test +1 a +10 NULL +10 NULL +10 foo +100 foobar +11 abc +11 f +2 b +20 NULL +3 c +4 d +5 e +NULL NULL +NULL NULL +INSERT t1 (a,b) SELECT a,b FROM t1; +SELECT a,b FROM t1; +a b +0 test +0 test +1 a +1 a +10 NULL +10 NULL +10 NULL +10 NULL +10 foo +10 foo +100 foobar +100 foobar +11 abc +11 abc +11 f +11 f +2 b +2 b +20 NULL +20 NULL +3 c +3 c +4 d +4 d +5 e +5 e +NULL NULL +NULL NULL +NULL NULL +NULL NULL +DROP TABLE t1, t2; +CREATE TABLE t1 (a INT, b CHAR(8), pk INT AUTO_INCREMENT PRIMARY KEY) ENGINE=rocksdb; +BEGIN; +INSERT INTO t1 (a,b) VALUES 
(1,'a'),(2,'b'),(3,'c'),(4,'d'),(5,'e'),(100,'foo'); +INSERT t1 (a,b) VALUE (10,'foo'),(11,'abc'); +COMMIT; +SELECT a,b FROM t1; +a b +1 a +10 foo +100 foo +11 abc +2 b +3 c +4 d +5 e +BEGIN; +INSERT INTO t1 (b,a) VALUES ('test',0); +SAVEPOINT spt1; +INSERT INTO t1 (a,b) VALUES (DEFAULT,DEFAULT); +RELEASE SAVEPOINT spt1; +INSERT INTO t1 (a,b) VALUES (DEFAULT,DEFAULT); +ROLLBACK; +SELECT a,b FROM t1; +a b +1 a +10 foo +100 foo +11 abc +2 b +3 c +4 d +5 e +BEGIN; +INSERT t1 (a) VALUE (10),(20); +SAVEPOINT spt1; +INSERT INTO t1 SET a = 11, b = 'f'; +INSERT t1 SET b = DEFAULT; +ROLLBACK TO SAVEPOINT spt1; +ERROR HY000: MyRocks currently does not support ROLLBACK TO SAVEPOINT if modifying rows. +INSERT INTO t1 (b,a) VALUES ('test1',10); +COMMIT; +ERROR HY000: This transaction was rolled back and cannot be committed. Only supported operation is to roll it back, so all pending changes will be discarded. Please restart another transaction. +SELECT a,b FROM t1; +a b +1 a +10 foo +100 foo +11 abc +2 b +3 c +4 d +5 e +DROP TABLE t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/insert_optimized_config.result b/storage/rocksdb/mysql-test/rocksdb/r/insert_optimized_config.result new file mode 100644 index 0000000000000..ded480578546b --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/insert_optimized_config.result @@ -0,0 +1,16 @@ +DROP TABLE IF EXISTS t1; +create table t1( +id bigint not null primary key, +i1 bigint, #unique +i2 bigint, #repeating +c1 varchar(20), #unique +c2 varchar(20), #repeating +index t1_2(i1) +) engine=rocksdb; +select count(*), sum(id), sum(i1), sum(i2) from t1; +count(*) sum(id) sum(i1) sum(i2) +50000 1250025000 1250025000 124980000 +select count(*), sum(id), sum(i1), sum(i2) from t1; +count(*) sum(id) sum(i1) sum(i2) +50000 1250025000 1250025000 124980000 +drop table t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/insert_with_keys.result b/storage/rocksdb/mysql-test/rocksdb/r/insert_with_keys.result new file mode 100644 index 
0000000000000..3ae0769338fe7 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/insert_with_keys.result @@ -0,0 +1,65 @@ +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 (a INT, b CHAR(8), pk INT AUTO_INCREMENT PRIMARY KEY, KEY(b)) ENGINE=rocksdb; +INSERT INTO t1 (a,b) VALUES (1,'a'),(2,'b'),(3,'c'),(4,'d'),(5,'e'); +INSERT INTO t1 (a,b) VALUES (100,'a'), (6,'f'); +INSERT INTO t1 (a,b) VALUES (30,'m'),(29,'n'); +INSERT INTO t1 (a,b) VALUES (1,'a'),(12345,'z'); +INSERT INTO t1 (a,b) VALUES (3,'a'),(0,''); +SELECT a,b FROM t1; +a b +0 +1 a +1 a +100 a +12345 z +2 b +29 n +3 a +3 c +30 m +4 d +5 e +6 f +DROP TABLE t1; +#---------------------------------------- +# UNIQUE KEYS are not supported currently +#----------------------------------------- +CREATE TABLE t1 (a INT PRIMARY KEY, b CHAR(8)) ENGINE=rocksdb; +INSERT INTO t1 (a,b) VALUES (1,'a'),(2,'b'),(3,'c'),(4,'d'),(5,'e'); +INSERT INTO t1 (a,b) VALUES (100,'a'), (6,'f'); +INSERT INTO t1 (a,b) VALUES (30,'m'),(29,'n'); +INSERT INTO t1 (a,b) VALUES (1,'a'),(12345,'z'); +ERROR 23000: Duplicate entry '1' for key 'PRIMARY' +INSERT INTO t1 (a,b) VALUES (3,'a'),(0,''); +ERROR 23000: Duplicate entry '3' for key 'PRIMARY' +INSERT INTO t1 (a,b) VALUES (0,''); +SELECT a,b FROM t1; +a b +0 +1 a +100 a +2 b +29 n +3 c +30 m +4 d +5 e +6 f +INSERT IGNORE INTO t1 (a,b) VALUES (1,'a'),(12345,'z'); +Warnings: +Warning 1062 Duplicate entry '1' for key 'PRIMARY' +INSERT INTO t1 (a,b) VALUES (1,'a'),(12345,'z') ON DUPLICATE KEY UPDATE b = CONCAT(b,b); +SELECT a,b FROM t1; +a b +0 +1 aa +100 a +12345 zz +2 b +29 n +3 c +30 m +4 d +5 e +6 f +DROP TABLE t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/issue100.result b/storage/rocksdb/mysql-test/rocksdb/r/issue100.result new file mode 100644 index 0000000000000..ee73ac3e13416 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/issue100.result @@ -0,0 +1,23 @@ +create table t1 ( +id int, +value int, +primary key (id) +) engine=rocksdb; +insert into t1 values(1,1),(2,2); +set 
autocommit=0; +begin; +insert into t1 values (50,50); +select * from t1; +id value +1 1 +2 2 +50 50 +update t1 set id=id+100; +select * from t1; +id value +101 1 +102 2 +150 50 +rollback; +set autocommit=1; +drop table t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/issue100_delete.result b/storage/rocksdb/mysql-test/rocksdb/r/issue100_delete.result new file mode 100644 index 0000000000000..9e55ebd006f9e --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/issue100_delete.result @@ -0,0 +1,17 @@ +create table ten(a int primary key); +insert into ten values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9); +create table one_k(a int primary key); +insert into one_k select A.a + B.a* 10 + C.a * 100 from ten A, ten B, ten C; +create table t100(pk int primary key, a int, b int, key(a)); +insert into t100 select a,a,a from test.one_k; +set global rocksdb_force_flush_memtable_now=1; +select num_rows, entry_deletes, entry_singledeletes from information_schema.rocksdb_index_file_map where index_number = (select max(index_number) from information_schema.rocksdb_index_file_map) order by entry_deletes, entry_singledeletes; +num_rows entry_deletes entry_singledeletes +1000 0 0 +update t100 set a=a+1; +set global rocksdb_force_flush_memtable_now=1; +select num_rows, entry_deletes, entry_singledeletes from information_schema.rocksdb_index_file_map where index_number = (select max(index_number) from information_schema.rocksdb_index_file_map) order by entry_deletes, entry_singledeletes; +num_rows entry_deletes entry_singledeletes +1000 0 0 +1000 0 1000 +drop table ten, t100, one_k; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/issue111.result b/storage/rocksdb/mysql-test/rocksdb/r/issue111.result new file mode 100644 index 0000000000000..e15519c3d7acb --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/issue111.result @@ -0,0 +1,37 @@ +connect con2,localhost,root,,; +connection default; +create table t1 ( +pk int not null primary key, +col1 int not null, +col2 int not 
null, +key(col1) +) engine=rocksdb; +create table ten(a int primary key); +insert into ten values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9); +create table one_k(a int primary key); +insert into one_k select A.a + B.a* 10 + C.a * 100 from ten A, ten B, ten C; +insert into t1 select a,a,a from one_k; +# Start the transaction, get the snapshot +begin; +select * from t1 where col1<10; +pk col1 col2 +0 0 0 +1 1 1 +2 2 2 +3 3 3 +4 4 4 +5 5 5 +6 6 6 +7 7 7 +8 8 8 +9 9 9 +# Connect with another connection and make a conflicting change +connection con2; +begin; +update t1 set col2=123456 where pk=0; +commit; +connection default; +update t1 set col2=col2+1 where col1 < 10 limit 5; +ERROR 40001: Deadlock found when trying to get lock; try restarting transaction +disconnect con2; +drop table t1, ten, one_k; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/issue290.result b/storage/rocksdb/mysql-test/rocksdb/r/issue290.result new file mode 100644 index 0000000000000..1a83a93bcbbc2 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/issue290.result @@ -0,0 +1,28 @@ +CREATE TABLE `linktable` ( +`id1` bigint(20) unsigned NOT NULL DEFAULT '0', +`id1_type` int(10) unsigned NOT NULL DEFAULT '0', +`id2` bigint(20) unsigned NOT NULL DEFAULT '0', +`id2_type` int(10) unsigned NOT NULL DEFAULT '0', +`link_type` bigint(20) unsigned NOT NULL DEFAULT '0', +`visibility` tinyint(3) NOT NULL DEFAULT '0', +`data` varchar(255) NOT NULL DEFAULT '', +`time` bigint(20) unsigned NOT NULL DEFAULT '0', +`version` int(11) unsigned NOT NULL DEFAULT '0', +PRIMARY KEY (link_type, `id1`,`id2`) COMMENT 'cf_link_pk', +KEY `id1_type` (`id1`,`link_type`,`visibility`,`time`,`id2`,`version`,`data`) COMMENT 'rev:cf_link_id1_type' +) ENGINE=RocksDB DEFAULT COLLATE=latin1_bin; +set global rocksdb_force_flush_memtable_now=1; +insert into linktable (id1, link_type, id2) values (2, 1, 1); +insert into linktable (id1, link_type, id2) values (2, 1, 2); +insert into linktable (id1, link_type, id2) values (2, 1, 3); 
+insert into linktable (id1, link_type, id2) values (2, 1, 4); +insert into linktable (id1, link_type, id2) values (2, 1, 5); +insert into linktable (id1, link_type, id2) values (2, 1, 6); +insert into linktable (id1, link_type, id2) values (2, 1, 7); +insert into linktable (id1, link_type, id2) values (2, 1, 8); +insert into linktable (id1, link_type, id2) values (2, 1, 9); +insert into linktable (id1, link_type, id2) values (2, 1, 10); +explain select id1, id2, link_type, data from linktable force index(primary) where id1=2 and link_type=1 and (id2=1 or id2=2 or id2=3 or id2=4 or id2=5); +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE linktable range PRIMARY PRIMARY 24 NULL # Using where +drop table linktable; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/issue314.result b/storage/rocksdb/mysql-test/rocksdb/r/issue314.result new file mode 100644 index 0000000000000..eee908002860a --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/issue314.result @@ -0,0 +1,12 @@ +drop table if exists t1; +SET SESSION TRANSACTION ISOLATION LEVEL SERIALIZABLE; +CREATE TABLE t1(a int); +SET TRANSACTION ISOLATION LEVEL READ COMMITTED; +INSERT INTO t1 VALUES(1); +select * from t1; +ERROR HY000: MyRocks supports only READ COMMITTED and REPEATABLE READ isolation levels. 
Please change from current isolation level SERIALIZABLE +SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED; +select * from t1; +a +1 +drop table t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/issue495.result b/storage/rocksdb/mysql-test/rocksdb/r/issue495.result new file mode 100644 index 0000000000000..c7ac34c629445 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/issue495.result @@ -0,0 +1,30 @@ +drop table if exists t; +create table t ( +a int, +b int, +c varchar(12249) collate latin1_bin, +d datetime, +e int, +f int, +g blob, +h int, +i int, +key (b,e), +key (h,b) +) engine=rocksdb +partition by linear hash (i) partitions 8 ; +insert into t values (1,1,'a',now(),1,1,'a',1,1); +insert into t values (1,1,'a',now(),1,1,'a',1,1); +insert into t values (1,1,'a',now(),1,1,'a',1,1); +insert into t values (1,1,'a',now(),1,1,'a',1,1); +insert into t values (1,1,'a',now(),1,1,'a',1,1); +insert into t values (1,1,'a',now(),1,1,'a',1,1); +insert into t values (1,1,'a',now(),1,1,'a',1,1); +insert into t values (1,1,'a',now(),1,1,'a',1,1); +select i from t group by h; +i +1 +select i from t group by h; +i +1 +drop table t; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/level_read_committed.result b/storage/rocksdb/mysql-test/rocksdb/r/level_read_committed.result new file mode 100644 index 0000000000000..b68b37cf6c5ca --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/level_read_committed.result @@ -0,0 +1,111 @@ +DROP TABLE IF EXISTS t1; +connect con1,localhost,root,,; +SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED; +connect con2,localhost,root,,; +SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED; +connection con1; +CREATE TABLE t1 (a INT, pk INT AUTO_INCREMENT PRIMARY KEY) ENGINE=rocksdb; +START TRANSACTION; +SELECT a FROM t1; +a +connection con2; +BEGIN; +INSERT INTO t1 (a) VALUES(1); +connection con1; +SELECT a FROM t1; +a +connection con2; +INSERT INTO t1 (a) VALUES (2); +connection con1; +SELECT a FROM t1; +a +INSERT INTO t1 
(a) SELECT a+100 FROM t1; +SELECT a FROM t1; +a +connection con2; +SELECT a FROM t1; +a +1 +2 +COMMIT; +SELECT a FROM t1; +a +1 +2 +connection con1; +SELECT a FROM t1; +a +1 +2 +INSERT INTO t1 (a) SELECT a+200 FROM t1; +SELECT a FROM t1; +a +1 +2 +201 +202 +COMMIT; +SELECT a FROM t1; +a +1 +2 +201 +202 +connection con2; +SELECT a FROM t1; +a +1 +2 +201 +202 +connection default; +CREATE TABLE t2 (a INT PRIMARY KEY) ENGINE=rocksdb; +INSERT INTO t2 (a) VALUES (1); +COMMIT; +connection con1; +BEGIN; +SELECT a from t2; +a +1 +INSERT INTO t2 (a) VALUES (1), (3); +ERROR 23000: Duplicate entry '1' for key 'PRIMARY' +connection con2; +INSERT INTO t2 (a) VALUES (2); +COMMIT; +connection con1; +SELECT a from t2; +a +1 +2 +COMMIT; +connection default; +disconnect con1; +disconnect con2; +DROP TABLE t1; +DROP TABLE t2; +CREATE TABLE t3 ( +pk int unsigned PRIMARY KEY, +count int unsigned DEFAULT '0' +) ENGINE=ROCKSDB; +connect con1,localhost,root,,; +SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED; +connect con2,localhost,root,,; +SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED; +connection con1; +BEGIN; +SELECT * FROM t3; +pk count +connection con2; +BEGIN; +INSERT INTO t3 (pk) VALUES(1) ON DUPLICATE KEY UPDATE count=count+1; +COMMIT; +connection con1; +INSERT INTO t3 (pk) VALUES(1) ON DUPLICATE KEY UPDATE count=count+1; +COMMIT; +SELECT count FROM t3; +count +1 +connection default; +disconnect con1; +disconnect con2; +DROP TABLE t3; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/level_read_uncommitted.result b/storage/rocksdb/mysql-test/rocksdb/r/level_read_uncommitted.result new file mode 100644 index 0000000000000..68fbe5632cbff --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/level_read_uncommitted.result @@ -0,0 +1,116 @@ +DROP TABLE IF EXISTS t1; +connect con1,localhost,root,,; +SET SESSION TRANSACTION ISOLATION LEVEL READ UNCOMMITTED; +connect con2,localhost,root,,; +SET SESSION TRANSACTION ISOLATION LEVEL READ UNCOMMITTED; +connection con1; 
+CREATE TABLE t1 (a ) ENGINE= ; +START TRANSACTION; +SELECT a FROM t1; +a +connection con2; +BEGIN; +INSERT INTO t1 (a) VALUES(1); +# WARNING: Statement ended with errno 0, errname ''. +# If it differs from the result file, it might indicate a problem. +connection con1; +SELECT a FROM t1; +a +1 +connection con2; +INSERT INTO t1 (a) VALUES (2); +# WARNING: Statement ended with errno 0, errname ''. +# If it differs from the result file, it might indicate a problem. +connection con1; +SELECT a FROM t1; +a +1 +2 +INSERT INTO t1 (a) SELECT a+100 FROM t1; +# WARNING: Statement ended with errno 0, errname ''. +# If it differs from the result file, it might indicate a problem. +SELECT a FROM t1; +a +1 +101 +102 +2 +connection con2; +SELECT a FROM t1; +a +1 +101 +102 +2 +COMMIT; +SELECT a FROM t1; +a +1 +101 +102 +2 +connection con1; +SELECT a FROM t1; +a +1 +101 +102 +2 +INSERT INTO t1 (a) SELECT a+200 FROM t1; +# WARNING: Statement ended with errno 0, errname ''. +# If it differs from the result file, it might indicate a problem. 
+SELECT a FROM t1; +a +1 +101 +102 +2 +201 +202 +301 +302 +COMMIT; +SELECT a FROM t1; +a +1 +101 +102 +2 +201 +202 +301 +302 +connection con2; +SELECT a FROM t1; +a +1 +101 +102 +2 +201 +202 +301 +302 +connection default; +disconnect con1; +disconnect con2; +DROP TABLE t1; +DROP TABLE IF EXISTS t1; +connect con1,localhost,root,,; +connect con2,localhost,root,,; +connection con1; +CREATE TABLE t1 (a ) ENGINE= ; +SET SESSION TRANSACTION ISOLATION LEVEL READ UNCOMMITTED; +START TRANSACTION WITH CONSISTENT SNAPSHOT; +connection con2; +INSERT INTO t1 (a) VALUES (1); +connection con1; +# If consistent read works on this isolation level (READ UNCOMMITTED), the following SELECT should not return the value we inserted (1) +SELECT a FROM t1; +a +1 +COMMIT; +connection default; +disconnect con1; +disconnect con2; +DROP TABLE t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/level_repeatable_read.result b/storage/rocksdb/mysql-test/rocksdb/r/level_repeatable_read.result new file mode 100644 index 0000000000000..13da8a0ffeb41 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/level_repeatable_read.result @@ -0,0 +1,100 @@ +DROP TABLE IF EXISTS t1; +connect con1,localhost,root,,; +SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ; +connect con2,localhost,root,,; +SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ; +connection con1; +CREATE TABLE t1 (a INT, pk INT AUTO_INCREMENT PRIMARY KEY) ENGINE=rocksdb; +START TRANSACTION; +SELECT a FROM t1; +a +connection con2; +BEGIN; +INSERT INTO t1 (a) VALUES(1); +connection con1; +SELECT a FROM t1; +a +connection con2; +INSERT INTO t1 (a) VALUES (2); +connection con1; +SELECT a FROM t1; +a +INSERT INTO t1 (a) SELECT a+100 FROM t1; +SELECT a FROM t1; +a +connection con2; +SELECT a FROM t1; +a +1 +2 +COMMIT; +SELECT a FROM t1; +a +1 +2 +connection con1; +SELECT a FROM t1; +a +INSERT INTO t1 (a) SELECT a+200 FROM t1; +SELECT a FROM t1; +a +COMMIT; +SELECT a FROM t1; +a +1 +2 +connection con2; +SELECT a FROM t1; +a +1 +2 
+connection default; +CREATE TABLE t2 (a INT PRIMARY KEY) ENGINE=rocksdb; +INSERT INTO t2 (a) VALUES (1); +COMMIT; +connection con1; +BEGIN; +SELECT a from t2; +a +1 +INSERT INTO t2 (a) VALUES (1), (3); +ERROR 23000: Duplicate entry '1' for key 'PRIMARY' +connection con2; +INSERT INTO t2 (a) VALUES (2); +COMMIT; +connection con1; +SELECT a from t2; +a +1 +COMMIT; +connection default; +disconnect con1; +disconnect con2; +DROP TABLE t1; +DROP TABLE t2; +CREATE TABLE t3 ( +pk int unsigned PRIMARY KEY, +count int unsigned DEFAULT '0' +) ENGINE=ROCKSDB; +connect con1,localhost,root,,; +SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ; +connect con2,localhost,root,,; +SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ; +connection con1; +BEGIN; +SELECT * FROM t3; +pk count +connection con2; +BEGIN; +INSERT INTO t3 (pk) VALUES(1) ON DUPLICATE KEY UPDATE count=count+1; +COMMIT; +connection con1; +INSERT INTO t3 (pk) VALUES(1) ON DUPLICATE KEY UPDATE count=count+1; +COMMIT; +SELECT count FROM t3; +count +0 +connection default; +disconnect con1; +disconnect con2; +DROP TABLE t3; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/level_serializable.result b/storage/rocksdb/mysql-test/rocksdb/r/level_serializable.result new file mode 100644 index 0000000000000..3f57395fa374d --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/level_serializable.result @@ -0,0 +1,56 @@ +DROP TABLE IF EXISTS t1; +connect con1,localhost,root,,; +SET SESSION TRANSACTION ISOLATION LEVEL SERIALIZABLE; +connect con2,localhost,root,,; +SET SESSION TRANSACTION ISOLATION LEVEL SERIALIZABLE; +connection con1; +CREATE TABLE t1 (a ) ENGINE= ; +START TRANSACTION; +SELECT a FROM t1; +a +connection con2; +BEGIN; +INSERT INTO t1 (a) VALUES(1); +ERROR HY000: Lock wait timeout exceeded; try restarting transaction +# WARNING: Statement ended with errno 1205, errname 'ER_LOCK_WAIT_TIMEOUT'. +# If it differs from the result file, it might indicate a problem. 
+connection con1; +SELECT a FROM t1; +a +connection con2; +INSERT INTO t1 (a) VALUES (2); +ERROR HY000: Lock wait timeout exceeded; try restarting transaction +# WARNING: Statement ended with errno 1205, errname 'ER_LOCK_WAIT_TIMEOUT'. +# If it differs from the result file, it might indicate a problem. +connection con1; +SELECT a FROM t1; +a +INSERT INTO t1 (a) SELECT a+100 FROM t1; +# WARNING: Statement ended with errno 0, errname ''. +# If it differs from the result file, it might indicate a problem. +SELECT a FROM t1; +a +connection con2; +SELECT a FROM t1; +a +COMMIT; +SELECT a FROM t1; +a +connection con1; +SELECT a FROM t1; +a +INSERT INTO t1 (a) SELECT a+200 FROM t1; +# WARNING: Statement ended with errno 0, errname ''. +# If it differs from the result file, it might indicate a problem. +SELECT a FROM t1; +a +COMMIT; +SELECT a FROM t1; +a +connection con2; +SELECT a FROM t1; +a +connection default; +disconnect con1; +disconnect con2; +DROP TABLE t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/loaddata.result b/storage/rocksdb/mysql-test/rocksdb/r/loaddata.result new file mode 100644 index 0000000000000..a9f9c0b49e85e --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/loaddata.result @@ -0,0 +1,239 @@ +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 (a INT, b CHAR(8), pk INT AUTO_INCREMENT PRIMARY KEY) ENGINE=rocksdb; +LOAD DATA INFILE '/se_loaddata.dat' INTO TABLE t1 +FIELDS TERMINATED BY ',' (a,b); +SELECT a,b FROM t1; +a b +1 foo +2 bar +3 +4 abc +LOAD DATA LOCAL INFILE '/se_loaddata.dat' INTO TABLE t1 +CHARACTER SET utf8 COLUMNS TERMINATED BY ',' + ESCAPED BY '/' (a,b); +SELECT a,b FROM t1; +a b +1 foo +1 foo +2 bar +2 bar +3 +3 +4 abc +4 abc +LOAD DATA LOCAL INFILE '/se_loaddata.dat' INTO TABLE t1 +FIELDS TERMINATED BY ';' + (a) SET b='loaded'; +Warnings: +Warning 1262 Row 1 was truncated; it contained more data than there were input columns +Warning 1262 Row 2 was truncated; it contained more data than there were input columns +Warning 1262 Row 3 
was truncated; it contained more data than there were input columns +SELECT a,b FROM t1; +a b +0 loaded +1 foo +1 foo +102 loaded +2 bar +2 bar +3 +3 +4 abc +4 abc +5 loaded +LOAD DATA INFILE '/se_loaddata.dat' INTO TABLE t1 +FIELDS TERMINATED BY ';' + OPTIONALLY ENCLOSED BY '''' + LINES STARTING BY 'prefix:' +IGNORE 2 LINES (a,b); +Warnings: +Warning 1262 Row 2 was truncated; it contained more data than there were input columns +SELECT a,b FROM t1; +a b +0 +0 loaded +1 foo +1 foo +100 foo +102 loaded +2 bar +2 bar +3 +3 +4 abc +4 abc +5 loaded +7 test +LOAD DATA INFILE '/se_loaddata.dat' INTO TABLE t1; +Warnings: +Warning 1261 Row 1 doesn't contain data for all columns +Warning 1261 Row 2 doesn't contain data for all columns +Warning 1261 Row 3 doesn't contain data for all columns +Warning 1261 Row 4 doesn't contain data for all columns +SELECT a,b FROM t1; +a b +0 +0 loaded +1 foo +1 foo +1 foo +100 foo +102 loaded +2 bar +2 bar +2 bar +3 +3 +3 +4 abc +4 abc +4 abc +5 loaded +7 test +LOAD DATA INFILE '/se_replacedata.dat' REPLACE INTO TABLE t1; +Warnings: +Warning 1261 Row 1 doesn't contain data for all columns +Warning 1261 Row 2 doesn't contain data for all columns +Warning 1261 Row 3 doesn't contain data for all columns +Warning 1261 Row 4 doesn't contain data for all columns +SELECT a,b FROM t1; +a b +0 +0 loaded +1 aaa +1 foo +1 foo +1 foo +100 foo +102 loaded +2 bar +2 bar +2 bar +2 bbb +3 +3 +3 +3 ccc +4 abc +4 abc +4 abc +4 ddd +5 loaded +7 test +DROP TABLE t1; +set session unique_checks=0; +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 (a INT, b CHAR(8), pk INT AUTO_INCREMENT PRIMARY KEY) ENGINE=rocksdb; +LOAD DATA INFILE '/se_loaddata.dat' INTO TABLE t1 +FIELDS TERMINATED BY ',' (a,b); +SELECT a,b FROM t1; +a b +1 foo +2 bar +3 +4 abc +LOAD DATA LOCAL INFILE '/se_loaddata.dat' INTO TABLE t1 +CHARACTER SET utf8 COLUMNS TERMINATED BY ',' + ESCAPED BY '/' (a,b); +SELECT a,b FROM t1; +a b +1 foo +1 foo +2 bar +2 bar +3 +3 +4 abc +4 abc +LOAD DATA LOCAL INFILE 
'/se_loaddata.dat' INTO TABLE t1 +FIELDS TERMINATED BY ';' + (a) SET b='loaded'; +Warnings: +Warning 1262 Row 1 was truncated; it contained more data than there were input columns +Warning 1262 Row 2 was truncated; it contained more data than there were input columns +Warning 1262 Row 3 was truncated; it contained more data than there were input columns +SELECT a,b FROM t1; +a b +0 loaded +1 foo +1 foo +102 loaded +2 bar +2 bar +3 +3 +4 abc +4 abc +5 loaded +LOAD DATA INFILE '/se_loaddata.dat' INTO TABLE t1 +FIELDS TERMINATED BY ';' + OPTIONALLY ENCLOSED BY '''' + LINES STARTING BY 'prefix:' +IGNORE 2 LINES (a,b); +Warnings: +Warning 1262 Row 2 was truncated; it contained more data than there were input columns +SELECT a,b FROM t1; +a b +0 +0 loaded +1 foo +1 foo +100 foo +102 loaded +2 bar +2 bar +3 +3 +4 abc +4 abc +5 loaded +7 test +LOAD DATA INFILE '/se_loaddata.dat' INTO TABLE t1; +Warnings: +Warning 1261 Row 1 doesn't contain data for all columns +Warning 1261 Row 2 doesn't contain data for all columns +Warning 1261 Row 3 doesn't contain data for all columns +Warning 1261 Row 4 doesn't contain data for all columns +SELECT a,b FROM t1; +a b +0 +0 loaded +1 foo +1 foo +1 foo +100 foo +102 loaded +2 bar +2 bar +2 bar +3 +3 +3 +4 abc +4 abc +4 abc +5 loaded +7 test +LOAD DATA INFILE '/se_replacedata.dat' REPLACE INTO TABLE t1; +ERROR HY000: When unique checking is disabled in MyRocks, INSERT,UPDATE,LOAD statements with clauses that update or replace the key (i.e. INSERT ON DUPLICATE KEY UPDATE, REPLACE) are not allowed. 
Query: LOAD DATA INFILE '/se_replacedata.dat' REPLACE INTO TABLE t1 +SELECT a,b FROM t1; +a b +0 +0 loaded +1 foo +1 foo +1 foo +100 foo +102 loaded +2 bar +2 bar +2 bar +3 +3 +3 +4 abc +4 abc +4 abc +5 loaded +7 test +DROP TABLE t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/lock.result b/storage/rocksdb/mysql-test/rocksdb/r/lock.result new file mode 100644 index 0000000000000..36fea5b937fec --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/lock.result @@ -0,0 +1,106 @@ +connect con1,localhost,root,,; +SET lock_wait_timeout=1; +connection default; +DROP TABLE IF EXISTS t1, t2, t3; +CREATE TABLE t1 (id INT, id2 INT, pk INT AUTO_INCREMENT PRIMARY KEY) ENGINE=rocksdb; +INSERT INTO t1 (id,id2) VALUES (1,1),(1,2),(1,3); +LOCK TABLE t1 LOW_PRIORITY WRITE; +SELECT id2,COUNT(DISTINCT id) FROM t1 GROUP BY id2; +id2 COUNT(DISTINCT id) +1 1 +2 1 +3 1 +UPDATE t1 SET id=-1 WHERE id=1; +connection con1; +SELECT id,id2 FROM t1; +ERROR HY000: Lock wait timeout exceeded; try restarting transaction +LOCK TABLE t1 READ; +ERROR HY000: Lock wait timeout exceeded; try restarting transaction +connection default; +LOCK TABLE t1 READ; +UPDATE t1 SET id=1 WHERE id=1; +ERROR HY000: Table 't1' was locked with a READ lock and can't be updated +connection con1; +SELECT COUNT(DISTINCT id) FROM t1; +COUNT(DISTINCT id) +1 +UPDATE t1 SET id=2 WHERE id=2; +ERROR HY000: Lock wait timeout exceeded; try restarting transaction +LOCK TABLE t1 WRITE; +ERROR HY000: Lock wait timeout exceeded; try restarting transaction +LOCK TABLE t1 READ; +UNLOCK TABLES; +connection default; +CREATE TABLE t2 (a INT, b CHAR(8), PRIMARY KEY(a)) ENGINE=rocksdb; +ERROR HY000: Table 't2' was not locked with LOCK TABLES +UNLOCK TABLES; +CREATE TABLE t2 (id INT, id2 INT, pk INT AUTO_INCREMENT PRIMARY KEY) ENGINE=rocksdb; +LOCK TABLE t1 WRITE, t2 WRITE; +INSERT INTO t2 (id,id2) SELECT id,id2 FROM t1; +UPDATE t1 SET id=1 WHERE id=-1; +DROP TABLE t1,t2; +CREATE TABLE t1 (i1 INT, nr INT, pk INT AUTO_INCREMENT PRIMARY 
KEY) ENGINE=rocksdb; +CREATE TABLE t2 (nr INT, nm INT, pk INT AUTO_INCREMENT PRIMARY KEY) ENGINE=rocksdb; +INSERT INTO t2 (nr,nm) VALUES (1,3); +INSERT INTO t2 (nr,nm) VALUES (2,4); +LOCK TABLES t1 WRITE, t2 READ; +INSERT INTO t1 (i1,nr) SELECT 1, nr FROM t2 WHERE nm=3; +INSERT INTO t1 (i1,nr) SELECT 2, nr FROM t2 WHERE nm=4; +UNLOCK TABLES; +LOCK TABLES t1 WRITE; +INSERT INTO t1 (i1,nr) SELECT i1, nr FROM t1; +ERROR HY000: Table 't1' was not locked with LOCK TABLES +UNLOCK TABLES; +LOCK TABLES t1 WRITE, t1 AS t1_alias READ; +INSERT INTO t1 (i1,nr) SELECT i1, nr FROM t1 AS t1_alias; +DROP TABLE t1,t2; +ERROR HY000: Table 't2' was not locked with LOCK TABLES +UNLOCK TABLES; +DROP TABLE t1,t2; +CREATE TABLE t1 (a INT, b CHAR(8), PRIMARY KEY(a)) ENGINE=rocksdb; +CREATE TABLE t2 (a INT, b CHAR(8), PRIMARY KEY(b)) ENGINE=rocksdb; +CREATE TABLE t3 (a INT, b CHAR(8), pk INT PRIMARY KEY) ENGINE=rocksdb; +LOCK TABLES t1 WRITE, t2 WRITE, t3 WRITE; +DROP TABLE t2, t3, t1; +CREATE TABLE t1 (a INT, b CHAR(8), PRIMARY KEY(a)) ENGINE=rocksdb; +CREATE TABLE t2 (a INT, b CHAR(8), PRIMARY KEY(b)) ENGINE=rocksdb; +CREATE TABLE t3 (a INT, b CHAR(8), pk INT PRIMARY KEY) ENGINE=rocksdb; +LOCK TABLES t1 WRITE, t2 WRITE, t3 WRITE, t1 AS t4 READ; +ALTER TABLE t2 ADD COLUMN c2 INT; +DROP TABLE t1, t2, t3; +CREATE TABLE t1 (a INT, b CHAR(8), PRIMARY KEY(a)) ENGINE=rocksdb; +CREATE TABLE t2 (a INT, b CHAR(8), PRIMARY KEY(b)) ENGINE=rocksdb; +LOCK TABLE t1 READ, t2 READ; +FLUSH TABLE t1; +ERROR HY000: Table 't1' was locked with a READ lock and can't be updated +FLUSH TABLES; +ERROR HY000: Table 't2' was locked with a READ lock and can't be updated +FLUSH TABLES t1, t2 WITH READ LOCK; +ERROR HY000: Can't execute the given command because you have active locked tables or an active transaction +UNLOCK TABLES; +FLUSH TABLES t1, t2 WITH READ LOCK; +connection con1; +INSERT INTO t1 (a,b) VALUES (1,'a'),(2,'b'); +ERROR HY000: Lock wait timeout exceeded; try restarting transaction +connection default; 
+UNLOCK TABLES; +FLUSH TABLES WITH READ LOCK; +connection con1; +INSERT INTO t1 (a,b) VALUES (1,'a'),(2,'b'); +ERROR HY000: Lock wait timeout exceeded; try restarting transaction +connection default; +UNLOCK TABLES; +INSERT INTO t1 (a,b) VALUES (1,'a'),(2,'b'); +FLUSH TABLES WITH READ LOCK; +DROP TABLE t1, t2; +ERROR HY000: Can't execute the query because you have a conflicting read lock +UNLOCK TABLES; +DROP TABLE t1, t2; +disconnect con1; +CREATE TABLE t1 (a INT, b CHAR(8), PRIMARY KEY(a)) ENGINE=rocksdb; +CREATE TABLE t2 (a INT, b CHAR(8), PRIMARY KEY(b)) ENGINE=rocksdb; +LOCK TABLE t1 WRITE, t2 WRITE; +SELECT a,b FROM t1; +a b +UNLOCK TABLES; +DROP TABLE t1, t2; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/lock_info.result b/storage/rocksdb/mysql-test/rocksdb/r/lock_info.result new file mode 100644 index 0000000000000..d0f1221e47295 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/lock_info.result @@ -0,0 +1,31 @@ +DROP TABLE IF EXISTS t1; +DROP TABLE IF EXISTS t2; +create table t1 (a int, primary key(a) comment 'lock_into_cf1') engine=rocksdb; +insert into t1 values (1); +insert into t1 values (2); +create table t2 (a int, primary key(a) comment 'lock_info_cf2') engine=rocksdb; +insert into t2 values (1); +insert into t2 values (2); +set autocommit=0; +select * from t1 for update; +a +1 +2 +select * from t2 for update; +a +1 +2 +use information_schema; +select rocksdb_ddl.cf, rocksdb_locks.transaction_id, rocksdb_locks.key +from rocksdb_locks +left join rocksdb_ddl +on rocksdb_locks.column_family_id=rocksdb_ddl.column_family +order by rocksdb_ddl.cf; +cf transaction_id key +lock_info_cf2 _txn_id_ _key_ +lock_info_cf2 _txn_id_ _key_ +lock_into_cf1 _txn_id_ _key_ +lock_into_cf1 _txn_id_ _key_ +use test; +DROP TABLE t1; +DROP TABLE t2; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/lock_rows_not_exist.result b/storage/rocksdb/mysql-test/rocksdb/r/lock_rows_not_exist.result new file mode 100644 index 0000000000000..7898489d98d57 --- /dev/null +++ 
b/storage/rocksdb/mysql-test/rocksdb/r/lock_rows_not_exist.result @@ -0,0 +1,40 @@ +DROP TABLE IF EXISTS t1; +connect con1,localhost,root,,; +connect con2,localhost,root,,; +connection con1; +CREATE TABLE t (id1 int, id2 int, id3 int, value int, PRIMARY KEY (id1, id2, id3)) ENGINE=RocksDB; +SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ; +BEGIN; +SELECT * FROM t WHERE id1=1 AND id2=1 AND id3=1 FOR UPDATE; +id1 id2 id3 value +connection con2; +SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ; +BEGIN; +SELECT * FROM t WHERE id1=1 AND id2=1 AND id3=1 FOR UPDATE; +ERROR HY000: Lock wait timeout exceeded; try restarting transaction +SELECT * FROM t WHERE id1=1 AND id2=1 AND id3=2 FOR UPDATE; +id1 id2 id3 value +connection con1; +ROLLBACK; +BEGIN; +UPDATE t SET value=value+100 WHERE id1=1 AND id2=1 AND id3=1; +connection con2; +ROLLBACK; +BEGIN; +UPDATE t SET value=value+100 WHERE id1=1 AND id2=1 AND id3=1; +ERROR HY000: Lock wait timeout exceeded; try restarting transaction +UPDATE t SET value=value+100 WHERE id1=1 AND id2=0 AND id3=1; +connection con1; +ROLLBACK; +BEGIN; +DELETE FROM t WHERE id1=1 AND id2=1 AND id3=1; +connection con2; +ROLLBACK; +BEGIN; +DELETE FROM t WHERE id1=1 AND id2=1 AND id3=1; +ERROR HY000: Lock wait timeout exceeded; try restarting transaction +DELETE FROM t WHERE id1=1 AND id2=1 AND id3=0; +connection default; +disconnect con1; +disconnect con2; +DROP TABLE t; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/locking_issues.result b/storage/rocksdb/mysql-test/rocksdb/r/locking_issues.result new file mode 100644 index 0000000000000..d160b81f10efa --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/locking_issues.result @@ -0,0 +1,674 @@ + +----------------------------------------------------------------------- +- Locking issues case 1.1: +- Locking rows that do not exist when using all primary key columns in +- a WHERE clause +- using REPEATABLE READ transaction isolation level 
+----------------------------------------------------------------------- +DROP TABLE IF EXISTS t0; +CREATE TABLE t0(id1 INT, id2 INT, value INT, PRIMARY KEY(id1, id2)); +INSERT INTO t0 VALUES (1,1,0), (3,3,0), (4,4,0), (6,6,0); +connect con1,localhost,root,,; +connect con2,localhost,root,,; +connection con1; +SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ; +BEGIN; +SELECT * FROM t0 WHERE id1=1 AND id2=5 FOR UPDATE; +id1 id2 value +connection con2; +SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ; +BEGIN; +INSERT INTO t0 VALUES (1,5,0); +ERROR HY000: Lock wait timeout exceeded; try restarting transaction +SELECT * FROM t0 WHERE id1=1 AND id2=5 FOR UPDATE; +ERROR HY000: Lock wait timeout exceeded; try restarting transaction +connection con1; +COMMIT; +connection default; +disconnect con1; +disconnect con2; +DROP TABLE t0; + +----------------------------------------------------------------------- +- Locking issues case 1.1: +- Locking rows that do not exist when using all primary key columns in +- a WHERE clause +- using READ COMMITTED transaction isolation level +----------------------------------------------------------------------- +DROP TABLE IF EXISTS t0; +CREATE TABLE t0(id1 INT, id2 INT, value INT, PRIMARY KEY(id1, id2)); +INSERT INTO t0 VALUES (1,1,0), (3,3,0), (4,4,0), (6,6,0); +connect con1,localhost,root,,; +connect con2,localhost,root,,; +connection con1; +SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED; +BEGIN; +SELECT * FROM t0 WHERE id1=1 AND id2=5 FOR UPDATE; +id1 id2 value +connection con2; +SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED; +BEGIN; +INSERT INTO t0 VALUES (1,5,0); +ERROR HY000: Lock wait timeout exceeded; try restarting transaction +SELECT * FROM t0 WHERE id1=1 AND id2=5 FOR UPDATE; +ERROR HY000: Lock wait timeout exceeded; try restarting transaction +connection con1; +COMMIT; +connection default; +disconnect con1; +disconnect con2; +DROP TABLE t0; + 
+----------------------------------------------------------------------- +- Locking issues case 1.2: +- Locking rows that do not exist without using all primary key +- columns in a WHERE clause +- using REPEATABLE READ transaction isolation level +----------------------------------------------------------------------- +DROP TABLE IF EXISTS t0; +CREATE TABLE t0(id1 INT, id2 INT, value INT, PRIMARY KEY(id1, id2)); +INSERT INTO t0 VALUES (1,1,0), (3,3,0), (4,4,0), (6,6,0); +connect con1,localhost,root,,; +connect con2,localhost,root,,; +connection con1; +SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ; +BEGIN; +SELECT * FROM t0 WHERE id1=1 FOR UPDATE; +id1 id2 value +1 1 0 +connection con2; +SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ; +BEGIN; +SELECT * FROM t0 WHERE id1=1 AND id2=4 FOR UPDATE; +id1 id2 value +INSERT INTO t0 VALUES (1,5,0); +connection con1; +COMMIT; +connection default; +disconnect con1; +disconnect con2; +DROP TABLE t0; + +----------------------------------------------------------------------- +- Locking issues case 1.2: +- Locking rows that do not exist without using all primary key +- columns in a WHERE clause +- using READ COMMITTED transaction isolation level +----------------------------------------------------------------------- +DROP TABLE IF EXISTS t0; +CREATE TABLE t0(id1 INT, id2 INT, value INT, PRIMARY KEY(id1, id2)); +INSERT INTO t0 VALUES (1,1,0), (3,3,0), (4,4,0), (6,6,0); +connect con1,localhost,root,,; +connect con2,localhost,root,,; +connection con1; +SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED; +BEGIN; +SELECT * FROM t0 WHERE id1=1 FOR UPDATE; +id1 id2 value +1 1 0 +connection con2; +SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED; +BEGIN; +SELECT * FROM t0 WHERE id1=1 AND id2=4 FOR UPDATE; +id1 id2 value +INSERT INTO t0 VALUES (1,5,0); +connection con1; +COMMIT; +connection default; +disconnect con1; +disconnect con2; +DROP TABLE t0; + 
+----------------------------------------------------------------------- +- Locking issues case 2: +- Rows that are scanned but do not match the WHERE are not locked +- using REPEATABLE READ transaction isolation level unless +- rocksdb_lock_scanned_rows is on +----------------------------------------------------------------------- +DROP TABLE IF EXISTS t0; +SELECT @@global.rocksdb_lock_scanned_rows; +@@global.rocksdb_lock_scanned_rows +0 +CREATE TABLE t0(id INT PRIMARY KEY, value INT); +INSERT INTO t0 VALUES (1,0), (2,1), (3,0), (4,0), (5,1); +connect con1,localhost,root,,; +connect con2,localhost,root,,; +connection con1; +SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ; +BEGIN; +connection con2; +SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ; +BEGIN; +connection con1; +SELECT * FROM t0 WHERE value > 0 FOR UPDATE; +id value +2 1 +5 1 +connection con2; +UPDATE t0 SET VALUE=10 WHERE id=1; +UPDATE t0 SET VALUE=10 WHERE id=5; +ERROR HY000: Lock wait timeout exceeded; try restarting transaction +connection con1; +UPDATE t0 SET value=100 WHERE id in (4,5) and value>0; +connection con2; +SELECT * FROM t0 WHERE id=4 FOR UPDATE; +id value +4 0 +COMMIT; +SELECT * FROM t0; +id value +1 10 +2 1 +3 0 +4 0 +5 1 +connection con1; +COMMIT; +connection default; +disconnect con1; +disconnect con2; +DROP TABLE t0; + +----------------------------------------------------------------------- +- Locking issues case 2: +- Rows that are scanned but do not match the WHERE are not locked +- using READ COMMITTED transaction isolation level unless +- rocksdb_lock_scanned_rows is on +----------------------------------------------------------------------- +DROP TABLE IF EXISTS t0; +SELECT @@global.rocksdb_lock_scanned_rows; +@@global.rocksdb_lock_scanned_rows +0 +CREATE TABLE t0(id INT PRIMARY KEY, value INT); +INSERT INTO t0 VALUES (1,0), (2,1), (3,0), (4,0), (5,1); +connect con1,localhost,root,,; +connect con2,localhost,root,,; +connection con1; +SET SESSION TRANSACTION 
ISOLATION LEVEL READ COMMITTED; +BEGIN; +connection con2; +SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED; +BEGIN; +connection con1; +SELECT * FROM t0 WHERE value > 0 FOR UPDATE; +id value +2 1 +5 1 +connection con2; +UPDATE t0 SET VALUE=10 WHERE id=1; +UPDATE t0 SET VALUE=10 WHERE id=5; +ERROR HY000: Lock wait timeout exceeded; try restarting transaction +connection con1; +UPDATE t0 SET value=100 WHERE id in (4,5) and value>0; +connection con2; +SELECT * FROM t0 WHERE id=4 FOR UPDATE; +id value +4 0 +COMMIT; +SELECT * FROM t0; +id value +1 10 +2 1 +3 0 +4 0 +5 1 +connection con1; +COMMIT; +connection default; +disconnect con1; +disconnect con2; +DROP TABLE t0; + +----------------------------------------------------------------------- +- Locking issues case 2: +- Rows that are scanned but do not match the WHERE are not locked +- using REPEATABLE READ transaction isolation level unless +- rocksdb_lock_scanned_rows is on +----------------------------------------------------------------------- +DROP TABLE IF EXISTS t0; +SELECT @@global.rocksdb_lock_scanned_rows; +@@global.rocksdb_lock_scanned_rows +0 +SET GLOBAL rocksdb_lock_scanned_rows=ON; +CREATE TABLE t0(id INT PRIMARY KEY, value INT); +INSERT INTO t0 VALUES (1,0), (2,1), (3,0), (4,0), (5,1); +connect con1,localhost,root,,; +connect con2,localhost,root,,; +connection con1; +SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ; +BEGIN; +connection con2; +SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ; +BEGIN; +connection con1; +SELECT * FROM t0 WHERE value > 0 FOR UPDATE; +id value +2 1 +5 1 +connection con2; +UPDATE t0 SET VALUE=10 WHERE id=1; +ERROR HY000: Lock wait timeout exceeded; try restarting transaction +connection con1; +COMMIT; +connection default; +disconnect con1; +disconnect con2; +DROP TABLE t0; +SET GLOBAL rocksdb_lock_scanned_rows=0; + +----------------------------------------------------------------------- +- Locking issues case 2: +- Rows that are scanned but do not match the 
WHERE are not locked +- using READ COMMITTED transaction isolation level unless +- rocksdb_lock_scanned_rows is on +----------------------------------------------------------------------- +DROP TABLE IF EXISTS t0; +SELECT @@global.rocksdb_lock_scanned_rows; +@@global.rocksdb_lock_scanned_rows +0 +SET GLOBAL rocksdb_lock_scanned_rows=ON; +CREATE TABLE t0(id INT PRIMARY KEY, value INT); +INSERT INTO t0 VALUES (1,0), (2,1), (3,0), (4,0), (5,1); +connect con1,localhost,root,,; +connect con2,localhost,root,,; +connection con1; +SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED; +BEGIN; +connection con2; +SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED; +BEGIN; +connection con1; +SELECT * FROM t0 WHERE value > 0 FOR UPDATE; +id value +2 1 +5 1 +connection con2; +UPDATE t0 SET VALUE=10 WHERE id=1; +ERROR HY000: Lock wait timeout exceeded; try restarting transaction +connection con1; +COMMIT; +connection default; +disconnect con1; +disconnect con2; +DROP TABLE t0; +SET GLOBAL rocksdb_lock_scanned_rows=0; + +----------------------------------------------------------------------- +- Locking issues case 3: +- After creating a snapshot, other clients updating rows +- using REPEATABLE READ transaction isolation level +----------------------------------------------------------------------- +DROP TABLE IF EXISTS t0; +CREATE TABLE t0(id INT AUTO_INCREMENT PRIMARY KEY, value INT); +Inserting 200,000 rows +connect con1,localhost,root,,; +connect con2,localhost,root,,; +connection con1; +SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ; +SELECT * FROM t0 WHERE value > 0 FOR UPDATE; +connection con2; +SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ; +UPDATE t0 SET VALUE=VALUE+1 WHERE id=190000; +connection con1; +ERROR: 1213 +connection default; +disconnect con1; +disconnect con2; +DROP TABLE t0; + +----------------------------------------------------------------------- +- Locking issues case 3: +- After creating a snapshot, other clients updating rows +- 
using READ COMMITTED transaction isolation level +----------------------------------------------------------------------- +DROP TABLE IF EXISTS t0; +CREATE TABLE t0(id INT AUTO_INCREMENT PRIMARY KEY, value INT); +Inserting 200,000 rows +connect con1,localhost,root,,; +connect con2,localhost,root,,; +connection con1; +SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED; +SELECT * FROM t0 WHERE value > 0 FOR UPDATE; +connection con2; +SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED; +UPDATE t0 SET VALUE=VALUE+1 WHERE id=190000; +connection con1; +id value +190000 1 +ERROR: 0 +connection default; +disconnect con1; +disconnect con2; +DROP TABLE t0; + +----------------------------------------------------------------------- +- Locking issues case 4: +- Phantom rows +- using REPEATABLE READ transaction isolation level +----------------------------------------------------------------------- +DROP TABLE IF EXISTS t0; +CREATE TABLE t0(id INT AUTO_INCREMENT PRIMARY KEY, value INT); +Inserting 200,000 rows +connect con1,localhost,root,,; +connect con2,localhost,root,,; +connection con1; +SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ; +SELECT * FROM t0 WHERE value > 0 FOR UPDATE; +connection con2; +SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ; +INSERT INTO t0 VALUES(200001,1), (-1,1); +connection con1; +id value +connection default; +disconnect con1; +disconnect con2; +DROP TABLE t0; + +----------------------------------------------------------------------- +- Locking issues case 4: +- Phantom rows +- using READ COMMITTED transaction isolation level +----------------------------------------------------------------------- +DROP TABLE IF EXISTS t0; +CREATE TABLE t0(id INT AUTO_INCREMENT PRIMARY KEY, value INT); +Inserting 200,000 rows +connect con1,localhost,root,,; +connect con2,localhost,root,,; +connection con1; +SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED; +SELECT * FROM t0 WHERE value > 0 FOR UPDATE; +connection con2; +SET SESSION 
TRANSACTION ISOLATION LEVEL READ COMMITTED; +INSERT INTO t0 VALUES(200001,1), (-1,1); +connection con1; +id value +connection default; +disconnect con1; +disconnect con2; +DROP TABLE t0; + +----------------------------------------------------------------------- +- Locking issues case 5: +- Deleting primary key +- using REPEATABLE READ transaction isolation level +----------------------------------------------------------------------- +DROP TABLE IF EXISTS t0; +CREATE TABLE t0(id INT AUTO_INCREMENT PRIMARY KEY, value INT); +Inserting 200,000 rows +UPDATE t0 SET value=100 WHERE id=190000; +connect con1,localhost,root,,; +connect con2,localhost,root,,; +connection con1; +SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ; +BEGIN; +SELECT * FROM t0 WHERE value > 0 FOR UPDATE; +connection con2; +SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ; +BEGIN; +DELETE FROM t0 WHERE id=190000; +COMMIT; +connection con1; +ERROR: 1213 +COMMIT; +connection default; +disconnect con1; +disconnect con2; +DROP TABLE t0; + +----------------------------------------------------------------------- +- Locking issues case 5: +- Deleting primary key +- using READ COMMITTED transaction isolation level +----------------------------------------------------------------------- +DROP TABLE IF EXISTS t0; +CREATE TABLE t0(id INT AUTO_INCREMENT PRIMARY KEY, value INT); +Inserting 200,000 rows +UPDATE t0 SET value=100 WHERE id=190000; +connect con1,localhost,root,,; +connect con2,localhost,root,,; +connection con1; +SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED; +BEGIN; +SELECT * FROM t0 WHERE value > 0 FOR UPDATE; +connection con2; +SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED; +BEGIN; +DELETE FROM t0 WHERE id=190000; +COMMIT; +connection con1; +id value +ERROR: 0 +COMMIT; +connection default; +disconnect con1; +disconnect con2; +DROP TABLE t0; + +----------------------------------------------------------------------- +- Locking issues case 6: +- Changing primary key +- 
using REPEATABLE READ transaction isolation level +----------------------------------------------------------------------- +DROP TABLE IF EXISTS t0; +CREATE TABLE t0(id INT AUTO_INCREMENT PRIMARY KEY, value INT); +Inserting 200,000 rows +UPDATE t0 SET value=100 WHERE id=190000; +connect con1,localhost,root,,; +connect con2,localhost,root,,; +connection con1; +SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ; +BEGIN; +SELECT * FROM t0 WHERE value > 0 FOR UPDATE; +connection con2; +SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ; +BEGIN; +UPDATE t0 SET id=200001 WHERE id=190000; +COMMIT; +connection con1; +ERROR: 1213 +COMMIT; +connection default; +disconnect con1; +disconnect con2; +DROP TABLE t0; + +----------------------------------------------------------------------- +- Locking issues case 6: +- Changing primary key +- using READ COMMITTED transaction isolation level +----------------------------------------------------------------------- +DROP TABLE IF EXISTS t0; +CREATE TABLE t0(id INT AUTO_INCREMENT PRIMARY KEY, value INT); +Inserting 200,000 rows +UPDATE t0 SET value=100 WHERE id=190000; +connect con1,localhost,root,,; +connect con2,localhost,root,,; +connection con1; +SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED; +BEGIN; +SELECT * FROM t0 WHERE value > 0 FOR UPDATE; +connection con2; +SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED; +BEGIN; +UPDATE t0 SET id=200001 WHERE id=190000; +COMMIT; +connection con1; +id value +ERROR: 0 +COMMIT; +connection default; +disconnect con1; +disconnect con2; +DROP TABLE t0; + +----------------------------------------------------------------------- +- Locking issues case 7: +- Rows that are scanned as part of a query but not in the table being +- updated should not be locked unless rocksdb_lock_scanned_rows is on +----------------------------------------------------------------------- +DROP TABLE IF EXISTS t1, t2; +SELECT @@global.rocksdb_lock_scanned_rows; +@@global.rocksdb_lock_scanned_rows 
+0 +CREATE TABLE t1(id INT PRIMARY KEY, value INT); +CREATE TABLE t2(id INT PRIMARY KEY, value INT); +INSERT INTO t1 VALUES (1,1), (2,2), (3,3); +INSERT INTO t2 VALUES (1,1), (2,2), (3,3), (4,4), (5,5); +connect con1,localhost,root,,; +connect con2,localhost,root,,; +connection con1; +SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ; +BEGIN; +connection con2; +SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ; +BEGIN; +lock_scanned_rows is 0 +connection con1; +UPDATE t1 JOIN t2 ON t1.id = t2.id SET t1.value=t1.value+100 WHERE t2.id=3; +connection con2; +UPDATE t2 SET value=value+100; +SELECT * FROM t2; +id value +1 101 +2 102 +3 103 +4 104 +5 105 +connection con1; +COMMIT; +connection default; +disconnect con1; +disconnect con2; +DROP TABLE t1; +DROP TABLE t2; + +----------------------------------------------------------------------- +- Locking issues case 7: +- Rows that are scanned as part of a query but not in the table being +- updated should not be locked unless rocksdb_lock_scanned_rows is on +----------------------------------------------------------------------- +DROP TABLE IF EXISTS t1, t2; +SELECT @@global.rocksdb_lock_scanned_rows; +@@global.rocksdb_lock_scanned_rows +0 +CREATE TABLE t1(id INT PRIMARY KEY, value INT); +CREATE TABLE t2(id INT PRIMARY KEY, value INT); +INSERT INTO t1 VALUES (1,1), (2,2), (3,3); +INSERT INTO t2 VALUES (1,1), (2,2), (3,3), (4,4), (5,5); +connect con1,localhost,root,,; +connect con2,localhost,root,,; +connection con1; +SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED; +BEGIN; +connection con2; +SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED; +BEGIN; +lock_scanned_rows is 0 +connection con1; +UPDATE t1 JOIN t2 ON t1.id = t2.id SET t1.value=t1.value+100 WHERE t2.id=3; +connection con2; +UPDATE t2 SET value=value+100; +SELECT * FROM t2; +id value +1 101 +2 102 +3 103 +4 104 +5 105 +connection con1; +COMMIT; +connection default; +disconnect con1; +disconnect con2; +DROP TABLE t1; +DROP TABLE t2; + 
+----------------------------------------------------------------------- +- Locking issues case 7: +- Rows that are scanned as part of a query but not in the table being +- updated should not be locked unless rocksdb_lock_scanned_rows is on +----------------------------------------------------------------------- +DROP TABLE IF EXISTS t1, t2; +SELECT @@global.rocksdb_lock_scanned_rows; +@@global.rocksdb_lock_scanned_rows +0 +SET GLOBAL rocksdb_lock_scanned_rows=ON; +CREATE TABLE t1(id INT PRIMARY KEY, value INT); +CREATE TABLE t2(id INT PRIMARY KEY, value INT); +INSERT INTO t1 VALUES (1,1), (2,2), (3,3); +INSERT INTO t2 VALUES (1,1), (2,2), (3,3), (4,4), (5,5); +connect con1,localhost,root,,; +connect con2,localhost,root,,; +connection con1; +SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ; +BEGIN; +connection con2; +SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ; +BEGIN; +lock_scanned_rows is 1 +connection con1; +UPDATE t1 JOIN t2 ON t1.id = t2.id SET t1.value=t1.value+100 WHERE t2.id=3; +connection con2; +UPDATE t2 SET value=value+100 WHERE id=3; +ERROR HY000: Lock wait timeout exceeded; try restarting transaction +UPDATE t2 SET value=value+100 WHERE id IN (1,2,4,5); +SELECT * FROM t2; +id value +1 101 +2 102 +3 3 +4 104 +5 105 +connection con1; +COMMIT; +connection default; +disconnect con1; +disconnect con2; +DROP TABLE t1; +DROP TABLE t2; +SET GLOBAL rocksdb_lock_scanned_rows=0; + +----------------------------------------------------------------------- +- Locking issues case 7: +- Rows that are scanned as part of a query but not in the table being +- updated should not be locked unless rocksdb_lock_scanned_rows is on +----------------------------------------------------------------------- +DROP TABLE IF EXISTS t1, t2; +SELECT @@global.rocksdb_lock_scanned_rows; +@@global.rocksdb_lock_scanned_rows +0 +SET GLOBAL rocksdb_lock_scanned_rows=ON; +CREATE TABLE t1(id INT PRIMARY KEY, value INT); +CREATE TABLE t2(id INT PRIMARY KEY, value INT); +INSERT 
INTO t1 VALUES (1,1), (2,2), (3,3); +INSERT INTO t2 VALUES (1,1), (2,2), (3,3), (4,4), (5,5); +connect con1,localhost,root,,; +connect con2,localhost,root,,; +connection con1; +SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED; +BEGIN; +connection con2; +SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED; +BEGIN; +lock_scanned_rows is 1 +connection con1; +UPDATE t1 JOIN t2 ON t1.id = t2.id SET t1.value=t1.value+100 WHERE t2.id=3; +connection con2; +UPDATE t2 SET value=value+100 WHERE id=3; +ERROR HY000: Lock wait timeout exceeded; try restarting transaction +UPDATE t2 SET value=value+100 WHERE id IN (1,2,4,5); +SELECT * FROM t2; +id value +1 101 +2 102 +3 3 +4 104 +5 105 +connection con1; +COMMIT; +connection default; +disconnect con1; +disconnect con2; +DROP TABLE t1; +DROP TABLE t2; +SET GLOBAL rocksdb_lock_scanned_rows=0; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/misc.result b/storage/rocksdb/mysql-test/rocksdb/r/misc.result new file mode 100644 index 0000000000000..4a39f1cbff4fd --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/misc.result @@ -0,0 +1,95 @@ +INSERT INTO mysql.event ( +db, +name, +body, +definer, +interval_value, +interval_field, +originator, +character_set_client, +collation_connection, +db_collation, +body_utf8) +values ( +database(), +"ev1", +"select 1", +user(), +100, +"SECOND_MICROSECOND", +1, +'utf8', +'utf8_general_ci', +'utf8_general_ci', +'select 1'); +SHOW EVENTS; +ERROR 42000: This version of MariaDB doesn't yet support 'MICROSECOND' +DROP EVENT ev1; +SELECT TABLE_NAME, COLUMN_NAME, REFERENCED_TABLE_NAME, REFERENCED_COLUMN_NAME +FROM INFORMATION_SCHEMA.KEY_COLUMN_USAGE ORDER BY TABLE_NAME; +TABLE_NAME COLUMN_NAME REFERENCED_TABLE_NAME REFERENCED_COLUMN_NAME +Warning 1286 Unknown storage engine 'InnoDB' +Warning 1286 Unknown storage engine 'InnoDB' +Warnings: +column_stats column_name NULL NULL +column_stats db_name NULL NULL +column_stats table_name NULL NULL +columns_priv Column_name NULL NULL +columns_priv Db 
NULL NULL +columns_priv Host NULL NULL +columns_priv Table_name NULL NULL +columns_priv User NULL NULL +db Db NULL NULL +db Host NULL NULL +db User NULL NULL +event db NULL NULL +event name NULL NULL +func name NULL NULL +gtid_slave_pos domain_id NULL NULL +gtid_slave_pos sub_id NULL NULL +help_category help_category_id NULL NULL +help_category name NULL NULL +help_keyword help_keyword_id NULL NULL +help_keyword name NULL NULL +help_relation help_keyword_id NULL NULL +help_relation help_topic_id NULL NULL +help_topic help_topic_id NULL NULL +help_topic name NULL NULL +host Db NULL NULL +host Host NULL NULL +index_stats db_name NULL NULL +index_stats index_name NULL NULL +index_stats prefix_arity NULL NULL +index_stats table_name NULL NULL +plugin name NULL NULL +proc db NULL NULL +proc name NULL NULL +proc type NULL NULL +procs_priv Db NULL NULL +procs_priv Host NULL NULL +procs_priv Routine_name NULL NULL +procs_priv Routine_type NULL NULL +procs_priv User NULL NULL +proxies_priv Host NULL NULL +proxies_priv Proxied_host NULL NULL +proxies_priv Proxied_user NULL NULL +proxies_priv User NULL NULL +roles_mapping Host NULL NULL +roles_mapping Role NULL NULL +roles_mapping User NULL NULL +servers Server_name NULL NULL +table_stats db_name NULL NULL +table_stats table_name NULL NULL +tables_priv Db NULL NULL +tables_priv Host NULL NULL +tables_priv Table_name NULL NULL +tables_priv User NULL NULL +time_zone Time_zone_id NULL NULL +time_zone_leap_second Transition_time NULL NULL +time_zone_name Name NULL NULL +time_zone_transition Time_zone_id NULL NULL +time_zone_transition Transition_time NULL NULL +time_zone_transition_type Time_zone_id NULL NULL +time_zone_transition_type Transition_type_id NULL NULL +user Host NULL NULL +user User NULL NULL diff --git a/storage/rocksdb/mysql-test/rocksdb/r/mysqlbinlog_gtid_skip_empty_trans_rocksdb.result b/storage/rocksdb/mysql-test/rocksdb/r/mysqlbinlog_gtid_skip_empty_trans_rocksdb.result new file mode 100644 index 
0000000000000..835361eea352e --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/mysqlbinlog_gtid_skip_empty_trans_rocksdb.result @@ -0,0 +1,143 @@ +reset master; +set timestamp=1000000000; +set SESSION binlog_format = 'ROW'; +create database test2; +create database test3; +use test; +create table t1 (a int primary key, b char(8)) ENGINE=rocksdb; +insert into t1 values(1, 'a'); +insert into t1 values(2, 'b'); +create table t2 (a int primary key, b char(8)) ENGINE=rocksdb; +start transaction; +insert into t2 values(1, 'a'); +insert into t2 values(2, 'b'); +insert into t2 values(3, 'c'); +insert into t2 values(4, 'd'); +commit; +use test2; +create table t1 (a int primary key, b char(8)) ENGINE=rocksdb; +insert into t1 values(1, 'a'); +insert into t1 values(2, 'b'); +create table t2 (a int primary key, b char(8)) ENGINE=rocksdb; +start transaction; +insert into t2 values(1, 'a'); +insert into t2 values(2, 'b'); +insert into t2 values(3, 'c'); +insert into t2 values(4, 'd'); +commit; +use test3; +create table t1 (a int primary key, b char(8)) ENGINE=rocksdb; +insert into t1 values(1, 'a'); +insert into t1 values(2, 'b'); +create table t2 (a int primary key, b char(8)) ENGINE=rocksdb; +start transaction; +insert into t2 values(1, 'a'); +insert into t2 values(2, 'b'); +insert into t2 values(3, 'c'); +insert into t2 values(4, 'd'); +commit; +FLUSH LOGS; +==== Output of mysqlbinlog with --short-form --skip-empty-trans, --database and --skip-gtids options ==== +/*!50530 SET @@SESSION.PSEUDO_SLAVE_MODE=1*/; +/*!40019 SET @@session.max_insert_delayed_threads=0*/; +/*!50003 SET @OLD_COMPLETION_TYPE=@@COMPLETION_TYPE,COMPLETION_TYPE=0*/; +DELIMITER /*!*/; +ROLLBACK/*!*/; +SET TIMESTAMP=1000000000/*!*/; +SET @@session.pseudo_thread_id=999999999/*!*/; +SET @@session.foreign_key_checks=1, @@session.sql_auto_is_null=0, @@session.unique_checks=1, @@session.autocommit=1/*!*/; +SET @@session.sql_mode=1073741824/*!*/; +SET @@session.auto_increment_increment=1, 
@@session.auto_increment_offset=1/*!*/; +/*!\C latin1 *//*!*/; +SET @@session.character_set_client=8,@@session.collation_connection=8,@@session.collation_server=8/*!*/; +SET @@session.lc_time_names=0/*!*/; +SET @@session.collation_database=DEFAULT/*!*/; +create database test2 +/*!*/; +use `test2`/*!*/; +SET TIMESTAMP=1000000000/*!*/; +create table t1 (a int primary key, b char(8)) ENGINE=rocksdb +/*!*/; +SET TIMESTAMP=1000000000/*!*/; +BEGIN +/*!*/; +COMMIT/*!*/; +SET TIMESTAMP=1000000000/*!*/; +BEGIN +/*!*/; +COMMIT/*!*/; +SET TIMESTAMP=1000000000/*!*/; +create table t2 (a int primary key, b char(8)) ENGINE=rocksdb +/*!*/; +SET TIMESTAMP=1000000000/*!*/; +BEGIN +/*!*/; +COMMIT/*!*/; +DELIMITER ; +# End of log file +ROLLBACK /* added by mysqlbinlog */; +/*!50003 SET COMPLETION_TYPE=@OLD_COMPLETION_TYPE*/; +/*!50530 SET @@SESSION.PSEUDO_SLAVE_MODE=0*/; +use test2; +start transaction; +insert into t2 values(5, 'e'); +insert into t2 values(6, 'f'); +use test; +insert into t2 values(7, 'g'); +insert into t2 values(8, 'h'); +commit; +FLUSH LOGS; +==== Output of mysqlbinlog with --short-form --skip-empty-trans, --database and --skip-gtids options ==== +==== DB changed in the middle of the transaction, which belongs to the selected database +/*!50530 SET @@SESSION.PSEUDO_SLAVE_MODE=1*/; +/*!40019 SET @@session.max_insert_delayed_threads=0*/; +/*!50003 SET @OLD_COMPLETION_TYPE=@@COMPLETION_TYPE,COMPLETION_TYPE=0*/; +DELIMITER /*!*/; +SET TIMESTAMP=1000000000/*!*/; +SET @@session.pseudo_thread_id=999999999/*!*/; +SET @@session.foreign_key_checks=1, @@session.sql_auto_is_null=0, @@session.unique_checks=1, @@session.autocommit=1/*!*/; +SET @@session.sql_mode=1073741824/*!*/; +SET @@session.auto_increment_increment=1, @@session.auto_increment_offset=1/*!*/; +/*!\C latin1 *//*!*/; +SET @@session.character_set_client=8,@@session.collation_connection=8,@@session.collation_server=8/*!*/; +SET @@session.lc_time_names=0/*!*/; +SET @@session.collation_database=DEFAULT/*!*/; +BEGIN 
+/*!*/; +DELIMITER ; +# End of log file +ROLLBACK /* added by mysqlbinlog */; +/*!50003 SET COMPLETION_TYPE=@OLD_COMPLETION_TYPE*/; +/*!50530 SET @@SESSION.PSEUDO_SLAVE_MODE=0*/; +use test; +start transaction; +insert into t2 values(9, 'i'); +insert into t2 values(10, 'j'); +use test2; +insert into t2 values(11, 'k'); +insert into t2 values(12, 'l'); +commit; +FLUSH LOGS; +==== Output of mysqlbinlog with --short-form --skip-empty-trans, --database and --skip-gtids options ==== +==== DB changed in the middle of the transaction, which belongs to the non-selected database +/*!50530 SET @@SESSION.PSEUDO_SLAVE_MODE=1*/; +/*!40019 SET @@session.max_insert_delayed_threads=0*/; +/*!50003 SET @OLD_COMPLETION_TYPE=@@COMPLETION_TYPE,COMPLETION_TYPE=0*/; +DELIMITER /*!*/; +DELIMITER ; +# End of log file +ROLLBACK /* added by mysqlbinlog */; +/*!50003 SET COMPLETION_TYPE=@OLD_COMPLETION_TYPE*/; +/*!50530 SET @@SESSION.PSEUDO_SLAVE_MODE=0*/; +use test; +drop table t1; +drop table if exists t2; +use test2; +drop table t1; +drop table if exists t2; +use test3; +drop table t1; +drop table if exists t2; +drop database test2; +drop database test3; +FLUSH LOGS; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/mysqldump.result b/storage/rocksdb/mysql-test/rocksdb/r/mysqldump.result new file mode 100644 index 0000000000000..7e17c98668c54 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/mysqldump.result @@ -0,0 +1,179 @@ +drop table if exists r1; +connect con1,localhost,root,,; +connect con2,localhost,root,,; +connection con1; +create table r1 (id1 int, id2 int, id3 varchar(100), id4 int, value1 int, value2 int, value3 int, value4 int, primary key (id1, id2, id3, id4)) engine=rocksdb; +insert into r1 values (1,1,1,1,1,1,1,1); +insert into r1 values (1,1,1,2,2,2,2,2); +insert into r1 values (1,1,2,1,3,3,3,3); +insert into r1 values (1,1,2,2,4,4,4,4); +insert into r1 values (1,2,1,1,5,5,5,5); +insert into r1 values (1,2,1,2,6,6,6,6); +insert into r1 values (1,2,2,1,7,7,7,7); 
+insert into r1 values (1,2,2,2,8,8,8,8); +insert into r1 values (2,1,1,1,9,9,9,9); +insert into r1 values (2,1,1,2,10,10,10,10); +insert into r1 values (2,1,2,1,11,11,11,11); +insert into r1 values (2,1,2,2,12,12,12,12); +insert into r1 values (2,2,1,1,13,13,13,13); +insert into r1 values (2,2,1,2,14,14,14,14); +insert into r1 values (2,2,2,1,15,15,15,15); +insert into r1 values (2,2,2,2,16,16,16,16); +connection con2; +BEGIN; +insert into r1 values (5,5,5,5,5,5,5,5); +update r1 set value1=value1+100 where id1=1 and id2=1 and id3='1'; + +/*!40101 SET @OLD_CHARACTER_SET_CLIENT=@@CHARACTER_SET_CLIENT */; +/*!40101 SET @OLD_CHARACTER_SET_RESULTS=@@CHARACTER_SET_RESULTS */; +/*!40101 SET @OLD_COLLATION_CONNECTION=@@COLLATION_CONNECTION */; +/*!40101 SET NAMES utf8 */; +/*!40103 SET @OLD_TIME_ZONE=@@TIME_ZONE */; +/*!40103 SET TIME_ZONE='+00:00' */; +/*!40014 SET @OLD_UNIQUE_CHECKS=@@UNIQUE_CHECKS, UNIQUE_CHECKS=0 */; +/*!40014 SET @OLD_FOREIGN_KEY_CHECKS=@@FOREIGN_KEY_CHECKS, FOREIGN_KEY_CHECKS=0 */; +/*!40101 SET @OLD_SQL_MODE=@@SQL_MODE, SQL_MODE='NO_AUTO_VALUE_ON_ZERO' */; +/*!40111 SET @OLD_SQL_NOTES=@@SQL_NOTES, SQL_NOTES=0 */; +/*!50601 SELECT count(*) INTO @is_rocksdb_supported FROM information_schema.SESSION_VARIABLES WHERE variable_name='rocksdb_bulk_load' */; +/*!50601 SET @enable_bulk_load = IF (@is_rocksdb_supported, 'SET SESSION rocksdb_bulk_load=1', 'SET @dummy = 0') */; +/*!50601 PREPARE s FROM @enable_bulk_load */; +/*!50601 EXECUTE s */; +-- CHANGE MASTER TO MASTER_LOG_FILE='master-bin.000002', MASTER_LOG_POS=4832; +-- SET GLOBAL gtid_slave_pos='0-1-18'; +DROP TABLE IF EXISTS `r1`; +/*!40101 SET @saved_cs_client = @@character_set_client */; +/*!40101 SET character_set_client = utf8 */; +CREATE TABLE `r1` ( + `id1` int(11) NOT NULL, + `id2` int(11) NOT NULL, + `id3` varchar(100) NOT NULL, + `id4` int(11) NOT NULL, + `value1` int(11) DEFAULT NULL, + `value2` int(11) DEFAULT NULL, + `value3` int(11) DEFAULT NULL, + `value4` int(11) DEFAULT NULL, + 
PRIMARY KEY (`id1`,`id2`,`id3`,`id4`) +) ENGINE=ROCKSDB DEFAULT CHARSET=latin1; +/*!40101 SET character_set_client = @saved_cs_client */; +/* ORDERING KEY (DESC) : PRIMARY */; + +LOCK TABLES `r1` WRITE; +/*!40000 ALTER TABLE `r1` DISABLE KEYS */; +INSERT INTO `r1` VALUES (2,2,'2',2,16,16,16,16),(2,2,'2',1,15,15,15,15),(2,2,'1',2,14,14,14,14),(2,2,'1',1,13,13,13,13),(2,1,'2',2,12,12,12,12),(2,1,'2',1,11,11,11,11),(2,1,'1',2,10,10,10,10),(2,1,'1',1,9,9,9,9),(1,2,'2',2,8,8,8,8),(1,2,'2',1,7,7,7,7),(1,2,'1',2,6,6,6,6),(1,2,'1',1,5,5,5,5),(1,1,'2',2,4,4,4,4),(1,1,'2',1,3,3,3,3),(1,1,'1',2,2,2,2,2),(1,1,'1',1,1,1,1,1); +/*!40000 ALTER TABLE `r1` ENABLE KEYS */; +UNLOCK TABLES; +/*!50601 SET @disable_bulk_load = IF (@is_rocksdb_supported, 'SET SESSION rocksdb_bulk_load=0', 'SET @dummy = 0') */; +/*!50601 PREPARE s FROM @disable_bulk_load */; +/*!50601 EXECUTE s */; +/*!40103 SET TIME_ZONE=@OLD_TIME_ZONE */; + +/*!40101 SET SQL_MODE=@OLD_SQL_MODE */; +/*!40014 SET FOREIGN_KEY_CHECKS=@OLD_FOREIGN_KEY_CHECKS */; +/*!40014 SET UNIQUE_CHECKS=@OLD_UNIQUE_CHECKS */; +/*!40101 SET CHARACTER_SET_CLIENT=@OLD_CHARACTER_SET_CLIENT */; +/*!40101 SET CHARACTER_SET_RESULTS=@OLD_CHARACTER_SET_RESULTS */; +/*!40101 SET COLLATION_CONNECTION=@OLD_COLLATION_CONNECTION */; +/*!40111 SET SQL_NOTES=@OLD_SQL_NOTES */; + +rollback; +connection con1; +1 +set @save_default_storage_engine=@@global.default_storage_engine; +SET GLOBAL default_storage_engine=rocksdb; + +/*!40101 SET @OLD_CHARACTER_SET_CLIENT=@@CHARACTER_SET_CLIENT */; +/*!40101 SET @OLD_CHARACTER_SET_RESULTS=@@CHARACTER_SET_RESULTS */; +/*!40101 SET @OLD_COLLATION_CONNECTION=@@COLLATION_CONNECTION */; +/*!40101 SET NAMES utf8 */; +/*!40103 SET @OLD_TIME_ZONE=@@TIME_ZONE */; +/*!40103 SET TIME_ZONE='+00:00' */; +/*!40014 SET @OLD_UNIQUE_CHECKS=@@UNIQUE_CHECKS, UNIQUE_CHECKS=0 */; +/*!40014 SET @OLD_FOREIGN_KEY_CHECKS=@@FOREIGN_KEY_CHECKS, FOREIGN_KEY_CHECKS=0 */; +/*!40101 SET @OLD_SQL_MODE=@@SQL_MODE, SQL_MODE='NO_AUTO_VALUE_ON_ZERO' 
*/; +/*!40111 SET @OLD_SQL_NOTES=@@SQL_NOTES, SQL_NOTES=0 */; +-- CHANGE MASTER TO MASTER_LOG_FILE='master-bin.000002', MASTER_LOG_POS=4832; +-- SET GLOBAL gtid_slave_pos='0-1-18'; +DROP TABLE IF EXISTS `r1`; +/*!40101 SET @saved_cs_client = @@character_set_client */; +/*!40101 SET character_set_client = utf8 */; +CREATE TABLE `r1` ( + `id1` int(11) NOT NULL, + `id2` int(11) NOT NULL, + `id3` varchar(100) NOT NULL, + `id4` int(11) NOT NULL, + `value1` int(11) DEFAULT NULL, + `value2` int(11) DEFAULT NULL, + `value3` int(11) DEFAULT NULL, + `value4` int(11) DEFAULT NULL, + PRIMARY KEY (`id1`,`id2`,`id3`,`id4`) +) ENGINE=ROCKSDB DEFAULT CHARSET=latin1; +/*!40101 SET character_set_client = @saved_cs_client */; +/* ORDERING KEY : (null) */; + +LOCK TABLES `r1` WRITE; +/*!40000 ALTER TABLE `r1` DISABLE KEYS */; +INSERT INTO `r1` VALUES (1,1,'1',1,1,1,1,1),(1,1,'1',2,2,2,2,2),(1,1,'2',1,3,3,3,3),(1,1,'2',2,4,4,4,4),(1,2,'1',1,5,5,5,5),(1,2,'1',2,6,6,6,6),(1,2,'2',1,7,7,7,7),(1,2,'2',2,8,8,8,8),(2,1,'1',1,9,9,9,9),(2,1,'1',2,10,10,10,10),(2,1,'2',1,11,11,11,11),(2,1,'2',2,12,12,12,12),(2,2,'1',1,13,13,13,13),(2,2,'1',2,14,14,14,14),(2,2,'2',1,15,15,15,15),(2,2,'2',2,16,16,16,16); +/*!40000 ALTER TABLE `r1` ENABLE KEYS */; +UNLOCK TABLES; +/*!40103 SET TIME_ZONE=@OLD_TIME_ZONE */; + +/*!40101 SET SQL_MODE=@OLD_SQL_MODE */; +/*!40014 SET FOREIGN_KEY_CHECKS=@OLD_FOREIGN_KEY_CHECKS */; +/*!40014 SET UNIQUE_CHECKS=@OLD_UNIQUE_CHECKS */; +/*!40101 SET CHARACTER_SET_CLIENT=@OLD_CHARACTER_SET_CLIENT */; +/*!40101 SET CHARACTER_SET_RESULTS=@OLD_CHARACTER_SET_RESULTS */; +/*!40101 SET COLLATION_CONNECTION=@OLD_COLLATION_CONNECTION */; +/*!40111 SET SQL_NOTES=@OLD_SQL_NOTES */; + +2 +==== mysqldump with --innodb-stats-on-metadata ==== + +/*!40101 SET @OLD_CHARACTER_SET_CLIENT=@@CHARACTER_SET_CLIENT */; +/*!40101 SET @OLD_CHARACTER_SET_RESULTS=@@CHARACTER_SET_RESULTS */; +/*!40101 SET @OLD_COLLATION_CONNECTION=@@COLLATION_CONNECTION */; +/*!40101 SET NAMES utf8 */; +/*!40103 SET 
@OLD_TIME_ZONE=@@TIME_ZONE */; +/*!40103 SET TIME_ZONE='+00:00' */; +/*!40014 SET @OLD_UNIQUE_CHECKS=@@UNIQUE_CHECKS, UNIQUE_CHECKS=0 */; +/*!40014 SET @OLD_FOREIGN_KEY_CHECKS=@@FOREIGN_KEY_CHECKS, FOREIGN_KEY_CHECKS=0 */; +/*!40101 SET @OLD_SQL_MODE=@@SQL_MODE, SQL_MODE='NO_AUTO_VALUE_ON_ZERO' */; +/*!40111 SET @OLD_SQL_NOTES=@@SQL_NOTES, SQL_NOTES=0 */; +-- CHANGE MASTER TO MASTER_LOG_FILE='master-bin.000002', MASTER_LOG_POS=4832; +-- SET GLOBAL gtid_slave_pos='0-1-18'; +DROP TABLE IF EXISTS `r1`; +/*!40101 SET @saved_cs_client = @@character_set_client */; +/*!40101 SET character_set_client = utf8 */; +CREATE TABLE `r1` ( + `id1` int(11) NOT NULL, + `id2` int(11) NOT NULL, + `id3` varchar(100) NOT NULL, + `id4` int(11) NOT NULL, + `value1` int(11) DEFAULT NULL, + `value2` int(11) DEFAULT NULL, + `value3` int(11) DEFAULT NULL, + `value4` int(11) DEFAULT NULL, + PRIMARY KEY (`id1`,`id2`,`id3`,`id4`) +) ENGINE=ROCKSDB DEFAULT CHARSET=latin1; +/*!40101 SET character_set_client = @saved_cs_client */; +/* ORDERING KEY : (null) */; + +LOCK TABLES `r1` WRITE; +/*!40000 ALTER TABLE `r1` DISABLE KEYS */; +INSERT INTO `r1` VALUES (1,1,'1',1,1,1,1,1),(1,1,'1',2,2,2,2,2),(1,1,'2',1,3,3,3,3),(1,1,'2',2,4,4,4,4),(1,2,'1',1,5,5,5,5),(1,2,'1',2,6,6,6,6),(1,2,'2',1,7,7,7,7),(1,2,'2',2,8,8,8,8),(2,1,'1',1,9,9,9,9),(2,1,'1',2,10,10,10,10),(2,1,'2',1,11,11,11,11),(2,1,'2',2,12,12,12,12),(2,2,'1',1,13,13,13,13),(2,2,'1',2,14,14,14,14),(2,2,'2',1,15,15,15,15),(2,2,'2',2,16,16,16,16); +/*!40000 ALTER TABLE `r1` ENABLE KEYS */; +UNLOCK TABLES; +/*!40103 SET TIME_ZONE=@OLD_TIME_ZONE */; + +/*!40101 SET SQL_MODE=@OLD_SQL_MODE */; +/*!40014 SET FOREIGN_KEY_CHECKS=@OLD_FOREIGN_KEY_CHECKS */; +/*!40014 SET UNIQUE_CHECKS=@OLD_UNIQUE_CHECKS */; +/*!40101 SET CHARACTER_SET_CLIENT=@OLD_CHARACTER_SET_CLIENT */; +/*!40101 SET CHARACTER_SET_RESULTS=@OLD_CHARACTER_SET_RESULTS */; +/*!40101 SET COLLATION_CONNECTION=@OLD_COLLATION_CONNECTION */; +/*!40111 SET SQL_NOTES=@OLD_SQL_NOTES */; + +SET GLOBAL 
binlog_format=statement; +SET GLOBAL binlog_format=row; +drop table r1; +reset master; +set @@global.default_storage_engine=@save_default_storage_engine; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/mysqldump2.result b/storage/rocksdb/mysql-test/rocksdb/r/mysqldump2.result new file mode 100644 index 0000000000000..11c1f370e7ac3 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/mysqldump2.result @@ -0,0 +1,16 @@ +DROP TABLE IF EXISTS t1; +create table t1 (id int primary key, value int, value2 varchar(200), index(value)) engine=rocksdb; +optimize table t1; +Table Op Msg_type Msg_text +test.t1 optimize status OK +select variable_value into @a from information_schema.global_status where variable_name='rocksdb_block_cache_add'; +select case when variable_value - @a > 20 then 'true' else 'false' end from information_schema.global_status where variable_name='rocksdb_block_cache_add'; +case when variable_value - @a > 20 then 'true' else 'false' end +false +select count(*) from t1; +count(*) +50000 +select case when variable_value - @a > 100 then 'true' else 'false' end from information_schema.global_status where variable_name='rocksdb_block_cache_add'; +case when variable_value - @a > 100 then 'true' else 'false' end +true +DROP TABLE t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/negative_stats.result b/storage/rocksdb/mysql-test/rocksdb/r/negative_stats.result new file mode 100644 index 0000000000000..61c1d7e9bdb97 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/negative_stats.result @@ -0,0 +1,9 @@ +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 (i1 INT, PRIMARY KEY (i1)) ENGINE = ROCKSDB; +SET GLOBAL ROCKSDB_FORCE_FLUSH_MEMTABLE_NOW=1; +set session debug_dbug= "+d,myrocks_simulate_negative_stats"; +SELECT CASE WHEN DATA_LENGTH < 1024 * 1024 THEN 'true' ELSE 'false' END FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_NAME = 't1'; +CASE WHEN DATA_LENGTH < 1024 * 1024 THEN 'true' ELSE 'false' END +true +set session debug_dbug= 
"-d,myrocks_simulate_negative_stats"; +DROP TABLE t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/no_merge_sort.result b/storage/rocksdb/mysql-test/rocksdb/r/no_merge_sort.result new file mode 100644 index 0000000000000..3a631d2925b11 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/no_merge_sort.result @@ -0,0 +1,63 @@ +Warnings: +Note 1051 Unknown table 'test.ti_nk' +skip_merge_sort +true +skip_merge_sort +true +skip_merge_sort +true +skip_merge_sort +true +skip_merge_sort +true +skip_merge_sort +true +skip_merge_sort +true +skip_merge_sort +true +skip_merge_sort +true +skip_merge_sort +true +skip_merge_sort +true +skip_merge_sort +true +skip_merge_sort +true +skip_merge_sort +true +skip_merge_sort +true +skip_merge_sort +true +skip_merge_sort +true +skip_merge_sort +true +skip_merge_sort +true +skip_merge_sort +true +skip_merge_sort +true +skip_merge_sort +true +skip_merge_sort +true +skip_merge_sort +true +skip_merge_sort +true +skip_merge_sort +true +skip_merge_sort +true +skip_merge_sort +true +skip_merge_sort +true +skip_merge_sort +true +DROP TABLE ti_nk; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/optimize_table.result b/storage/rocksdb/mysql-test/rocksdb/r/optimize_table.result new file mode 100644 index 0000000000000..8273fdbae9f22 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/optimize_table.result @@ -0,0 +1,77 @@ +DROP TABLE IF EXISTS t1,t2,t3,t4,t5,t6; +create table t1 (id int primary key, value int, value2 varchar(200), index(value)) engine=rocksdb; +create table t2 (id int primary key, value int, value2 varchar(200), index(value)) engine=rocksdb; +create table t3 (id int primary key, value int, value2 varchar(200), index(value)) engine=rocksdb; +create table t4 (id int, value int, value2 varchar(200), primary key (id) comment 'rev:cf_i', index(value) comment 'rev:cf_i') engine=rocksdb; +create table t5 (id int, value int, value2 varchar(200), primary key (id) comment 'rev:cf_i', index(value) comment 'rev:cf_i') 
engine=rocksdb; +create table t6 (id int, value int, value2 varchar(200), primary key (id) comment 'rev:cf_i', index(value) comment 'rev:cf_i') engine=rocksdb; +select count(*) from t1; +count(*) +10000 +select count(*) from t2; +count(*) +10000 +select count(*) from t3; +count(*) +10000 +select count(*) from t4; +count(*) +10000 +select count(*) from t5; +count(*) +10000 +select count(*) from t6; +count(*) +10000 +delete from t1 where id <= 9900; +delete from t2 where id <= 9900; +delete from t3 where id <= 9900; +delete from t4 where id <= 9900; +delete from t5 where id <= 9900; +delete from t6 where id <= 9900; +optimize table t1; +Table Op Msg_type Msg_text +test.t1 optimize status OK +sst file reduction ok +optimize table t3; +Table Op Msg_type Msg_text +test.t3 optimize status OK +sst file reduction ok +optimize table t4; +Table Op Msg_type Msg_text +test.t4 optimize status OK +sst file reduction ok +optimize table t6; +Table Op Msg_type Msg_text +test.t6 optimize status OK +sst file reduction ok +select count(*) from t1; +count(*) +100 +select count(*) from t2; +count(*) +100 +select count(*) from t3; +count(*) +100 +select count(*) from t4; +count(*) +100 +select count(*) from t5; +count(*) +100 +select count(*) from t6; +count(*) +100 +optimize table t2; +Table Op Msg_type Msg_text +test.t2 optimize status OK +optimize table t5; +Table Op Msg_type Msg_text +test.t5 optimize status OK +DROP TABLE t1; +DROP TABLE t2; +DROP TABLE t3; +DROP TABLE t4; +DROP TABLE t5; +DROP TABLE t6; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/optimizer_loose_index_scans.result b/storage/rocksdb/mysql-test/rocksdb/r/optimizer_loose_index_scans.result new file mode 100644 index 0000000000000..27b1779627b23 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/optimizer_loose_index_scans.result @@ -0,0 +1,281 @@ +set optimizer_switch='index_merge_sort_union=off'; +create table t (a int, b int, c int, d int, e int, primary key(a, b, c, d), key(b, d)) engine=rocksdb; 
+analyze table t; +Table Op Msg_type Msg_text +test.t analyze status OK +show indexes from t; +Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment +t 0 PRIMARY 1 a A 100 NULL NULL LSMTREE +t 0 PRIMARY 2 b A 500 NULL NULL LSMTREE +t 0 PRIMARY 3 c A 2500 NULL NULL LSMTREE +t 0 PRIMARY 4 d A 2500 NULL NULL LSMTREE +t 1 b 1 b A 50 NULL NULL LSMTREE +t 1 b 2 d A 500 NULL NULL LSMTREE +set optimizer_switch = 'skip_scan=off'; +explain select b, d from t where d < 2; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t index NULL b 8 NULL # Using where; Using index +rows_read +2500 +set optimizer_switch = 'skip_scan=on,skip_scan_cost_based=off'; +explain select b, d from t where d < 2; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t range PRIMARY,b b 8 NULL # Using where; Using index for skip scan +rows_read +260 +include/diff_tables.inc [temp_orig, temp_skip] +set optimizer_switch = 'skip_scan=off,skip_scan_cost_based=on'; +set optimizer_switch = 'skip_scan=off'; +explain select b, d from t where d > 4; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t index NULL b 8 NULL # Using where; Using index +rows_read +2500 +set optimizer_switch = 'skip_scan=on,skip_scan_cost_based=off'; +explain select b, d from t where d > 4; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t range PRIMARY,b b 8 NULL # Using where; Using index for skip scan +rows_read +1509 +include/diff_tables.inc [temp_orig, temp_skip] +set optimizer_switch = 'skip_scan=off,skip_scan_cost_based=on'; +set optimizer_switch = 'skip_scan=off'; +explain select a, b, c, d from t where a = 5 and d <= 3; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t ref PRIMARY PRIMARY 4 const # Using where; Using index +rows_read +251 +set optimizer_switch = 'skip_scan=on,skip_scan_cost_based=off'; +explain 
select a, b, c, d from t where a = 5 and d <= 3; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t range PRIMARY,b PRIMARY 16 NULL # Using where; Using index for skip scan +rows_read +126 +include/diff_tables.inc [temp_orig, temp_skip] +set optimizer_switch = 'skip_scan=off,skip_scan_cost_based=on'; +set optimizer_switch = 'skip_scan=off'; +explain select e from t where a = 5 and d <= 3; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t ref PRIMARY PRIMARY 4 const # Using where +rows_read +251 +set optimizer_switch = 'skip_scan=on,skip_scan_cost_based=off'; +explain select e from t where a = 5 and d <= 3; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t ref PRIMARY,b PRIMARY 4 const # Using where +rows_read +251 +include/diff_tables.inc [temp_orig, temp_skip] +set optimizer_switch = 'skip_scan=off,skip_scan_cost_based=on'; +set optimizer_switch = 'skip_scan=off'; +explain select a, b, c, d from t where a = 5 and d >= 98; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t ref PRIMARY PRIMARY 4 const # Using where; Using index +rows_read +251 +set optimizer_switch = 'skip_scan=on,skip_scan_cost_based=off'; +explain select a, b, c, d from t where a = 5 and d >= 98; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t range PRIMARY,b PRIMARY 16 NULL # Using where; Using index for skip scan +rows_read +51 +include/diff_tables.inc [temp_orig, temp_skip] +set optimizer_switch = 'skip_scan=off,skip_scan_cost_based=on'; +set optimizer_switch = 'skip_scan=off'; +explain select e from t where a = 5 and d >= 98; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t ref PRIMARY PRIMARY 4 const # Using where +rows_read +251 +set optimizer_switch = 'skip_scan=on,skip_scan_cost_based=off'; +explain select e from t where a = 5 and d >= 98; +id select_type table type possible_keys key key_len ref rows Extra +1 
SIMPLE t ref PRIMARY,b PRIMARY 4 const # Using where +rows_read +251 +include/diff_tables.inc [temp_orig, temp_skip] +set optimizer_switch = 'skip_scan=off,skip_scan_cost_based=on'; +set optimizer_switch = 'skip_scan=off'; +explain select a, b, c, d from t where a in (1, 5) and d >= 98; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t range PRIMARY PRIMARY 4 NULL # Using where; Using index +rows_read +502 +set optimizer_switch = 'skip_scan=on,skip_scan_cost_based=off'; +explain select a, b, c, d from t where a in (1, 5) and d >= 98; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t range PRIMARY,b PRIMARY 16 NULL # Using where; Using index for skip scan +rows_read +102 +include/diff_tables.inc [temp_orig, temp_skip] +set optimizer_switch = 'skip_scan=off,skip_scan_cost_based=on'; +set optimizer_switch = 'skip_scan=off'; +explain select a, b, c, d from t where a in (1, 3, 5) and d >= 98; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t range PRIMARY PRIMARY 4 NULL # Using where; Using index +rows_read +753 +set optimizer_switch = 'skip_scan=on,skip_scan_cost_based=off'; +explain select a, b, c, d from t where a in (1, 3, 5) and d >= 98; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t range PRIMARY,b PRIMARY 16 NULL # Using where; Using index for skip scan +rows_read +153 +include/diff_tables.inc [temp_orig, temp_skip] +set optimizer_switch = 'skip_scan=off,skip_scan_cost_based=on'; +set optimizer_switch = 'skip_scan=off'; +explain select a, b, c, d from t where a in (1, 5) and b in (1, 2) and d >= 98; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t range PRIMARY,b PRIMARY 8 NULL # Using where; Using index +rows_read +204 +set optimizer_switch = 'skip_scan=on,skip_scan_cost_based=off'; +explain select a, b, c, d from t where a in (1, 5) and b in (1, 2) and d >= 98; +id select_type table type possible_keys key 
key_len ref rows Extra +1 SIMPLE t range PRIMARY,b PRIMARY 16 NULL # Using where; Using index for skip scan +rows_read +44 +include/diff_tables.inc [temp_orig, temp_skip] +set optimizer_switch = 'skip_scan=off,skip_scan_cost_based=on'; +set optimizer_switch = 'skip_scan=off'; +explain select a, b, c, d from t where a in (1, 2, 3, 4, 5) and b in (1, 2, 3) and d >= 98; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t range PRIMARY,b PRIMARY 8 NULL # Using where; Using index +rows_read +765 +set optimizer_switch = 'skip_scan=on,skip_scan_cost_based=off'; +explain select a, b, c, d from t where a in (1, 2, 3, 4, 5) and b in (1, 2, 3) and d >= 98; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t range PRIMARY,b PRIMARY 16 NULL # Using where; Using index for skip scan +rows_read +165 +include/diff_tables.inc [temp_orig, temp_skip] +set optimizer_switch = 'skip_scan=off,skip_scan_cost_based=on'; +set optimizer_switch = 'skip_scan=off'; +explain select a, b, c, d from t where a = 5 and b = 2 and d >= 98; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t ref PRIMARY,b PRIMARY 8 const,const # Using where; Using index +rows_read +51 +set optimizer_switch = 'skip_scan=on,skip_scan_cost_based=off'; +explain select a, b, c, d from t where a = 5 and b = 2 and d >= 98; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t range PRIMARY,b PRIMARY 16 NULL # Using where; Using index for skip scan +rows_read +11 +include/diff_tables.inc [temp_orig, temp_skip] +set optimizer_switch = 'skip_scan=off,skip_scan_cost_based=on'; +set optimizer_switch = 'skip_scan=off'; +explain select a+1, b, c, d from t where a = 5 and d < 3; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t ref PRIMARY PRIMARY 4 const # Using where; Using index +rows_read +251 +set optimizer_switch = 'skip_scan=on,skip_scan_cost_based=off'; +explain select a+1, b, c, d from t 
where a = 5 and d < 3; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t range PRIMARY,b PRIMARY 16 NULL # Using where; Using index for skip scan +rows_read +101 +include/diff_tables.inc [temp_orig, temp_skip] +set optimizer_switch = 'skip_scan=off,skip_scan_cost_based=on'; +set optimizer_switch = 'skip_scan=off'; +explain select b, c, d from t where a = 5 and d < 3; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t ref PRIMARY PRIMARY 4 const # Using where; Using index +rows_read +251 +set optimizer_switch = 'skip_scan=on,skip_scan_cost_based=off'; +explain select b, c, d from t where a = 5 and d < 3; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t range PRIMARY,b PRIMARY 16 NULL # Using where; Using index for skip scan +rows_read +101 +include/diff_tables.inc [temp_orig, temp_skip] +set optimizer_switch = 'skip_scan=off,skip_scan_cost_based=on'; +set optimizer_switch = 'skip_scan=off'; +explain select a, b, c, d from t where a = b and d >= 98; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t index NULL b 8 NULL # Using where; Using index +rows_read +2500 +set optimizer_switch = 'skip_scan=on,skip_scan_cost_based=off'; +explain select a, b, c, d from t where a = b and d >= 98; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t range PRIMARY,b b 8 NULL # Using where; Using index for skip scan +rows_read +9 +include/diff_tables.inc [temp_orig, temp_skip] +set optimizer_switch = 'skip_scan=off,skip_scan_cost_based=on'; +set optimizer_switch = 'skip_scan=on'; +set optimizer_trace = 'enabled=on'; +explain select a, b, c, d from t where a = 5 and d < 3 order by b, c, d; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t ref PRIMARY,b PRIMARY 4 const # Using where; Using index +select count(*) from information_schema.optimizer_trace where trace like '%order_attribute_not_prefix_in_index%'; 
+count(*) +1 +explain select a, b, c, d from t where a = 2 and d >= 98 and e = 2; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t ref PRIMARY PRIMARY 4 const # Using where +select count(*) from information_schema.optimizer_trace where trace like '%query_references_nonkey_column%'; +count(*) +1 +explain select a, b, c, d from t where a = 5 or b = 2 and d >= 98; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t index PRIMARY,b b 8 NULL # Using where; Using index +select count(*) from information_schema.optimizer_trace where trace like '%no_range_tree%'; +count(*) +1 +explain select a, b, c, d from t where a = 5 or b = 2 or d >= 98; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t index PRIMARY,b b 8 NULL # Using where; Using index +select count(*) from information_schema.optimizer_trace where trace like '%no_range_tree%'; +count(*) +1 +explain select a, b, c, d from t where a = 5 or d >= 98; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t index PRIMARY,b b 8 NULL # Using where; Using index +select count(*) from information_schema.optimizer_trace where trace like '%no_range_tree%'; +count(*) +1 +explain select a, b, c, d from t where ((a = 5 and b = 2) or a = 2) and d >= 98; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t range PRIMARY,b b 8 NULL # Using where; Using index for skip scan +select count(*) from information_schema.optimizer_trace where trace like '%keypart_in_disjunctive_query%'; +count(*) +1 +explain select a, b, c, d from t where a > 2 and d >= 98; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t range PRIMARY,b PRIMARY 4 NULL # Using where; Using index +select count(*) from information_schema.optimizer_trace where trace like '%prefix_not_const_equality%'; +count(*) +1 +explain select a, b, c, d from t where a = 2 and (d >= 98 or d < 2); +id select_type table type 
possible_keys key key_len ref rows Extra +1 SIMPLE t ref PRIMARY,b PRIMARY 4 const # Using where; Using index +select count(*) from information_schema.optimizer_trace where trace like '%range_predicate_too_complex%'; +count(*) +1 +explain select a, b, c, d from t where a = 2 and b = 2; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t ref PRIMARY,b PRIMARY 8 const,const # Using index +select count(*) from information_schema.optimizer_trace where trace like '%no_range_predicate%'; +count(*) +1 +explain select a, b, c, d from t where a = 2 and c > 2 and d < 2; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t ref PRIMARY,b PRIMARY 4 const # Using where; Using index +select count(*) from information_schema.optimizer_trace where trace like '%too_many_range_predicates%'; +count(*) +1 +explain select a, b, c, d from t where (a < 1 or a = 4 or a = 5) and b in (1, 2, 3) and d >= 98; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t range PRIMARY,b PRIMARY 8 NULL # Using where; Using index +select count(*) from information_schema.optimizer_trace where trace like '%prefix_not_const_equality%'; +count(*) +1 +set optimizer_trace = 'enabled=off'; +set optimizer_switch= 'skip_scan=off'; +drop table t; +set optimizer_switch='index_merge_sort_union=on'; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/partition.result b/storage/rocksdb/mysql-test/rocksdb/r/partition.result new file mode 100644 index 0000000000000..1ba966e9e075b --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/partition.result @@ -0,0 +1,688 @@ +DROP TABLE IF EXISTS t1; +DROP TABLE IF EXISTS VAR_POP; +DROP TABLE IF EXISTS TEMP0; +DROP TABLE IF EXISTS VAR_SAMP; +DROP TABLE IF EXISTS ti; +DROP TABLE IF EXISTS members; +DROP TABLE IF EXISTS members_2; +DROP TABLE IF EXISTS employees; +DROP TABLE IF EXISTS employees_2; +DROP TABLE IF EXISTS employees_3; +DROP TABLE IF EXISTS quarterly_report_status; +DROP TABLE IF EXISTS 
employees_4; +DROP TABLE IF EXISTS h2; +DROP TABLE IF EXISTS rcx; +DROP TABLE IF EXISTS r1; +DROP TABLE IF EXISTS rc1; +DROP TABLE IF EXISTS rx; +DROP TABLE IF EXISTS rc2; +DROP TABLE IF EXISTS rc3; +DROP TABLE IF EXISTS rc4; +DROP TABLE IF EXISTS employees_by_lname; +DROP TABLE IF EXISTS customers_1; +DROP TABLE IF EXISTS customers_2; +DROP TABLE IF EXISTS customers_3; +DROP TABLE IF EXISTS employees_hash; +DROP TABLE IF EXISTS employees_hash_1; +DROP TABLE IF EXISTS t1_hash; +DROP TABLE IF EXISTS employees_linear_hash; +DROP TABLE IF EXISTS t1_linear_hash; +DROP TABLE IF EXISTS k1; +DROP TABLE IF EXISTS k2; +DROP TABLE IF EXISTS tm1; +DROP TABLE IF EXISTS tk; +DROP TABLE IF EXISTS ts; +DROP TABLE IF EXISTS ts_1; +DROP TABLE IF EXISTS ts_3; +DROP TABLE IF EXISTS ts_4; +DROP TABLE IF EXISTS ts_5; +DROP TABLE IF EXISTS trb3; +DROP TABLE IF EXISTS tr; +DROP TABLE IF EXISTS members_3; +DROP TABLE IF EXISTS clients; +DROP TABLE IF EXISTS clients_lk; +DROP TABLE IF EXISTS trb1; +CREATE TABLE t1 (i INT, j INT, k INT, PRIMARY KEY (i)) ENGINE = ROCKSDB PARTITION BY KEY(i) PARTITIONS 4; +Table Op Msg_type Msg_text +test.t1 optimize status OK +Table Op Msg_type Msg_text +test.t1 analyze status OK +Table Op Msg_type Msg_text +test.t1 repair status OK +Table Op Msg_type Msg_text +test.t1 check status OK +select lower(table_name) as tname +from information_schema.tables +where table_schema=database() +order by tname; +tname +t1 +temp0 +var_pop +var_samp +SELECT * FROM t1 ORDER BY i LIMIT 10; +i j k +1 1 1 +2 2 2 +3 3 3 +4 4 4 +5 5 5 +6 6 6 +7 7 7 +8 8 8 +9 9 9 +10 10 10 +SELECT COUNT(*) FROM t1; +COUNT(*) +1000 +CREATE TABLE ti( +id INT, +amount DECIMAL(7,2), +tr_date DATE +) ENGINE=ROCKSDB +PARTITION BY HASH(MONTH(tr_date)) +PARTITIONS 6; +CREATE TABLE members ( +firstname VARCHAR(25) NOT NULL, +lastname VARCHAR(25) NOT NULL, +username VARCHAR(16) NOT NULL, +email VARCHAR(35), +joined DATE NOT NULL +) ENGINE=ROCKSDB +PARTITION BY KEY(joined) +PARTITIONS 6; +CREATE TABLE 
members_2 ( +firstname VARCHAR(25) NOT NULL, +lastname VARCHAR(25) NOT NULL, +username VARCHAR(16) NOT NULL, +email VARCHAR(35), +joined DATE NOT NULL +) ENGINE=ROCKSDB +PARTITION BY RANGE(YEAR(joined)) ( +PARTITION p0 VALUES LESS THAN (1960), +PARTITION p1 VALUES LESS THAN (1970), +PARTITION p2 VALUES LESS THAN (1980), +PARTITION p3 VALUES LESS THAN (1990), +PARTITION p4 VALUES LESS THAN MAXVALUE +); +CREATE TABLE t2 (val INT) +ENGINE=ROCKSDB +PARTITION BY LIST(val)( +PARTITION mypart VALUES IN (1,3,5), +PARTITION MyPart VALUES IN (2,4,6) +); +ERROR HY000: Duplicate partition name MyPart +CREATE TABLE employees ( +id INT NOT NULL, +fname VARCHAR(30), +lname VARCHAR(30), +hired DATE NOT NULL DEFAULT '1970-01-01', +separated DATE NOT NULL DEFAULT '9999-12-31', +job_code INT NOT NULL, +store_id INT NOT NULL +) ENGINE=ROCKSDB +PARTITION BY RANGE (store_id) ( +PARTITION p0 VALUES LESS THAN (6), +PARTITION p1 VALUES LESS THAN (11), +PARTITION p2 VALUES LESS THAN (16), +PARTITION p3 VALUES LESS THAN MAXVALUE +); +CREATE TABLE employees_2 ( +id INT NOT NULL, +fname VARCHAR(30), +lname VARCHAR(30), +hired DATE NOT NULL DEFAULT '1970-01-01', +separated DATE NOT NULL DEFAULT '9999-12-31', +job_code INT NOT NULL, +store_id INT NOT NULL +) ENGINE=ROCKSDB +PARTITION BY RANGE (job_code) ( +PARTITION p0 VALUES LESS THAN (100), +PARTITION p1 VALUES LESS THAN (1000), +PARTITION p2 VALUES LESS THAN (10000) +); +CREATE TABLE employees_3 ( +id INT NOT NULL, +fname VARCHAR(30), +lname VARCHAR(30), +hired DATE NOT NULL DEFAULT '1970-01-01', +separated DATE NOT NULL DEFAULT '9999-12-31', +job_code INT, +store_id INT +) ENGINE=ROCKSDB +PARTITION BY RANGE (YEAR(separated)) ( +PARTITION p0 VALUES LESS THAN (1991), +PARTITION p1 VALUES LESS THAN (1996), +PARTITION p2 VALUES LESS THAN (2001), +PARTITION p3 VALUES LESS THAN MAXVALUE +); +CREATE TABLE quarterly_report_status ( +report_id INT NOT NULL, +report_status VARCHAR(20) NOT NULL, +report_updated TIMESTAMP NOT NULL DEFAULT 
CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP +) ENGINE=ROCKSDB +PARTITION BY RANGE (UNIX_TIMESTAMP(report_updated)) ( +PARTITION p0 VALUES LESS THAN ( UNIX_TIMESTAMP('2008-01-01 00:00:00') ), +PARTITION p1 VALUES LESS THAN ( UNIX_TIMESTAMP('2008-04-01 00:00:00') ), +PARTITION p2 VALUES LESS THAN ( UNIX_TIMESTAMP('2008-07-01 00:00:00') ), +PARTITION p3 VALUES LESS THAN ( UNIX_TIMESTAMP('2008-10-01 00:00:00') ), +PARTITION p4 VALUES LESS THAN ( UNIX_TIMESTAMP('2009-01-01 00:00:00') ), +PARTITION p5 VALUES LESS THAN ( UNIX_TIMESTAMP('2009-04-01 00:00:00') ), +PARTITION p6 VALUES LESS THAN ( UNIX_TIMESTAMP('2009-07-01 00:00:00') ), +PARTITION p7 VALUES LESS THAN ( UNIX_TIMESTAMP('2009-10-01 00:00:00') ), +PARTITION p8 VALUES LESS THAN ( UNIX_TIMESTAMP('2010-01-01 00:00:00') ), +PARTITION p9 VALUES LESS THAN (MAXVALUE) +); +CREATE TABLE employees_4 ( +id INT NOT NULL, +fname VARCHAR(30), +lname VARCHAR(30), +hired DATE NOT NULL DEFAULT '1970-01-01', +separated DATE NOT NULL DEFAULT '9999-12-31', +job_code INT, +store_id INT +) ENGINE=ROCKSDB +PARTITION BY LIST(store_id) ( +PARTITION pNorth VALUES IN (3,5,6,9,17), +PARTITION pEast VALUES IN (1,2,10,11,19,20), +PARTITION pWest VALUES IN (4,12,13,14,18), +PARTITION pCentral VALUES IN (7,8,15,16) +); +CREATE TABLE h2 ( +c1 INT, +c2 INT +) ENGINE=ROCKSDB +PARTITION BY LIST(c1) ( +PARTITION p0 VALUES IN (1, 4, 7), +PARTITION p1 VALUES IN (2, 5, 8) +); +INSERT INTO h2 VALUES (3, 5); +ERROR HY000: Table has no partition for value 3 +CREATE TABLE rcx ( +a INT, +b INT, +c CHAR(3), +d INT +) ENGINE=ROCKSDB +PARTITION BY RANGE COLUMNS(a,d,c) ( +PARTITION p0 VALUES LESS THAN (5,10,'ggg'), +PARTITION p1 VALUES LESS THAN (10,20,'mmm'), +PARTITION p2 VALUES LESS THAN (15,30,'sss'), +PARTITION p3 VALUES LESS THAN (MAXVALUE,MAXVALUE,MAXVALUE) +); +CREATE TABLE r1 ( +a INT, +b INT +) ENGINE=ROCKSDB +PARTITION BY RANGE (a) ( +PARTITION p0 VALUES LESS THAN (5), +PARTITION p1 VALUES LESS THAN (MAXVALUE) +); +INSERT INTO r1 VALUES (5,10), 
(5,11), (5,12); +CREATE TABLE rc1 ( +a INT, +b INT +) ENGINE=ROCKSDB +PARTITION BY RANGE COLUMNS(a, b) ( +PARTITION p0 VALUES LESS THAN (5, 12), +PARTITION p3 VALUES LESS THAN (MAXVALUE, MAXVALUE) +); +INSERT INTO rc1 VALUES (5,10), (5,11), (5,12); +SELECT (5,10) < (5,12), (5,11) < (5,12), (5,12) < (5,12); +(5,10) < (5,12) (5,11) < (5,12) (5,12) < (5,12) +1 1 0 +CREATE TABLE rx ( +a INT, +b INT +) ENGINE=ROCKSDB +PARTITION BY RANGE COLUMNS (a) ( +PARTITION p0 VALUES LESS THAN (5), +PARTITION p1 VALUES LESS THAN (MAXVALUE) +); +INSERT INTO rx VALUES (5,10), (5,11), (5,12); +CREATE TABLE rc2 ( +a INT, +b INT +) ENGINE=ROCKSDB +PARTITION BY RANGE COLUMNS(a,b) ( +PARTITION p0 VALUES LESS THAN (0,10), +PARTITION p1 VALUES LESS THAN (10,20), +PARTITION p2 VALUES LESS THAN (10,30), +PARTITION p3 VALUES LESS THAN (MAXVALUE,MAXVALUE) +); +CREATE TABLE rc3 ( +a INT, +b INT +) ENGINE=ROCKSDB +PARTITION BY RANGE COLUMNS(a,b) ( +PARTITION p0 VALUES LESS THAN (0,10), +PARTITION p1 VALUES LESS THAN (10,20), +PARTITION p2 VALUES LESS THAN (10,30), +PARTITION p3 VALUES LESS THAN (10,35), +PARTITION p4 VALUES LESS THAN (20,40), +PARTITION p5 VALUES LESS THAN (MAXVALUE,MAXVALUE) +); +CREATE TABLE rc4 ( +a INT, +b INT, +c INT +) ENGINE=ROCKSDB +PARTITION BY RANGE COLUMNS(a,b,c) ( +PARTITION p0 VALUES LESS THAN (0,25,50), +PARTITION p1 VALUES LESS THAN (10,20,100), +PARTITION p2 VALUES LESS THAN (10,30,50), +PARTITION p3 VALUES LESS THAN (MAXVALUE,MAXVALUE,MAXVALUE) +); +SELECT (0,25,50) < (10,20,100), (10,20,100) < (10,30,50); +(0,25,50) < (10,20,100) (10,20,100) < (10,30,50) +1 1 +CREATE TABLE rcf ( +a INT, +b INT, +c INT +) ENGINE=ROCKSDB +PARTITION BY RANGE COLUMNS(a,b,c) ( +PARTITION p0 VALUES LESS THAN (0,25,50), +PARTITION p1 VALUES LESS THAN (20,20,100), +PARTITION p2 VALUES LESS THAN (10,30,50), +PARTITION p3 VALUES LESS THAN (MAXVALUE,MAXVALUE,MAXVALUE) +); +ERROR HY000: VALUES LESS THAN value must be strictly increasing for each partition +CREATE TABLE employees_by_lname ( 
+id INT NOT NULL, +fname VARCHAR(30), +lname VARCHAR(30), +hired DATE NOT NULL DEFAULT '1970-01-01', +separated DATE NOT NULL DEFAULT '9999-12-31', +job_code INT NOT NULL, +store_id INT NOT NULL +) ENGINE=ROCKSDB +PARTITION BY RANGE COLUMNS (lname) ( +PARTITION p0 VALUES LESS THAN ('g'), +PARTITION p1 VALUES LESS THAN ('m'), +PARTITION p2 VALUES LESS THAN ('t'), +PARTITION p3 VALUES LESS THAN (MAXVALUE) +); +ALTER TABLE employees_by_lname PARTITION BY RANGE COLUMNS (lname) ( +PARTITION p0 VALUES LESS THAN ('g'), +PARTITION p1 VALUES LESS THAN ('m'), +PARTITION p2 VALUES LESS THAN ('t'), +PARTITION p3 VALUES LESS THAN (MAXVALUE) +); +ALTER TABLE employees_by_lname PARTITION BY RANGE COLUMNS (hired) ( +PARTITION p0 VALUES LESS THAN ('1970-01-01'), +PARTITION p1 VALUES LESS THAN ('1980-01-01'), +PARTITION p2 VALUES LESS THAN ('1990-01-01'), +PARTITION p3 VALUES LESS THAN ('2000-01-01'), +PARTITION p4 VALUES LESS THAN ('2010-01-01'), +PARTITION p5 VALUES LESS THAN (MAXVALUE) +); +CREATE TABLE customers_1 ( +first_name VARCHAR(25), +last_name VARCHAR(25), +street_1 VARCHAR(30), +street_2 VARCHAR(30), +city VARCHAR(15), +renewal DATE +) ENGINE=ROCKSDB +PARTITION BY LIST COLUMNS(city) ( +PARTITION pRegion_1 VALUES IN('Oskarshamn', 'Högsby', 'Mönsterås'), +PARTITION pRegion_2 VALUES IN('Vimmerby', 'Hultsfred', 'Västervik'), +PARTITION pRegion_3 VALUES IN('Nässjö', 'Eksjö', 'Vetlanda'), +PARTITION pRegion_4 VALUES IN('Uppvidinge', 'Alvesta', 'Växjo') +); +CREATE TABLE customers_2 ( +first_name VARCHAR(25), +last_name VARCHAR(25), +street_1 VARCHAR(30), +street_2 VARCHAR(30), +city VARCHAR(15), +renewal DATE +) ENGINE=ROCKSDB +PARTITION BY LIST COLUMNS(renewal) ( +PARTITION pWeek_1 VALUES IN('2010-02-01', '2010-02-02', '2010-02-03', +'2010-02-04', '2010-02-05', '2010-02-06', '2010-02-07'), +PARTITION pWeek_2 VALUES IN('2010-02-08', '2010-02-09', '2010-02-10', +'2010-02-11', '2010-02-12', '2010-02-13', '2010-02-14'), +PARTITION pWeek_3 VALUES IN('2010-02-15', '2010-02-16', 
'2010-02-17', +'2010-02-18', '2010-02-19', '2010-02-20', '2010-02-21'), +PARTITION pWeek_4 VALUES IN('2010-02-22', '2010-02-23', '2010-02-24', +'2010-02-25', '2010-02-26', '2010-02-27', '2010-02-28') +); +CREATE TABLE customers_3 ( +first_name VARCHAR(25), +last_name VARCHAR(25), +street_1 VARCHAR(30), +street_2 VARCHAR(30), +city VARCHAR(15), +renewal DATE +) ENGINE=ROCKSDB +PARTITION BY RANGE COLUMNS(renewal) ( +PARTITION pWeek_1 VALUES LESS THAN('2010-02-09'), +PARTITION pWeek_2 VALUES LESS THAN('2010-02-15'), +PARTITION pWeek_3 VALUES LESS THAN('2010-02-22'), +PARTITION pWeek_4 VALUES LESS THAN('2010-03-01') +); +CREATE TABLE employees_hash ( +id INT NOT NULL, +fname VARCHAR(30), +lname VARCHAR(30), +hired DATE NOT NULL DEFAULT '1970-01-01', +separated DATE NOT NULL DEFAULT '9999-12-31', +job_code INT, +store_id INT +) ENGINE=ROCKSDB +PARTITION BY HASH(store_id) +PARTITIONS 4; +CREATE TABLE employees_hash_1 ( +id INT NOT NULL, +fname VARCHAR(30), +lname VARCHAR(30), +hired DATE NOT NULL DEFAULT '1970-01-01', +separated DATE NOT NULL DEFAULT '9999-12-31', +job_code INT, +store_id INT +) ENGINE=ROCKSDB +PARTITION BY HASH( YEAR(hired) ) +PARTITIONS 4; +CREATE TABLE t1_hash ( +col1 INT, +col2 CHAR(5), +col3 DATE +) ENGINE=ROCKSDB +PARTITION BY HASH( YEAR(col3) ) +PARTITIONS 4; +CREATE TABLE employees_linear_hash ( +id INT NOT NULL, +fname VARCHAR(30), +lname VARCHAR(30), +hired DATE NOT NULL DEFAULT '1970-01-01', +separated DATE NOT NULL DEFAULT '9999-12-31', +job_code INT, +store_id INT +) ENGINE=ROCKSDB +PARTITION BY LINEAR HASH( YEAR(hired) ) +PARTITIONS 4; +CREATE TABLE t1_linear_hash ( +col1 INT, +col2 CHAR(5), +col3 DATE +) ENGINE=ROCKSDB +PARTITION BY LINEAR HASH( YEAR(col3) ) +PARTITIONS 6; +CREATE TABLE k1 ( +id INT NOT NULL PRIMARY KEY, +name VARCHAR(20) +) ENGINE=ROCKSDB +PARTITION BY KEY() +PARTITIONS 2; +CREATE TABLE k2 ( +id INT NOT NULL, +name VARCHAR(20), +UNIQUE KEY (id) +) ENGINE=ROCKSDB +PARTITION BY KEY() +PARTITIONS 2; +CREATE TABLE tm1 ( +s1 
CHAR(32) PRIMARY KEY +) ENGINE=ROCKSDB +PARTITION BY KEY(s1) +PARTITIONS 10; +CREATE TABLE tk ( +col1 INT NOT NULL, +col2 CHAR(5), +col3 DATE +) ENGINE=ROCKSDB +PARTITION BY LINEAR KEY (col1) +PARTITIONS 3; +CREATE TABLE ts ( +id INT, +purchased DATE +) ENGINE=ROCKSDB +PARTITION BY RANGE( YEAR(purchased) ) +SUBPARTITION BY HASH( TO_DAYS(purchased) ) +SUBPARTITIONS 2 ( +PARTITION p0 VALUES LESS THAN (1990), +PARTITION p1 VALUES LESS THAN (2000), +PARTITION p2 VALUES LESS THAN MAXVALUE +); +CREATE TABLE ts_1 ( +id INT, +purchased DATE +) ENGINE=ROCKSDB +PARTITION BY RANGE( YEAR(purchased) ) +SUBPARTITION BY HASH( TO_DAYS(purchased) ) ( +PARTITION p0 VALUES LESS THAN (1990) ( +SUBPARTITION s0, +SUBPARTITION s1 +), +PARTITION p1 VALUES LESS THAN (2000) ( +SUBPARTITION s2, +SUBPARTITION s3 +), +PARTITION p2 VALUES LESS THAN MAXVALUE ( +SUBPARTITION s4, +SUBPARTITION s5 +) +); +CREATE TABLE ts_2 ( +id INT, +purchased DATE +) ENGINE=ROCKSDB +PARTITION BY RANGE( YEAR(purchased) ) +SUBPARTITION BY HASH( TO_DAYS(purchased) ) ( +PARTITION p0 VALUES LESS THAN (1990) ( +SUBPARTITION s0, +SUBPARTITION s1 +), +PARTITION p1 VALUES LESS THAN (2000), +PARTITION p2 VALUES LESS THAN MAXVALUE ( +SUBPARTITION s2, +SUBPARTITION s3 +) +); +ERROR 42000: Wrong number of subpartitions defined, mismatch with previous setting near ' +PARTITION p2 VALUES LESS THAN MAXVALUE ( +SUBPARTITION s2, +SUBPARTITION s3 +) +)' at line 11 +CREATE TABLE ts_3 ( +id INT, +purchased DATE +) ENGINE=ROCKSDB +PARTITION BY RANGE( YEAR(purchased) ) +SUBPARTITION BY HASH( TO_DAYS(purchased) ) ( +PARTITION p0 VALUES LESS THAN (1990) ( +SUBPARTITION s0, +SUBPARTITION s1 +), +PARTITION p1 VALUES LESS THAN (2000) ( +SUBPARTITION s2, +SUBPARTITION s3 +), +PARTITION p2 VALUES LESS THAN MAXVALUE ( +SUBPARTITION s4, +SUBPARTITION s5 +) +); +CREATE TABLE ts_4 ( +id INT, +purchased DATE +) ENGINE=ROCKSDB +PARTITION BY RANGE( YEAR(purchased) ) +SUBPARTITION BY HASH( TO_DAYS(purchased) ) ( +PARTITION p0 VALUES LESS THAN (1990) 
( +SUBPARTITION s0, +SUBPARTITION s1 +), +PARTITION p1 VALUES LESS THAN (2000) ( +SUBPARTITION s2, +SUBPARTITION s3 +), +PARTITION p2 VALUES LESS THAN MAXVALUE ( +SUBPARTITION s4, +SUBPARTITION s5 +) +); +CREATE TABLE ts_5 ( +id INT, +purchased DATE +) ENGINE=ROCKSDB +PARTITION BY RANGE(YEAR(purchased)) +SUBPARTITION BY HASH( TO_DAYS(purchased) ) ( +PARTITION p0 VALUES LESS THAN (1990) ( +SUBPARTITION s0a, +SUBPARTITION s0b +), +PARTITION p1 VALUES LESS THAN (2000) ( +SUBPARTITION s1a, +SUBPARTITION s1b +), +PARTITION p2 VALUES LESS THAN MAXVALUE ( +SUBPARTITION s2a, +SUBPARTITION s2b +) +); +CREATE TABLE trb3 ( +id INT, +name VARCHAR(50), +purchased DATE +) ENGINE=ROCKSDB +PARTITION BY RANGE( YEAR(purchased) ) ( +PARTITION p0 VALUES LESS THAN (1990), +PARTITION p1 VALUES LESS THAN (1995), +PARTITION p2 VALUES LESS THAN (2000), +PARTITION p3 VALUES LESS THAN (2005) +); +ALTER TABLE trb3 PARTITION BY KEY(id) PARTITIONS 2; +CREATE TABLE tr ( +id INT, +name VARCHAR(50), +purchased DATE +) ENGINE=ROCKSDB +PARTITION BY RANGE( YEAR(purchased) ) ( +PARTITION p0 VALUES LESS THAN (1990), +PARTITION p1 VALUES LESS THAN (1995), +PARTITION p2 VALUES LESS THAN (2000), +PARTITION p3 VALUES LESS THAN (2005) +); +INSERT INTO tr VALUES +(1, 'desk organiser', '2003-10-15'), +(2, 'CD player', '1993-11-05'), +(3, 'TV set', '1996-03-10'), +(4, 'bookcase', '1982-01-10'), +(5, 'exercise bike', '2004-05-09'), +(6, 'sofa', '1987-06-05'), +(7, 'popcorn maker', '2001-11-22'), +(8, 'aquarium', '1992-08-04'), +(9, 'study desk', '1984-09-16'), +(10, 'lava lamp', '1998-12-25'); +SELECT * FROM tr WHERE purchased BETWEEN '1995-01-01' AND '1999-12-31'; +id name purchased +3 TV set 1996-03-10 +10 lava lamp 1998-12-25 +ALTER TABLE tr DROP PARTITION p2; +SELECT * FROM tr WHERE purchased BETWEEN '1995-01-01' AND '1999-12-31'; +id name purchased +CREATE TABLE members_3 ( +id INT, +fname VARCHAR(25), +lname VARCHAR(25), +dob DATE +) ENGINE=ROCKSDB +PARTITION BY RANGE( YEAR(dob) ) ( +PARTITION p0 VALUES 
LESS THAN (1970), +PARTITION p1 VALUES LESS THAN (1980), +PARTITION p2 VALUES LESS THAN (1990) +); +ALTER TABLE members_3 ADD PARTITION (PARTITION p3 VALUES LESS THAN (2000)); +ALTER TABLE members_3 ADD PARTITION (PARTITION n VALUES LESS THAN (1960)); +ERROR HY000: VALUES LESS THAN value must be strictly increasing for each partition +CREATE TABLE clients ( +id INT, +fname VARCHAR(30), +lname VARCHAR(30), +signed DATE +) ENGINE=ROCKSDB +PARTITION BY HASH( MONTH(signed) ) +PARTITIONS 12; +ALTER TABLE clients COALESCE PARTITION 4; +CREATE TABLE clients_lk ( +id INT, +fname VARCHAR(30), +lname VARCHAR(30), +signed DATE +) ENGINE=ROCKSDB +PARTITION BY LINEAR KEY(signed) +PARTITIONS 12; +ALTER TABLE clients COALESCE PARTITION 18; +ERROR HY000: Cannot remove all partitions, use DROP TABLE instead +ALTER TABLE clients ADD PARTITION PARTITIONS 6; +CREATE TABLE trb1 ( +id INT, +name VARCHAR(50), +purchased DATE +) ENGINE=ROCKSDB +PARTITION BY RANGE(id) ( +PARTITION p0 VALUES LESS THAN (3), +PARTITION p1 VALUES LESS THAN (7), +PARTITION p2 VALUES LESS THAN (9), +PARTITION p3 VALUES LESS THAN (11) +); +INSERT INTO trb1 VALUES +(1, 'desk organiser', '2003-10-15'), +(2, 'CD player', '1993-11-05'), +(3, 'TV set', '1996-03-10'), +(4, 'bookcase', '1982-01-10'), +(5, 'exercise bike', '2004-05-09'), +(6, 'sofa', '1987-06-05'), +(7, 'popcorn maker', '2001-11-22'), +(8, 'aquarium', '1992-08-04'), +(9, 'study desk', '1984-09-16'), +(10, 'lava lamp', '1998-12-25'); +ALTER TABLE trb1 ADD PRIMARY KEY (id); +DROP TABLE IF EXISTS t1; +DROP TABLE IF EXISTS VAR_POP; +DROP TABLE IF EXISTS TEMP0; +DROP TABLE IF EXISTS VAR_SAMP; +DROP TABLE IF EXISTS ti; +DROP TABLE IF EXISTS members; +DROP TABLE IF EXISTS members_2; +DROP TABLE IF EXISTS employees; +DROP TABLE IF EXISTS employees_2; +DROP TABLE IF EXISTS employees_3; +DROP TABLE IF EXISTS quarterly_report_status; +DROP TABLE IF EXISTS employees_4; +DROP TABLE IF EXISTS h2; +DROP TABLE IF EXISTS rcx; +DROP TABLE IF EXISTS r1; +DROP TABLE IF 
EXISTS rc1; +DROP TABLE IF EXISTS rx; +DROP TABLE IF EXISTS rc2; +DROP TABLE IF EXISTS rc3; +DROP TABLE IF EXISTS rc4; +DROP TABLE IF EXISTS employees_by_lname; +DROP TABLE IF EXISTS customers_1; +DROP TABLE IF EXISTS customers_2; +DROP TABLE IF EXISTS customers_3; +DROP TABLE IF EXISTS employees_hash; +DROP TABLE IF EXISTS employees_hash_1; +DROP TABLE IF EXISTS t1_hash; +DROP TABLE IF EXISTS employees_linear_hash; +DROP TABLE IF EXISTS t1_linear_hash; +DROP TABLE IF EXISTS k1; +DROP TABLE IF EXISTS k2; +DROP TABLE IF EXISTS tm1; +DROP TABLE IF EXISTS tk; +DROP TABLE IF EXISTS ts; +DROP TABLE IF EXISTS ts_1; +DROP TABLE IF EXISTS ts_3; +DROP TABLE IF EXISTS ts_4; +DROP TABLE IF EXISTS ts_5; +DROP TABLE IF EXISTS trb3; +DROP TABLE IF EXISTS tr; +DROP TABLE IF EXISTS members_3; +DROP TABLE IF EXISTS clients; +DROP TABLE IF EXISTS clients_lk; +DROP TABLE IF EXISTS trb1; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/perf_context.result b/storage/rocksdb/mysql-test/rocksdb/r/perf_context.result new file mode 100644 index 0000000000000..2e8610d43bd19 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/perf_context.result @@ -0,0 +1,160 @@ +DROP TABLE IF EXISTS t1; +DROP TABLE IF EXISTS t2; +SET @prior_rocksdb_perf_context_level = @@rocksdb_perf_context_level; +SET GLOBAL rocksdb_perf_context_level=3; +CREATE TABLE t1 (i INT, j INT, PRIMARY KEY (i)) ENGINE = ROCKSDB; +CREATE TABLE t2 (k INT, PRIMARY KEY (k)) ENGINE = ROCKSDB; +INSERT INTO t1 VALUES (1,1), (2,2), (3,3), (4,4), (5,5); +SELECT * FROM INFORMATION_SCHEMA.ROCKSDB_PERF_CONTEXT WHERE TABLE_NAME = 't1'; +TABLE_SCHEMA TABLE_NAME PARTITION_NAME STAT_TYPE VALUE +test t1 NULL USER_KEY_COMPARISON_COUNT # +test t1 NULL BLOCK_CACHE_HIT_COUNT # +test t1 NULL BLOCK_READ_COUNT # +test t1 NULL BLOCK_READ_BYTE # +test t1 NULL BLOCK_READ_TIME # +test t1 NULL BLOCK_CHECKSUM_TIME # +test t1 NULL BLOCK_DECOMPRESS_TIME # +test t1 NULL INTERNAL_KEY_SKIPPED_COUNT # +test t1 NULL INTERNAL_DELETE_SKIPPED_COUNT # +test t1 
NULL GET_SNAPSHOT_TIME # +test t1 NULL GET_FROM_MEMTABLE_TIME # +test t1 NULL GET_FROM_MEMTABLE_COUNT # +test t1 NULL GET_POST_PROCESS_TIME # +test t1 NULL GET_FROM_OUTPUT_FILES_TIME # +test t1 NULL SEEK_ON_MEMTABLE_TIME # +test t1 NULL SEEK_ON_MEMTABLE_COUNT # +test t1 NULL SEEK_CHILD_SEEK_TIME # +test t1 NULL SEEK_CHILD_SEEK_COUNT # +test t1 NULL SEEK_IN_HEAP_TIME # +test t1 NULL SEEK_INTERNAL_SEEK_TIME # +test t1 NULL FIND_NEXT_USER_ENTRY_TIME # +test t1 NULL WRITE_WAL_TIME # +test t1 NULL WRITE_MEMTABLE_TIME # +test t1 NULL WRITE_DELAY_TIME # +test t1 NULL WRITE_PRE_AND_POST_PROCESS_TIME # +test t1 NULL DB_MUTEX_LOCK_NANOS # +test t1 NULL DB_CONDITION_WAIT_NANOS # +test t1 NULL MERGE_OPERATOR_TIME_NANOS # +test t1 NULL READ_INDEX_BLOCK_NANOS # +test t1 NULL READ_FILTER_BLOCK_NANOS # +test t1 NULL NEW_TABLE_BLOCK_ITER_NANOS # +test t1 NULL NEW_TABLE_ITERATOR_NANOS # +test t1 NULL BLOCK_SEEK_NANOS # +test t1 NULL FIND_TABLE_NANOS # +test t1 NULL IO_THREAD_POOL_ID # +test t1 NULL IO_BYTES_WRITTEN # +test t1 NULL IO_BYTES_READ # +test t1 NULL IO_OPEN_NANOS # +test t1 NULL IO_ALLOCATE_NANOS # +test t1 NULL IO_WRITE_NANOS # +test t1 NULL IO_READ_NANOS # +test t1 NULL IO_RANGE_SYNC_NANOS # +test t1 NULL IO_LOGGER_NANOS # +SELECT * FROM INFORMATION_SCHEMA.ROCKSDB_PERF_CONTEXT_GLOBAL; +STAT_TYPE VALUE +USER_KEY_COMPARISON_COUNT # +BLOCK_CACHE_HIT_COUNT # +BLOCK_READ_COUNT # +BLOCK_READ_BYTE # +BLOCK_READ_TIME # +BLOCK_CHECKSUM_TIME # +BLOCK_DECOMPRESS_TIME # +INTERNAL_KEY_SKIPPED_COUNT # +INTERNAL_DELETE_SKIPPED_COUNT # +GET_SNAPSHOT_TIME # +GET_FROM_MEMTABLE_TIME # +GET_FROM_MEMTABLE_COUNT # +GET_POST_PROCESS_TIME # +GET_FROM_OUTPUT_FILES_TIME # +SEEK_ON_MEMTABLE_TIME # +SEEK_ON_MEMTABLE_COUNT # +SEEK_CHILD_SEEK_TIME # +SEEK_CHILD_SEEK_COUNT # +SEEK_IN_HEAP_TIME # +SEEK_INTERNAL_SEEK_TIME # +FIND_NEXT_USER_ENTRY_TIME # +WRITE_WAL_TIME # +WRITE_MEMTABLE_TIME # +WRITE_DELAY_TIME # +WRITE_PRE_AND_POST_PROCESS_TIME # +DB_MUTEX_LOCK_NANOS # +DB_CONDITION_WAIT_NANOS # 
+MERGE_OPERATOR_TIME_NANOS # +READ_INDEX_BLOCK_NANOS # +READ_FILTER_BLOCK_NANOS # +NEW_TABLE_BLOCK_ITER_NANOS # +NEW_TABLE_ITERATOR_NANOS # +BLOCK_SEEK_NANOS # +FIND_TABLE_NANOS # +IO_THREAD_POOL_ID # +IO_BYTES_WRITTEN # +IO_BYTES_READ # +IO_OPEN_NANOS # +IO_ALLOCATE_NANOS # +IO_WRITE_NANOS # +IO_READ_NANOS # +IO_RANGE_SYNC_NANOS # +IO_LOGGER_NANOS # +SELECT * FROM INFORMATION_SCHEMA.ROCKSDB_PERF_CONTEXT +WHERE TABLE_NAME = 't1' +AND STAT_TYPE in ('INTERNAL_KEY_SKIPPED_COUNT', 'INTERNAL_DELETE_SKIPPED_COUNT'); +TABLE_SCHEMA TABLE_NAME PARTITION_NAME STAT_TYPE VALUE +test t1 NULL INTERNAL_KEY_SKIPPED_COUNT 0 +test t1 NULL INTERNAL_DELETE_SKIPPED_COUNT 0 +SELECT * FROM t1; +i j +1 1 +2 2 +3 3 +4 4 +5 5 +SELECT * FROM INFORMATION_SCHEMA.ROCKSDB_PERF_CONTEXT +WHERE TABLE_NAME = 't1' +AND STAT_TYPE in ('INTERNAL_KEY_SKIPPED_COUNT', 'INTERNAL_DELETE_SKIPPED_COUNT'); +TABLE_SCHEMA TABLE_NAME PARTITION_NAME STAT_TYPE VALUE +test t1 NULL INTERNAL_KEY_SKIPPED_COUNT 5 +test t1 NULL INTERNAL_DELETE_SKIPPED_COUNT 0 +SELECT * FROM t1 WHERE j BETWEEN 1 AND 5; +i j +1 1 +2 2 +3 3 +4 4 +5 5 +SELECT * FROM INFORMATION_SCHEMA.ROCKSDB_PERF_CONTEXT +WHERE TABLE_NAME = 't1' +AND STAT_TYPE in ('INTERNAL_KEY_SKIPPED_COUNT', 'INTERNAL_DELETE_SKIPPED_COUNT'); +TABLE_SCHEMA TABLE_NAME PARTITION_NAME STAT_TYPE VALUE +test t1 NULL INTERNAL_KEY_SKIPPED_COUNT 10 +test t1 NULL INTERNAL_DELETE_SKIPPED_COUNT 0 +BEGIN; +INSERT INTO t2 VALUES (1), (2); +INSERT INTO t2 VALUES (3), (4); +COMMIT; +SELECT COUNT(*) from INFORMATION_SCHEMA.ROCKSDB_PERF_CONTEXT +WHERE TABLE_NAME = 't2' +AND STAT_TYPE = 'IO_WRITE_NANOS' +AND VALUE > 0; +COUNT(*) +0 +SELECT COUNT(*) from INFORMATION_SCHEMA.ROCKSDB_PERF_CONTEXT_GLOBAL +WHERE STAT_TYPE = 'IO_WRITE_NANOS' AND VALUE > 0; +COUNT(*) +1 +SELECT VALUE INTO @a from INFORMATION_SCHEMA.ROCKSDB_PERF_CONTEXT_GLOBAL +WHERE STAT_TYPE = 'IO_WRITE_NANOS'; +INSERT INTO t2 VALUES (5), (6), (7), (8); +SELECT COUNT(*) from INFORMATION_SCHEMA.ROCKSDB_PERF_CONTEXT +WHERE TABLE_NAME 
= 't2' +AND STAT_TYPE = 'IO_WRITE_NANOS' +AND VALUE > 0; +COUNT(*) +1 +SELECT VALUE INTO @b from INFORMATION_SCHEMA.ROCKSDB_PERF_CONTEXT_GLOBAL +WHERE STAT_TYPE = 'IO_WRITE_NANOS'; +SELECT CASE WHEN @b - @a > 0 THEN 'true' ELSE 'false' END; +CASE WHEN @b - @a > 0 THEN 'true' ELSE 'false' END +true +DROP TABLE t1; +DROP TABLE t2; +SET GLOBAL rocksdb_perf_context_level = @prior_rocksdb_perf_context_level; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/persistent_cache.result b/storage/rocksdb/mysql-test/rocksdb/r/persistent_cache.result new file mode 100644 index 0000000000000..bc5739c2d9672 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/persistent_cache.result @@ -0,0 +1,11 @@ +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 (a int primary key) ENGINE=ROCKSDB; +insert into t1 values (1); +set global rocksdb_force_flush_memtable_now=1; +select * from t1 where a = 1; +a +1 +select * from t1 where a = 1; +a +1 +drop table t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/read_only_tx.result b/storage/rocksdb/mysql-test/rocksdb/r/read_only_tx.result new file mode 100644 index 0000000000000..b83f0a474ccb0 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/read_only_tx.result @@ -0,0 +1,38 @@ +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 (id INT, value int, PRIMARY KEY (id), INDEX (value)) ENGINE=RocksDB; +INSERT INTO t1 VALUES (1,1); +select variable_value into @p from information_schema.global_status where variable_name='rocksdb_number_sst_entry_put'; +select variable_value into @s from information_schema.global_status where variable_name='rocksdb_number_sst_entry_singledelete'; +START TRANSACTION WITH CONSISTENT ROCKSDB SNAPSHOT; +File Position Gtid_executed +master-bin.000001 734 uuid:1-3 +select case when variable_value-@p < 1000 then 'true' else variable_value-@p end from information_schema.global_status where variable_name='rocksdb_number_sst_entry_put'; +case when variable_value-@p < 1000 then 'true' else variable_value-@p end +true +select case when 
variable_value-@s < 100 then 'true' else variable_value-@s end from information_schema.global_status where variable_name='rocksdb_number_sst_entry_singledelete'; +case when variable_value-@s < 100 then 'true' else variable_value-@s end +true +SELECT * FROM t1; +id value +1 1 +INSERT INTO t1 values (2, 2); +ERROR HY000: Can't execute updates when you started a transaction with START TRANSACTION WITH CONSISTENT [ROCKSDB] SNAPSHOT. +ROLLBACK; +SELECT * FROM t1; +id value +1 10001 +INSERT INTO t1 values (2, 2); +SELECT * FROM t1 ORDER BY id; +id value +1 10001 +2 2 +BEGIN; +SELECT COUNT(*) FROM t1; +COUNT(*) +9998 +COMMIT; +OPTIMIZE TABLE t1; +Table Op Msg_type Msg_text +test.t1 optimize status OK +DROP TABLE t1; +reset master; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/records_in_range.result b/storage/rocksdb/mysql-test/rocksdb/r/records_in_range.result new file mode 100644 index 0000000000000..89ebe76038421 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/records_in_range.result @@ -0,0 +1,210 @@ +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 ( +i INT, +a INT, +b INT, +PRIMARY KEY (i), +KEY ka(a), +KEY kb(b) comment 'rev:cf1' +) ENGINE = rocksdb; +explain extended select * from t1 where a> 500 and a< 750; +id select_type table type possible_keys key key_len ref rows filtered Extra +1 SIMPLE t1 range ka ka 5 NULL 1000 100.00 Using index condition +Warnings: +Note 1003 select `test`.`t1`.`i` AS `i`,`test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b` from `test`.`t1` where `test`.`t1`.`a` > 500 and `test`.`t1`.`a` < 750 +explain extended select * from t1 where a< 750; +id select_type table type possible_keys key key_len ref rows filtered Extra +1 SIMPLE t1 range ka ka 5 NULL 1000 100.00 Using index condition +Warnings: +Note 1003 select `test`.`t1`.`i` AS `i`,`test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b` from `test`.`t1` where `test`.`t1`.`a` < 750 +explain extended select * from t1 where a> 500; +id select_type table type possible_keys key key_len ref rows 
filtered Extra +1 SIMPLE t1 range ka ka 5 NULL 1000 100.00 Using index condition +Warnings: +Note 1003 select `test`.`t1`.`i` AS `i`,`test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b` from `test`.`t1` where `test`.`t1`.`a` > 500 +explain extended select * from t1 where a>=0 and a<=1000; +id select_type table type possible_keys key key_len ref rows filtered Extra +1 SIMPLE t1 range ka ka 5 NULL 1000 100.00 Using index condition +Warnings: +Note 1003 select `test`.`t1`.`i` AS `i`,`test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b` from `test`.`t1` where `test`.`t1`.`a` >= 0 and `test`.`t1`.`a` <= 1000 +explain extended select * from t1 where b> 500 and b< 750; +id select_type table type possible_keys key key_len ref rows filtered Extra +1 SIMPLE t1 range kb kb 5 NULL 1000 100.00 Using index condition +Warnings: +Note 1003 select `test`.`t1`.`i` AS `i`,`test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b` from `test`.`t1` where `test`.`t1`.`b` > 500 and `test`.`t1`.`b` < 750 +explain extended select * from t1 where b< 750; +id select_type table type possible_keys key key_len ref rows filtered Extra +1 SIMPLE t1 range kb kb 5 NULL 1000 100.00 Using index condition +Warnings: +Note 1003 select `test`.`t1`.`i` AS `i`,`test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b` from `test`.`t1` where `test`.`t1`.`b` < 750 +explain extended select * from t1 where b> 500; +id select_type table type possible_keys key key_len ref rows filtered Extra +1 SIMPLE t1 range kb kb 5 NULL 1000 100.00 Using index condition +Warnings: +Note 1003 select `test`.`t1`.`i` AS `i`,`test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b` from `test`.`t1` where `test`.`t1`.`b` > 500 +explain extended select * from t1 where b>=0 and b<=1000; +id select_type table type possible_keys key key_len ref rows filtered Extra +1 SIMPLE t1 range kb kb 5 NULL 1000 100.00 Using index condition +Warnings: +Note 1003 select `test`.`t1`.`i` AS `i`,`test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b` from `test`.`t1` where `test`.`t1`.`b` >= 0 and 
`test`.`t1`.`b` <= 1000 +set @save_rocksdb_records_in_range = @@session.rocksdb_records_in_range; +set rocksdb_records_in_range = 15000; +explain extended select a from t1 where a < 750; +id select_type table type possible_keys key key_len ref rows filtered Extra +1 SIMPLE t1 range ka ka 5 NULL 15000 100.00 Using where; Using index +Warnings: +Note 1003 select `test`.`t1`.`a` AS `a` from `test`.`t1` where `test`.`t1`.`a` < 750 +explain extended select a, b from t1 where a < 750; +id select_type table type possible_keys key key_len ref rows filtered Extra +1 SIMPLE t1 ALL ka NULL NULL NULL 20000 75.00 Using where +Warnings: +Note 1003 select `test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b` from `test`.`t1` where `test`.`t1`.`a` < 750 +explain extended select a from t1 where a = 700; +id select_type table type possible_keys key key_len ref rows filtered Extra +1 SIMPLE t1 ref ka ka 5 const 15000 100.00 Using index +Warnings: +Note 1003 select `test`.`t1`.`a` AS `a` from `test`.`t1` where `test`.`t1`.`a` = 700 +explain extended select a,b from t1 where a = 700; +id select_type table type possible_keys key key_len ref rows filtered Extra +1 SIMPLE t1 ref ka ka 5 const 15000 100.00 +Warnings: +Note 1003 select `test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b` from `test`.`t1` where `test`.`t1`.`a` = 700 +explain extended select a from t1 where a in (700, 800); +id select_type table type possible_keys key key_len ref rows filtered Extra +1 SIMPLE t1 index ka ka 5 NULL 20000 100.00 Using where; Using index +Warnings: +Note 1003 select `test`.`t1`.`a` AS `a` from `test`.`t1` where `test`.`t1`.`a` in (700,800) +explain extended select a,b from t1 where a in (700, 800); +id select_type table type possible_keys key key_len ref rows filtered Extra +1 SIMPLE t1 ALL ka NULL NULL NULL 20000 100.00 Using where +Warnings: +Note 1003 select `test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b` from `test`.`t1` where `test`.`t1`.`a` in (700,800) +set rocksdb_records_in_range=8000; +explain extended 
select a from t1 where a in (700, 800); +id select_type table type possible_keys key key_len ref rows filtered Extra +1 SIMPLE t1 range ka ka 5 NULL 16000 100.00 Using where; Using index +Warnings: +Note 1003 select `test`.`t1`.`a` AS `a` from `test`.`t1` where `test`.`t1`.`a` in (700,800) +explain extended select a,b from t1 where a in (700, 800); +id select_type table type possible_keys key key_len ref rows filtered Extra +1 SIMPLE t1 ALL ka NULL NULL NULL 20000 80.00 Using where +Warnings: +Note 1003 select `test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b` from `test`.`t1` where `test`.`t1`.`a` in (700,800) +set rocksdb_records_in_range = @save_rocksdb_records_in_range; +set global rocksdb_force_flush_memtable_now = true; +explain extended select * from t1 where a> 500 and a< 750; +id select_type table type possible_keys key key_len ref rows filtered Extra +1 SIMPLE t1 range ka ka 5 NULL 1000 100.00 Using index condition +Warnings: +Note 1003 select `test`.`t1`.`i` AS `i`,`test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b` from `test`.`t1` where `test`.`t1`.`a` > 500 and `test`.`t1`.`a` < 750 +explain extended select * from t1 where a< 750; +id select_type table type possible_keys key key_len ref rows filtered Extra +1 SIMPLE t1 range ka ka 5 NULL 1000 100.00 Using index condition +Warnings: +Note 1003 select `test`.`t1`.`i` AS `i`,`test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b` from `test`.`t1` where `test`.`t1`.`a` < 750 +explain extended select * from t1 where a> 500; +id select_type table type possible_keys key key_len ref rows filtered Extra +1 SIMPLE t1 range ka ka 5 NULL 1000 100.00 Using index condition +Warnings: +Note 1003 select `test`.`t1`.`i` AS `i`,`test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b` from `test`.`t1` where `test`.`t1`.`a` > 500 +explain extended select * from t1 where a>=0 and a<=1000; +id select_type table type possible_keys key key_len ref rows filtered Extra +1 SIMPLE t1 range ka ka 5 NULL 1000 100.00 Using index condition +Warnings: +Note 1003 
select `test`.`t1`.`i` AS `i`,`test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b` from `test`.`t1` where `test`.`t1`.`a` >= 0 and `test`.`t1`.`a` <= 1000 +explain extended select * from t1 where b> 500 and b< 750; +id select_type table type possible_keys key key_len ref rows filtered Extra +1 SIMPLE t1 range kb kb 5 NULL 1000 100.00 Using index condition +Warnings: +Note 1003 select `test`.`t1`.`i` AS `i`,`test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b` from `test`.`t1` where `test`.`t1`.`b` > 500 and `test`.`t1`.`b` < 750 +explain extended select * from t1 where b< 750; +id select_type table type possible_keys key key_len ref rows filtered Extra +1 SIMPLE t1 range kb kb 5 NULL 1000 100.00 Using index condition +Warnings: +Note 1003 select `test`.`t1`.`i` AS `i`,`test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b` from `test`.`t1` where `test`.`t1`.`b` < 750 +explain extended select * from t1 where b> 500; +id select_type table type possible_keys key key_len ref rows filtered Extra +1 SIMPLE t1 range kb kb 5 NULL 1000 100.00 Using index condition +Warnings: +Note 1003 select `test`.`t1`.`i` AS `i`,`test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b` from `test`.`t1` where `test`.`t1`.`b` > 500 +explain extended select * from t1 where b>=0 and b<=1000; +id select_type table type possible_keys key key_len ref rows filtered Extra +1 SIMPLE t1 range kb kb 5 NULL 1000 100.00 Using index condition +Warnings: +Note 1003 select `test`.`t1`.`i` AS `i`,`test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b` from `test`.`t1` where `test`.`t1`.`b` >= 0 and `test`.`t1`.`b` <= 1000 +explain extended select * from t1 where a>= 500 and a<= 500; +id select_type table type possible_keys key key_len ref rows filtered Extra +1 SIMPLE t1 range ka ka 5 NULL 1000 100.00 Using index condition +Warnings: +Note 1003 select `test`.`t1`.`i` AS `i`,`test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b` from `test`.`t1` where `test`.`t1`.`a` >= 500 and `test`.`t1`.`a` <= 500 +explain extended select * from t1 where b>= 500 and b<= 500; 
+id select_type table type possible_keys key key_len ref rows filtered Extra +1 SIMPLE t1 range kb kb 5 NULL 1000 100.00 Using index condition +Warnings: +Note 1003 select `test`.`t1`.`i` AS `i`,`test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b` from `test`.`t1` where `test`.`t1`.`b` >= 500 and `test`.`t1`.`b` <= 500 +explain extended select * from t1 where a< 750 and b> 500 and b< 750; +id select_type table type possible_keys key key_len ref rows filtered Extra +1 SIMPLE t1 range ka,kb ka 5 NULL 1000 100.00 Using index condition; Using where +Warnings: +Note 1003 select `test`.`t1`.`i` AS `i`,`test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b` from `test`.`t1` where `test`.`t1`.`a` < 750 and `test`.`t1`.`b` > 500 and `test`.`t1`.`b` < 750 +drop index ka on t1; +drop index kb on t1; +create index kab on t1(a,b); +set global rocksdb_force_flush_memtable_now = true; +explain extended select * from t1 where a< 750 and b> 500 and b< 750; +id select_type table type possible_keys key key_len ref rows filtered Extra +1 SIMPLE t1 range kab kab 5 NULL 1000 100.00 Using where; Using index +Warnings: +Note 1003 select `test`.`t1`.`i` AS `i`,`test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b` from `test`.`t1` where `test`.`t1`.`a` < 750 and `test`.`t1`.`b` > 500 and `test`.`t1`.`b` < 750 +set rocksdb_records_in_range=444; +explain extended select * from t1 where a< 750 and b> 500 and b< 750; +id select_type table type possible_keys key key_len ref rows filtered Extra +1 SIMPLE t1 range kab kab 5 NULL 444 100.00 Using where; Using index +Warnings: +Note 1003 select `test`.`t1`.`i` AS `i`,`test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b` from `test`.`t1` where `test`.`t1`.`a` < 750 and `test`.`t1`.`b` > 500 and `test`.`t1`.`b` < 750 +set rocksdb_records_in_range=0; +CREATE TABLE `linktable` ( +`id1` bigint(20) unsigned NOT NULL DEFAULT '0', +`id1_type` int(10) unsigned NOT NULL DEFAULT '0', +`id2` bigint(20) unsigned NOT NULL DEFAULT '0', +`id2_type` int(10) unsigned NOT NULL DEFAULT '0', 
+`link_type` bigint(20) unsigned NOT NULL DEFAULT '0', +`visibility` tinyint(3) NOT NULL DEFAULT '0', +`data` varchar(255) COLLATE latin1_bin NOT NULL DEFAULT '', +`time` bigint(20) unsigned NOT NULL DEFAULT '0', +`version` int(11) unsigned NOT NULL DEFAULT '0', +PRIMARY KEY (`link_type`,`id1`,`id2`) COMMENT 'cf_link_pk', +KEY `id1_type` (`id1`,`link_type`,`visibility`,`time`,`version`,`data`) COMMENT 'cf_link_id1_type' +) ENGINE=ROCKSDB DEFAULT CHARSET=latin1 COLLATE=latin1_bin; +insert into linktable values (1,1,1,1,1,1,1,1,1); +insert into linktable values (1,1,2,1,1,1,1,1,1); +insert into linktable values (1,1,3,1,1,1,1,1,1); +insert into linktable values (1,1,4,1,1,1,1,1,1); +set global rocksdb_force_flush_memtable_now = true; +explain select id1, id2, link_type, visibility, data, time, version from linktable where id1 = 1 and link_type = 1 and id2 in (1, 2); +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE linktable range PRIMARY,id1_type PRIMARY 24 NULL 2 Using where +drop table linktable; +CREATE TABLE `linktable` ( +`id1` bigint(20) unsigned NOT NULL DEFAULT '0', +`id1_type` int(10) unsigned NOT NULL DEFAULT '0', +`id2` bigint(20) unsigned NOT NULL DEFAULT '0', +`id2_type` int(10) unsigned NOT NULL DEFAULT '0', +`link_type` bigint(20) unsigned NOT NULL DEFAULT '0', +`visibility` tinyint(3) NOT NULL DEFAULT '0', +`data` varchar(255) COLLATE latin1_bin NOT NULL DEFAULT '', +`time` bigint(20) unsigned NOT NULL DEFAULT '0', +`version` int(11) unsigned NOT NULL DEFAULT '0', +PRIMARY KEY (`link_type`,`id1`,`id2`) COMMENT 'cf_link_pk', +KEY `id1_type` (`id1`,`link_type`,`visibility`,`time`,`version`,`data`) COMMENT 'rev:cf_link_id1_type' +) ENGINE=ROCKSDB DEFAULT CHARSET=latin1 COLLATE=latin1_bin; +insert into linktable values (1,1,1,1,1,1,1,1,1); +insert into linktable values (1,1,2,1,1,1,1,1,1); +insert into linktable values (1,1,3,1,1,1,1,1,1); +insert into linktable values (1,1,4,1,1,1,1,1,1); +set global 
rocksdb_force_flush_memtable_now = true; +explain select id1, id2, link_type, visibility, data, time, version from linktable where id1 = 1 and link_type = 1 and id2 in (1, 2); +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE linktable range PRIMARY,id1_type PRIMARY 24 NULL 2 Using where +drop table linktable; +DROP TABLE t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/repair_table.result b/storage/rocksdb/mysql-test/rocksdb/r/repair_table.result new file mode 100644 index 0000000000000..f227d200bcc5c --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/repair_table.result @@ -0,0 +1,37 @@ +DROP TABLE IF EXISTS t1,t2; +CREATE TABLE t1 (a INT PRIMARY KEY, b CHAR(8)) ENGINE=rocksdb; +INSERT INTO t1 (a,b) VALUES (1,'a'),(2,'b'); +CREATE TABLE t2 (a INT, b CHAR(8) PRIMARY KEY) ENGINE=rocksdb; +REPAIR TABLE t1; +Table Op Msg_type Msg_text +test.t1 repair note The storage engine for the table doesn't support repair +INSERT INTO t1 (a,b) VALUES (3,'c'); +INSERT INTO t2 (a,b) VALUES (4,'d'); +REPAIR NO_WRITE_TO_BINLOG TABLE t1, t2; +Table Op Msg_type Msg_text +test.t1 repair note The storage engine for the table doesn't support repair +test.t2 repair note The storage engine for the table doesn't support repair +INSERT INTO t2 (a,b) VALUES (5,'e'),(6,'f'); +REPAIR LOCAL TABLE t2; +Table Op Msg_type Msg_text +test.t2 repair note The storage engine for the table doesn't support repair +INSERT INTO t1 (a,b) VALUES (7,'g'),(8,'h'); +INSERT INTO t2 (a,b) VALUES (9,'i'); +REPAIR LOCAL TABLE t2, t1 EXTENDED; +Table Op Msg_type Msg_text +test.t2 repair note The storage engine for the table doesn't support repair +test.t1 repair note The storage engine for the table doesn't support repair +INSERT INTO t1 (a,b) VALUES (10,'j'); +INSERT INTO t2 (a,b) VALUES (11,'k'); +REPAIR TABLE t1, t2 QUICK USE_FRM; +Table Op Msg_type Msg_text +test.t1 repair note The storage engine for the table doesn't support repair +test.t2 repair note The storage engine 
for the table doesn't support repair +INSERT INTO t1 (a,b) VALUES (12,'l'); +INSERT INTO t2 (a,b) VALUES (13,'m'); +REPAIR NO_WRITE_TO_BINLOG TABLE t1, t2 QUICK EXTENDED USE_FRM; +Table Op Msg_type Msg_text +test.t1 repair note The storage engine for the table doesn't support repair +test.t2 repair note The storage engine for the table doesn't support repair +FLUSH TABLE t1; +DROP TABLE t1, t2; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/replace.result b/storage/rocksdb/mysql-test/rocksdb/r/replace.result new file mode 100644 index 0000000000000..f8f61a3f8c38b --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/replace.result @@ -0,0 +1,32 @@ +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 (a INT, b CHAR(8), pk INT AUTO_INCREMENT PRIMARY KEY) ENGINE=rocksdb; +REPLACE INTO t1 (a,b) VALUES (1,'a'),(2,'b'),(3,'c'),(4,'d'),(5,'e'); +SELECT a,b FROM t1; +a b +1 a +2 b +3 c +4 d +5 e +REPLACE t1 (a,b) VALUE (10,'foo'),(10,'foo'); +SELECT a,b FROM t1; +a b +1 a +10 foo +10 foo +2 b +3 c +4 d +5 e +DROP TABLE t1; +CREATE TABLE t1 (a INT, b CHAR(8), PRIMARY KEY (b)) ENGINE=rocksdb; +REPLACE INTO t1 (a,b) VALUES (1,'a'),(2,'b'),(3,'c'); +INSERT INTO t1 (a,b) VALUES (4,'b'); +ERROR 23000: Duplicate entry 'b' for key 'PRIMARY' +REPLACE INTO t1 (a,b) VALUES (4,'b'); +SELECT a,b FROM t1; +a b +1 a +3 c +4 b +DROP TABLE t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/rocksdb.result b/storage/rocksdb/mysql-test/rocksdb/r/rocksdb.result new file mode 100644 index 0000000000000..e945e362f997f --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/rocksdb.result @@ -0,0 +1,2510 @@ +select ENGINE,COMMENT,TRANSACTIONS,XA,SAVEPOINTS from information_schema.engines where engine = 'rocksdb'; +ENGINE COMMENT TRANSACTIONS XA SAVEPOINTS +ROCKSDB RocksDB storage engine YES YES YES +drop table if exists t0,t1,t2,t3,t4,t5,t6,t7,t8,t9,t10; +drop table if exists t11,t12,t13,t14,t15,t16,t17,t18,t19,t20; +drop table if exists t21,t22,t23,t24,t25,t26,t27,t28,t29; +drop table if exists 
t30,t31,t32,t33,t34,t35,t36,t37,t38,t39; +drop table if exists t40,t41,t42,t43,t44,t45,t46,t47,t48,t49; +SET @ORIG_PAUSE_BACKGROUND_WORK = @@ROCKSDB_PAUSE_BACKGROUND_WORK; +SET GLOBAL ROCKSDB_PAUSE_BACKGROUND_WORK = 1; +# +# Issue #1: Don't update indexes if index values have not changed +# +create table t1 ( +pk int primary key, +a int, +b int, +key(a) +) engine=rocksdb; +insert into t1 values +(1,1,1), (2,2,2), (3,3,3), (4,4,4); +set @var1=(select variable_value +from information_schema.global_status +where variable_name='rocksdb_number_keys_written'); +# Do an update that doesn't change the key 'a'. +update t1 set b=3334341 where a=2; +set @var2=(select variable_value +from information_schema.global_status +where variable_name='rocksdb_number_keys_written'); +# The following should produce 1 +select @var2 - @var1; +@var2 - @var1 +1 +# Do an update that sets the key to the same value +update t1 set a=pk where a=3; +set @var3=(select variable_value +from information_schema.global_status +where variable_name='rocksdb_number_keys_written'); +# We have 'updated' column to the same value, so the following must return 0: +select @var3 - @var2; +@var3 - @var2 +0 +drop table t1; +create table t0 (a int primary key) engine=rocksdb; +show create table t0; +Table Create Table +t0 CREATE TABLE `t0` ( + `a` int(11) NOT NULL, + PRIMARY KEY (`a`) +) ENGINE=ROCKSDB DEFAULT CHARSET=latin1 +drop table t0; +create table t1 (a int primary key, b int) engine=rocksdb; +insert into t1 values (1,1); +insert into t1 values (2,2); +select * from t1; +a b +1 1 +2 2 +# Check that we can create another table and insert there +create table t2 (a varchar(10) primary key, b varchar(10)) engine=rocksdb; +insert into t2 value ('abc','def'); +insert into t2 value ('hijkl','mnopq'); +select * from t2; +a b +abc def +hijkl mnopq +# Select again from t1 to see that records from different tables dont mix +select * from t1; +a b +1 1 +2 2 +explain select * from t2 where a='no-such-key'; +id select_type 
table type possible_keys key key_len ref rows Extra +1 SIMPLE NULL NULL NULL NULL NULL NULL NULL Impossible WHERE noticed after reading const tables +explain select * from t2 where a='abc'; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t2 const PRIMARY PRIMARY 12 const # +select * from t2 where a='abc'; +a b +abc def +# Try a composite PK +create table t3 ( +pk1 int, +pk2 varchar(10), +col1 varchar(10), +primary key(pk1, pk2) +) engine=rocksdb; +insert into t3 values (2,'two', 'row#2'); +insert into t3 values (3,'three', 'row#3'); +insert into t3 values (1,'one', 'row#1'); +select * from t3; +pk1 pk2 col1 +1 one row#1 +2 two row#2 +3 three row#3 +select * from t3 where pk1=3 and pk2='three'; +pk1 pk2 col1 +3 three row#3 +drop table t1, t2, t3; +# +# Test blob values +# +create table t4 (a int primary key, b blob) engine=rocksdb; +insert into t4 values (1, repeat('quux-quux', 60)); +insert into t4 values (10, repeat('foo-bar', 43)); +insert into t4 values (5, repeat('foo-bar', 200)); +insert into t4 values (2, NULL); +select +a, +(case a +when 1 then b=repeat('quux-quux', 60) +when 10 then b=repeat('foo-bar', 43) +when 5 then b=repeat('foo-bar', 200) +when 2 then b is null +else 'IMPOSSIBLE!' end) as CMP +from t4; +a CMP +1 1 +2 1 +5 1 +10 1 +drop table t4; +# +# Test blobs of various sizes +# +# TINYBLOB +create table t5 (a int primary key, b tinyblob) engine=rocksdb; +insert into t5 values (1, repeat('quux-quux', 6)); +insert into t5 values (10, repeat('foo-bar', 4)); +insert into t5 values (5, repeat('foo-bar', 2)); +select +a, +(case a +when 1 then b=repeat('quux-quux', 6) +when 10 then b=repeat('foo-bar', 4) +when 5 then b=repeat('foo-bar', 2) +else 'IMPOSSIBLE!' 
end) as CMP +from t5; +a CMP +1 1 +5 1 +10 1 +drop table t5; +# MEDIUMBLOB +create table t6 (a int primary key, b mediumblob) engine=rocksdb; +insert into t6 values (1, repeat('AB', 65000)); +insert into t6 values (10, repeat('bbb', 40000)); +insert into t6 values (5, repeat('foo-bar', 2)); +select +a, +(case a +when 1 then b=repeat('AB', 65000) +when 10 then b=repeat('bbb', 40000) +when 5 then b=repeat('foo-bar', 2) +else 'IMPOSSIBLE!' end) as CMP +from t6; +a CMP +1 1 +5 1 +10 1 +drop table t6; +# LONGBLOB +create table t7 (a int primary key, b longblob) engine=rocksdb; +insert into t7 values (1, repeat('AB', 65000)); +insert into t7 values (10, repeat('bbb', 40000)); +insert into t7 values (5, repeat('foo-bar', 2)); +select +a, +(case a +when 1 then b=repeat('AB', 65000) +when 10 then b=repeat('bbb', 40000) +when 5 then b=repeat('foo-bar', 2) +else 'IMPOSSIBLE!' end) as CMP +from t7; +a CMP +1 1 +5 1 +10 1 +drop table t7; +# +# Check if DELETEs work +# +create table t8 (a varchar(10) primary key, col1 varchar(12)) engine=rocksdb; +insert into t8 values +('one', 'eins'), +('two', 'zwei'), +('three', 'drei'), +('four', 'vier'), +('five', 'funf'); +# Delete by PK +explain delete from t8 where a='three'; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t8 range PRIMARY PRIMARY 12 NULL # Using where +delete from t8 where a='three'; +select * from t8; +a col1 +five funf +four vier +one eins +two zwei +# Delete while doing a full table scan +delete from t8 where col1='eins' or col1='vier'; +select * from t8; +a col1 +five funf +two zwei +# delete w/o WHERE: +delete from t8; +select * from t8; +a col1 +# +# Test UPDATEs +# +insert into t8 values +('one', 'eins'), +('two', 'zwei'), +('three', 'drei'), +('four', 'vier'), +('five', 'funf'); +update t8 set col1='dva' where a='two'; +update t8 set a='fourAAA' where col1='vier'; +select * from t8; +a col1 +five funf +fourAAA vier +one eins +three drei +two dva +delete from t8; +# +# Basic 
transactions tests +# +begin; +insert into t8 values ('trx1-val1', 'data'); +insert into t8 values ('trx1-val2', 'data'); +rollback; +select * from t8; +a col1 +begin; +insert into t8 values ('trx1-val1', 'data'); +insert into t8 values ('trx1-val2', 'data'); +commit; +select * from t8; +a col1 +trx1-val1 data +trx1-val2 data +drop table t8; +# +# Check if DROP TABLE works +# +create table t8 (a varchar(10) primary key, col1 varchar(12)) engine=rocksdb; +select * from t8; +a col1 +insert into t8 values ('foo','foo'); +drop table t8; +create table t8 (a varchar(10) primary key, col1 varchar(12)) engine=rocksdb; +select * from t8; +a col1 +drop table t8; +# +# MDEV-3961: Assertion ... on creating a TEMPORARY RocksDB table +# +CREATE TEMPORARY TABLE t10 (pk INT PRIMARY KEY) ENGINE=RocksDB; +ERROR HY000: Table storage engine 'ROCKSDB' does not support the create option 'TEMPORARY' +# +# MDEV-3963: JOIN or WHERE conditions involving keys on RocksDB tables don't work +# +CREATE TABLE t10 (i INT PRIMARY KEY) ENGINE=RocksDB; +INSERT INTO t10 VALUES (1),(3); +CREATE TABLE t11 (j INT PRIMARY KEY) ENGINE=RocksDB; +INSERT INTO t11 VALUES (1),(4); +select * from t10; +i +1 +3 +select * from t11; +j +1 +4 +EXPLAIN +SELECT * FROM t10, t11 WHERE i=j; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t10 index PRIMARY PRIMARY 4 NULL # Using index +1 SIMPLE t11 eq_ref PRIMARY PRIMARY 4 test.t10.i # Using index +SELECT * FROM t10, t11 WHERE i=j; +i j +1 1 +DROP TABLE t10,t11; +# +# MDEV-3962: SELECT with ORDER BY causes "ERROR 1030 (HY000): Got error 122 +# +CREATE TABLE t12 (pk INT PRIMARY KEY) ENGINE=RocksDB; +INSERT INTO t12 VALUES (2),(1); +SELECT * FROM t12 ORDER BY pk; +pk +1 +2 +DROP TABLE t12; +# +# MDEV-3964: Assertion `!pk_descr' fails in ha_rocksdb::open on adding partitions ... 
+# +create table t14 (pk int primary key) engine=RocksDB partition by hash(pk) partitions 2; +drop table t14; +# +# MDEV-3960: Server crashes on running DISCARD TABLESPACE on a RocksDB table +# +create table t9 (i int primary key) engine=rocksdb; +alter table t9 discard tablespace; +ERROR HY000: Storage engine ROCKSDB of the table `test`.`t9` doesn't have this option +drop table t9; +# +# MDEV-3959: Assertion `slice->size() == table->s->reclength' fails ... +# on accessing a table after ALTER +# +CREATE TABLE t15 (a INT, rocksdb_pk INT PRIMARY KEY) ENGINE=RocksDB; +INSERT INTO t15 VALUES (1,1),(5,2); +ALTER TABLE t15 DROP COLUMN a; +DROP TABLE t15; +# +# MDEV-3968: UPDATE produces a wrong result while modifying a PK on a RocksDB table +# +create table t16 (pk int primary key, a char(8)) engine=RocksDB; +insert into t16 values (1,'a'),(2,'b'),(3,'c'),(4,'d'); +update t16 set pk=100, a = 'updated' where a in ('b','c'); +ERROR 23000: Duplicate entry '100' for key 'PRIMARY' +select * from t16; +pk a +1 a +2 b +3 c +4 d +drop table t16; +# +# MDEV-3970: A set of assorted crashes on inserting a row into a RocksDB table +# +drop table if exists t_very_long_table_name; +CREATE TABLE `t_very_long_table_name` ( +`c` char(1) NOT NULL, +`c0` char(0) NOT NULL, +`c1` char(1) NOT NULL, +`c20` char(20) NOT NULL, +`c255` char(255) NOT NULL, +PRIMARY KEY (`c255`) +) ENGINE=RocksDB DEFAULT CHARSET=latin1; +INSERT INTO t_very_long_table_name VALUES ('a', '', 'c', REPEAT('a',20), REPEAT('x',255)); +drop table t_very_long_table_name; +# +# Test table locking and read-before-write checks. 
+# +create table t17 (pk varchar(12) primary key, col1 varchar(12)) engine=rocksdb; +insert into t17 values ('row1', 'val1'); +insert into t17 values ('row1', 'val1-try2'); +ERROR 23000: Duplicate entry 'row1' for key 'PRIMARY' +insert into t17 values ('ROW1', 'val1-try2'); +ERROR 23000: Duplicate entry 'ROW1' for key 'PRIMARY' +insert into t17 values ('row2', 'val2'); +insert into t17 values ('row3', 'val3'); +# This is ok +update t17 set pk='row4' where pk='row1'; +# This will try to overwrite another row: +update t17 set pk='row3' where pk='row2'; +ERROR 23000: Duplicate entry 'row3' for key 'PRIMARY' +select * from t17; +pk col1 +row2 val2 +row3 val3 +row4 val1 +# +# Locking tests +# +connect con1,localhost,root,,; +# First, make sure there's no locking when transactions update different rows +connection con1; +set autocommit=0; +update t17 set col1='UPD1' where pk='row2'; +connection default; +update t17 set col1='UPD2' where pk='row3'; +connection con1; +commit; +connection default; +select * from t17; +pk col1 +row2 UPD1 +row3 UPD2 +row4 val1 +# Check the variable +show variables like 'rocksdb_lock_wait_timeout'; +Variable_name Value +rocksdb_lock_wait_timeout 1 +set rocksdb_lock_wait_timeout=2; +show variables like 'rocksdb_lock_wait_timeout'; +Variable_name Value +rocksdb_lock_wait_timeout 2 +# Try updating the same row from two transactions +connection con1; +begin; +update t17 set col1='UPD2-AA' where pk='row2'; +connection default; +update t17 set col1='UPD2-BB' where pk='row2'; +ERROR HY000: Lock wait timeout exceeded; try restarting transaction +set rocksdb_lock_wait_timeout=1000; +update t17 set col1='UPD2-CC' where pk='row2'; +connection con1; +rollback; +connection default; +select * from t17 where pk='row2'; +pk col1 +row2 UPD2-CC +drop table t17; +disconnect con1; +# +# MDEV-4035: RocksDB: SELECT produces different results inside a transaction (read is not repeatable) +# +create table t18 (pk int primary key, i int) engine=RocksDB; +begin; 
+select * from t18; +pk i +select * from t18 where pk = 1; +pk i +connect con1,localhost,root,,; +insert into t18 values (1,100); +connection default; +select * from t18; +pk i +select * from t18 where pk = 1; +pk i +commit; +drop table t18; +# +# MDEV-4036: RocksDB: INSERT .. ON DUPLICATE KEY UPDATE does not work, produces ER_DUP_KEY +# +create table t19 (pk int primary key, i int) engine=RocksDB; +insert into t19 values (1,1); +insert into t19 values (1,100) on duplicate key update i = 102; +select * from t19; +pk i +1 102 +drop table t19; +# MDEV-4037: RocksDB: REPLACE doesn't work, produces ER_DUP_KEY +create table t20 (pk int primary key, i int) engine=RocksDB; +insert into t20 values (1,1); +replace into t20 values (1,100); +select * from t20; +pk i +1 100 +drop table t20; +# +# MDEV-4041: Server crashes in Primary_key_comparator::get_hashnr on INSERT +# +create table t21 (v varbinary(16) primary key, i int) engine=RocksDB; +insert into t21 values ('a',1); +select * from t21; +v i +a 1 +drop table t21; +# +# MDEV-4047: RocksDB: Assertion `0' fails in Protocol::end_statement() on multi-table INSERT IGNORE +# +CREATE TABLE t22 (a int primary key) ENGINE=RocksDB; +INSERT INTO t22 VALUES (1),(2); +CREATE TABLE t23 (b int primary key) ENGINE=RocksDB; +INSERT INTO t23 SELECT * FROM t22; +DELETE IGNORE t22.*, t23.* FROM t22, t23 WHERE b < a; +DROP TABLE t22,t23; +# +# MDEV-4046: RocksDB: Multi-table DELETE locks itself and ends with ER_LOCK_WAIT_TIMEOUT +# +CREATE TABLE t24 (pk int primary key) ENGINE=RocksDB; +INSERT INTO t24 VALUES (1),(2); +CREATE TABLE t25 LIKE t24; +INSERT INTO t25 SELECT * FROM t24; +DELETE t25.* FROM t24, t25; +DROP TABLE t24,t25; +# +# MDEV-4044: RocksDB: UPDATE or DELETE with ORDER BY locks itself +# +create table t26 (pk int primary key, c char(1)) engine=RocksDB; +insert into t26 values (1,'a'),(2,'b'); +update t26 set c = 'x' order by pk limit 1; +delete from t26 order by pk limit 1; +select * from t26; +pk c +2 b +drop table t26; +# +# 
Test whether SELECT ... FOR UPDATE puts locks +# +create table t27(pk varchar(10) primary key, col1 varchar(20)) engine=RocksDB; +insert into t27 values +('row1', 'row1data'), +('row2', 'row2data'), +('row3', 'row3data'); +connection con1; +begin; +select * from t27 where pk='row3' for update; +pk col1 +row3 row3data +connection default; +set rocksdb_lock_wait_timeout=1; +update t27 set col1='row2-modified' where pk='row3'; +ERROR HY000: Lock wait timeout exceeded; try restarting transaction +connection con1; +rollback; +connection default; +disconnect con1; +drop table t27; +# +# MDEV-4060: RocksDB: Assertion `! trx->batch' fails in +# +create table t28 (pk int primary key, a int) engine=RocksDB; +insert into t28 values (1,10),(2,20); +begin; +update t28 set a = 100 where pk = 3; +rollback; +select * from t28; +pk a +1 10 +2 20 +drop table t28; +# +# Secondary indexes +# +create table t30 ( +pk varchar(16) not null primary key, +key1 varchar(16) not null, +col1 varchar(16) not null, +key(key1) +) engine=rocksdb; +insert into t30 values ('row1', 'row1-key', 'row1-data'); +insert into t30 values ('row2', 'row2-key', 'row2-data'); +insert into t30 values ('row3', 'row3-key', 'row3-data'); +explain +select * from t30 where key1='row2-key'; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t30 ref key1 key1 18 const # Using index condition +select * from t30 where key1='row2-key'; +pk key1 col1 +row2 row2-key row2-data +explain +select * from t30 where key1='row1'; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t30 ref key1 key1 18 const # Using index condition +# This will produce nothing: +select * from t30 where key1='row1'; +pk key1 col1 +explain +select key1 from t30; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t30 index NULL key1 20 NULL # Using index +select key1 from t30; +key1 +row1-key +row2-key +row3-key +# Create a duplicate record +insert into t30 values ('row2a', 
'row2-key', 'row2a-data'); +# Can we see it? +select * from t30 where key1='row2-key'; +pk key1 col1 +row2 row2-key row2-data +row2a row2-key row2a-data +delete from t30 where pk='row2'; +select * from t30 where key1='row2-key'; +pk key1 col1 +row2a row2-key row2a-data +# +# Range scans on secondary index +# +delete from t30; +insert into t30 values +('row1', 'row1-key', 'row1-data'), +('row2', 'row2-key', 'row2-data'), +('row3', 'row3-key', 'row3-data'), +('row4', 'row4-key', 'row4-data'), +('row5', 'row5-key', 'row5-data'); +analyze table t30; +Table Op Msg_type Msg_text +test.t30 analyze status OK +explain +select * from t30 where key1 <='row3-key'; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t30 range key1 key1 18 NULL # Using index condition +select * from t30 where key1 <='row3-key'; +pk key1 col1 +row1 row1-key row1-data +row2 row2-key row2-data +row3 row3-key row3-data +explain +select * from t30 where key1 between 'row2-key' and 'row4-key'; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t30 range key1 key1 18 NULL # Using index condition +select * from t30 where key1 between 'row2-key' and 'row4-key'; +pk key1 col1 +row2 row2-key row2-data +row3 row3-key row3-data +row4 row4-key row4-data +explain +select * from t30 where key1 in ('row2-key','row4-key'); +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t30 range key1 key1 18 NULL # Using index condition +select * from t30 where key1 in ('row2-key','row4-key'); +pk key1 col1 +row2 row2-key row2-data +row4 row4-key row4-data +explain +select key1 from t30 where key1 in ('row2-key','row4-key'); +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t30 range key1 key1 18 NULL # Using where; Using index +select key1 from t30 where key1 in ('row2-key','row4-key'); +key1 +row2-key +row4-key +explain +select * from t30 where key1 > 'row1-key' and key1 < 'row4-key'; +id select_type table type 
possible_keys key key_len ref rows Extra +1 SIMPLE t30 range key1 key1 18 NULL # Using index condition +select * from t30 where key1 > 'row1-key' and key1 < 'row4-key'; +pk key1 col1 +row2 row2-key row2-data +row3 row3-key row3-data +explain +select * from t30 order by key1 limit 3; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t30 index NULL key1 20 NULL # +select * from t30 order by key1 limit 3; +pk key1 col1 +row1 row1-key row1-data +row2 row2-key row2-data +row3 row3-key row3-data +explain +select * from t30 order by key1 desc limit 3; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t30 index NULL key1 20 NULL # +select * from t30 order by key1 desc limit 3; +pk key1 col1 +row5 row5-key row5-data +row4 row4-key row4-data +row3 row3-key row3-data +# +# Range scans on primary key +# +explain +select * from t30 where pk <='row3'; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t30 range PRIMARY PRIMARY 18 NULL # Using where +select * from t30 where pk <='row3'; +pk key1 col1 +row1 row1-key row1-data +row2 row2-key row2-data +row3 row3-key row3-data +explain +select * from t30 where pk between 'row2' and 'row4'; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t30 range PRIMARY PRIMARY 18 NULL # Using where +select * from t30 where pk between 'row2' and 'row4'; +pk key1 col1 +row2 row2-key row2-data +row3 row3-key row3-data +row4 row4-key row4-data +explain +select * from t30 where pk in ('row2','row4'); +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t30 range PRIMARY PRIMARY 18 NULL # Using where +select * from t30 where pk in ('row2','row4'); +pk key1 col1 +row2 row2-key row2-data +row4 row4-key row4-data +explain +select * from t30 order by pk limit 3; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t30 index NULL PRIMARY 18 NULL # +select * from t30 order by pk limit 3; +pk key1 
col1 +row1 row1-key row1-data +row2 row2-key row2-data +row3 row3-key row3-data +drop table t30; +# +# MDEV-3841: RocksDB: Reading by PK prefix does not work +# +create table t31 (i int, j int, k int, primary key(i,j,k)) engine=RocksDB; +insert into t31 values (1,10,100),(2,20,200); +select * from t31 where i = 1; +i j k +1 10 100 +select * from t31 where j = 10; +i j k +1 10 100 +select * from t31 where k = 100; +i j k +1 10 100 +select * from t31 where i = 1 and j = 10; +i j k +1 10 100 +select * from t31 where i = 1 and k = 100; +i j k +1 10 100 +select * from t31 where j = 10 and k = 100; +i j k +1 10 100 +select * from t31 where i = 1 and j = 10 and k = 100; +i j k +1 10 100 +drop table t31; +# +# MDEV-4055: RocksDB: UPDATE/DELETE by a multi-part PK does not work +# +create table t32 (i int, j int, k int, primary key(i,j,k), a varchar(8)) engine=RocksDB; +insert into t32 values +(1,10,100,''), +(2,20,200,''); +select * from t32 where i = 1 and j = 10 and k = 100; +i j k a +1 10 100 +update t32 set a = 'updated' where i = 1 and j = 10 and k = 100; +select * from t32; +i j k a +1 10 100 updated +2 20 200 +drop table t32; +# +# MDEV-3841: RocksDB: Assertion `0' fails in ha_rocksdb::index_read_map on range select with ORDER BY .. 
DESC +# +CREATE TABLE t33 (pk INT PRIMARY KEY, a CHAR(1)) ENGINE=RocksDB; +INSERT INTO t33 VALUES (1,'a'),(2,'b'); +SELECT * FROM t33 WHERE pk <= 10 ORDER BY pk DESC; +pk a +2 b +1 a +DROP TABLE t33; +# +# MDEV-4081: RocksDB throws error 122 on an attempt to create a table with unique index +# +# Unique indexes can be created, but uniqueness won't be enforced +create table t33 (pk int primary key, u int, unique index(u)) engine=RocksDB; +drop table t33; +# +# MDEV-4077: RocksDB: Wrong result (duplicate row) on select with range +# +CREATE TABLE t34 (pk INT PRIMARY KEY) ENGINE=RocksDB; +INSERT INTO t34 VALUES (10),(11); +SELECT pk FROM t34 WHERE pk > 5 AND pk < 15; +pk +10 +11 +SELECT pk FROM t34 WHERE pk BETWEEN 5 AND 15; +pk +10 +11 +SELECT pk FROM t34 WHERE pk > 5; +pk +10 +11 +SELECT pk FROM t34 WHERE pk < 15; +pk +10 +11 +drop table t34; +# +# MDEV-4086: RocksDB does not allow a query with multi-part pk and index and ORDER BY .. DEC +# +create table t35 (a int, b int, c int, d int, e int, primary key (a,b,c), key (a,c,d,e)) engine=RocksDB; +insert into t35 values (1,1,1,1,1),(2,2,2,2,2); +select * from t35 where a = 1 and c = 1 and d = 1 order by e desc; +a b c d e +1 1 1 1 1 +drop table t35; +# +# MDEV-4084: RocksDB: Wrong result on IN subquery with index +# +CREATE TABLE t36 (pk INT PRIMARY KEY, a INT, KEY(a)) ENGINE=RocksDB; +INSERT INTO t36 VALUES (1,10),(2,20); +SELECT 3 IN ( SELECT a FROM t36 ); +3 IN ( SELECT a FROM t36 ) +0 +drop table t36; +# +# MDEV-4084: RocksDB: Wrong result on IN subquery with index +# +CREATE TABLE t37 (pk INT PRIMARY KEY, a INT, b CHAR(1), KEY(a), KEY(a,b)) +ENGINE=RocksDB; +INSERT INTO t37 VALUES (1,10,'x'), (2,20,'y'); +SELECT MAX(a) FROM t37 WHERE a < 100; +MAX(a) +20 +DROP TABLE t37; +# +# MDEV-4090: RocksDB: Wrong result (duplicate rows) on range access with secondary key and ORDER BY DESC +# +CREATE TABLE t38 (pk INT PRIMARY KEY, i INT, KEY(i)) ENGINE=RocksDB; +INSERT INTO t38 VALUES (1,10), (2,20); +SELECT i FROM t38 WHERE 
i NOT IN (8) ORDER BY i DESC; +i +20 +10 +drop table t38; +# +# MDEV-4092: RocksDB: Assertion `in_table(pa, a_len)' fails in Rdb_key_def::cmp_full_keys +# with a multi-part key and ORDER BY .. DESC +# +CREATE TABLE t40 (pk1 INT PRIMARY KEY, a INT, b VARCHAR(1), KEY(b,a)) ENGINE=RocksDB; +INSERT INTO t40 VALUES (1, 7,'x'),(2,8,'y'); +CREATE TABLE t41 (pk2 INT PRIMARY KEY) ENGINE=RocksDB; +INSERT INTO t41 VALUES (1),(2); +SELECT * FROM t40, t41 WHERE pk1 = pk2 AND b = 'o' ORDER BY a DESC; +pk1 a b pk2 +DROP TABLE t40,t41; +# +# MDEV-4093: RocksDB: IN subquery by secondary key with NULL among values returns true instead of NULL +# +CREATE TABLE t42 (pk INT PRIMARY KEY, a INT, KEY(a)) ENGINE=RocksDB; +INSERT INTO t42 VALUES (1, NULL),(2, 8); +SELECT ( 3 ) NOT IN ( SELECT a FROM t42 ); +( 3 ) NOT IN ( SELECT a FROM t42 ) +NULL +DROP TABLE t42; +# +# MDEV-4094: RocksDB: Wrong result on SELECT and ER_KEY_NOT_FOUND on +# DELETE with search by NULL-able secondary key ... +# +CREATE TABLE t43 (pk INT PRIMARY KEY, a INT, b CHAR(1), KEY(a)) ENGINE=RocksDB; +INSERT INTO t43 VALUES (1,8,'g'),(2,9,'x'); +UPDATE t43 SET pk = 10 WHERE a = 8; +REPLACE INTO t43 ( a ) VALUES ( 8 ); +Warnings: +Warning 1364 Field 'pk' doesn't have a default value +REPLACE INTO t43 ( b ) VALUES ( 'y' ); +Warnings: +Warning 1364 Field 'pk' doesn't have a default value +SELECT * FROM t43 WHERE a = 8; +pk a b +10 8 g +DELETE FROM t43 WHERE a = 8; +DROP TABLE t43; +# +# Basic AUTO_INCREMENT tests +# +create table t44(pk int primary key auto_increment, col1 varchar(12)) engine=rocksdb; +insert into t44 (col1) values ('row1'); +insert into t44 (col1) values ('row2'); +insert into t44 (col1) values ('row3'); +select * from t44; +pk col1 +1 row1 +2 row2 +3 row3 +drop table t44; +# +# ALTER TABLE tests +# +create table t45 (pk int primary key, col1 varchar(12)) engine=rocksdb; +insert into t45 values (1, 'row1'); +insert into t45 values (2, 'row2'); +alter table t45 rename t46; +select * from t46; +pk col1 +1 
row1 +2 row2 +drop table t46; +drop table t45; +ERROR 42S02: Unknown table 'test.t45' +# +# Check Bulk loading +# Bulk loading used to overwrite existing data +# Now it fails if there is data overlap with what +# already exists +# +show variables +where +variable_name like 'rocksdb%' and +variable_name not like 'rocksdb_supported_compression_types'; +Variable_name Value +rocksdb_access_hint_on_compaction_start 1 +rocksdb_advise_random_on_open ON +rocksdb_allow_concurrent_memtable_write OFF +rocksdb_allow_mmap_reads OFF +rocksdb_allow_mmap_writes OFF +rocksdb_background_sync OFF +rocksdb_base_background_compactions 1 +rocksdb_blind_delete_primary_key OFF +rocksdb_block_cache_size 536870912 +rocksdb_block_restart_interval 16 +rocksdb_block_size 4096 +rocksdb_block_size_deviation 10 +rocksdb_bulk_load OFF +rocksdb_bulk_load_size 1000 +rocksdb_bytes_per_sync 0 +rocksdb_cache_index_and_filter_blocks ON +rocksdb_checksums_pct 100 +rocksdb_collect_sst_properties ON +rocksdb_commit_in_the_middle OFF +rocksdb_compact_cf +rocksdb_compaction_readahead_size 0 +rocksdb_compaction_sequential_deletes 0 +rocksdb_compaction_sequential_deletes_count_sd OFF +rocksdb_compaction_sequential_deletes_file_size 0 +rocksdb_compaction_sequential_deletes_window 0 +rocksdb_create_checkpoint +rocksdb_create_if_missing ON +rocksdb_create_missing_column_families OFF +rocksdb_datadir ./.rocksdb +rocksdb_db_write_buffer_size 0 +rocksdb_deadlock_detect OFF +rocksdb_debug_optimizer_no_zero_cardinality ON +rocksdb_default_cf_options +rocksdb_delayed_write_rate 16777216 +rocksdb_delete_obsolete_files_period_micros 21600000000 +rocksdb_enable_2pc ON +rocksdb_enable_bulk_load_api ON +rocksdb_enable_thread_tracking OFF +rocksdb_enable_write_thread_adaptive_yield OFF +rocksdb_error_if_exists OFF +rocksdb_flush_log_at_trx_commit 0 +rocksdb_flush_memtable_on_analyze ON +rocksdb_force_compute_memtable_stats ON +rocksdb_force_flush_memtable_now OFF +rocksdb_force_index_records_in_range 0 
+rocksdb_hash_index_allow_collision ON +rocksdb_index_type kBinarySearch +rocksdb_info_log_level error_level +rocksdb_is_fd_close_on_exec ON +rocksdb_keep_log_file_num 1000 +rocksdb_lock_scanned_rows OFF +rocksdb_lock_wait_timeout 1 +rocksdb_log_file_time_to_roll 0 +rocksdb_manifest_preallocation_size 4194304 +rocksdb_master_skip_tx_api OFF +rocksdb_max_background_compactions 1 +rocksdb_max_background_flushes 1 +rocksdb_max_log_file_size 0 +rocksdb_max_manifest_file_size 18446744073709551615 +rocksdb_max_open_files -1 +rocksdb_max_row_locks 1073741824 +rocksdb_max_subcompactions 1 +rocksdb_max_total_wal_size 0 +rocksdb_merge_buf_size 67108864 +rocksdb_merge_combine_read_size 1073741824 +rocksdb_new_table_reader_for_compaction_inputs OFF +rocksdb_no_block_cache OFF +rocksdb_override_cf_options +rocksdb_paranoid_checks ON +rocksdb_pause_background_work ON +rocksdb_perf_context_level 0 +rocksdb_persistent_cache_path +rocksdb_persistent_cache_size_mb 0 +rocksdb_pin_l0_filter_and_index_blocks_in_cache ON +rocksdb_print_snapshot_conflict_queries OFF +rocksdb_rate_limiter_bytes_per_sec 0 +rocksdb_read_free_rpl_tables +rocksdb_records_in_range 50 +rocksdb_seconds_between_stat_computes 3600 +rocksdb_signal_drop_index_thread OFF +rocksdb_skip_bloom_filter_on_read OFF +rocksdb_skip_fill_cache OFF +rocksdb_skip_unique_check_tables .* +rocksdb_stats_dump_period_sec 600 +rocksdb_store_row_debug_checksums OFF +rocksdb_strict_collation_check OFF +rocksdb_strict_collation_exceptions +rocksdb_table_cache_numshardbits 6 +rocksdb_table_stats_sampling_pct 10 +rocksdb_tmpdir +rocksdb_trace_sst_api OFF +rocksdb_unsafe_for_binlog OFF +rocksdb_use_adaptive_mutex OFF +rocksdb_use_direct_reads OFF +rocksdb_use_direct_writes OFF +rocksdb_use_fsync OFF +rocksdb_validate_tables 1 +rocksdb_verify_row_debug_checksums OFF +rocksdb_wal_bytes_per_sync 0 +rocksdb_wal_dir +rocksdb_wal_recovery_mode 1 +rocksdb_wal_size_limit_mb 0 +rocksdb_wal_ttl_seconds 0 +rocksdb_whole_key_filtering ON 
+rocksdb_write_disable_wal OFF +rocksdb_write_ignore_missing_column_families OFF +create table t47 (pk int primary key, col1 varchar(12)) engine=rocksdb; +insert into t47 values (1, 'row1'); +insert into t47 values (2, 'row2'); +set rocksdb_bulk_load=1; +insert into t47 values (3, 'row3'),(4, 'row4'); +set rocksdb_bulk_load=0; +connect con1,localhost,root,,; +set rocksdb_bulk_load=1; +insert into t47 values (10, 'row10'),(11, 'row11'); +connection default; +set rocksdb_bulk_load=1; +insert into t47 values (100, 'row100'),(101, 'row101'); +disconnect con1; +connection default; +set rocksdb_bulk_load=0; +select * from t47; +pk col1 +1 row1 +2 row2 +3 row3 +4 row4 +10 row10 +11 row11 +100 row100 +101 row101 +drop table t47; +# +# Fix TRUNCATE over empty table (transaction is committed when it wasn't +# started) +# +create table t48(pk int primary key auto_increment, col1 varchar(12)) engine=rocksdb; +set autocommit=0; +truncate table t48; +set autocommit=1; +drop table t48; +# +# MDEV-4059: RocksDB: query waiting for a lock cannot be killed until query timeout exceeded +# +create table t49 (pk int primary key, a int) engine=RocksDB; +insert into t49 values (1,10),(2,20); +begin; +update t49 set a = 100 where pk = 1; +connect con1,localhost,root,,; +set rocksdb_lock_wait_timeout=60; +set @var1= to_seconds(now()); +update t49 set a = 1000 where pk = 1; +connect con2,localhost,root,,; +kill query $con1_id; +connection con1; +ERROR 70100: Query execution was interrupted +set @var2= to_seconds(now()); +select if ((@var2 - @var1) < 60, "passed", (@var2 - @var1)) as 'result'; +result +passed +connection default; +disconnect con1; +commit; +drop table t49; +# +# Index-only tests for INT-based columns +# +create table t1 (pk int primary key, key1 int, col1 int, key(key1)) engine=rocksdb; +insert into t1 values (1,1,1); +insert into t1 values (2,2,2); +insert into t1 values (-5,-5,-5); +# INT column uses index-only: +explain +select key1 from t1 where key1=2; +id select_type 
table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 ref key1 key1 5 const # Using index +select key1 from t1 where key1=2; +key1 +2 +select key1 from t1 where key1=-5; +key1 +-5 +drop table t1; +create table t2 (pk int primary key, key1 int unsigned, col1 int, key(key1)) engine=rocksdb; +insert into t2 values (1,1,1), (2,2,2); +# INT UNSIGNED column uses index-only: +explain +select key1 from t2 where key1=2; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t2 ref key1 key1 5 const # Using index +select key1 from t2 where key1=2; +key1 +2 +drop table t2; +create table t3 (pk bigint primary key, key1 bigint, col1 int, key(key1)) engine=rocksdb; +insert into t3 values (1,1,1), (2,2,2); +# BIGINT uses index-only: +explain +select key1 from t3 where key1=2; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t3 ref key1 key1 9 const # Using index +select key1 from t3 where key1=2; +key1 +2 +drop table t3; +# +# Index-only reads for string columns +# +create table t1 ( +pk int primary key, +key1 char(10) character set binary, +col1 int, +key (key1) +) engine=rocksdb; +insert into t1 values(1, 'one',11), (2,'two',22); +explain +select key1 from t1 where key1='one'; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 ref key1 key1 11 const # Using where; Using index +# The following will produce no rows. This looks like a bug, +# but it is actually correct behavior. Binary strings are end-padded +# with \0 character (and not space). Comparison does not ignore +# the tail of \0. 
+select key1 from t1 where key1='one'; +key1 +explain +select hex(key1) from t1 where key1='one\0\0\0\0\0\0\0'; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 ref key1 key1 11 const # Using where; Using index +select hex(key1) from t1 where key1='one\0\0\0\0\0\0\0'; +hex(key1) +6F6E6500000000000000 +drop table t1; +create table t2 ( +pk int primary key, +key1 char(10) collate latin1_bin, +col1 int, +key (key1) +) engine=rocksdb; +insert into t2 values(1, 'one',11), (2,'two',22); +explain +select key1 from t2 where key1='one'; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t2 ref key1 key1 11 const # Using where; Using index +select key1 from t2 where key1='one'; +key1 +one +drop table t2; +create table t3 ( +pk int primary key, +key1 char(10) collate utf8_bin, +col1 int, +key (key1) +) engine=rocksdb; +insert into t3 values(1, 'one',11), (2,'two',22); +explain +select key1 from t3 where key1='one'; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t3 ref key1 key1 31 const # Using where; Using index +select key1 from t3 where key1='one'; +key1 +one +drop table t3; +# a VARCHAR column +create table t4 ( +pk int primary key, +key1 varchar(10) collate latin1_bin, +key(key1) +) engine=rocksdb; +insert into t4 values(1, 'one'), (2,'two'),(3,'threee'),(55,'fifty-five'); +explain +select key1 from t4 where key1='two'; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t4 ref key1 key1 13 const # Using where; Using index +select key1 from t4 where key1='two'; +key1 +two +select key1 from t4 where key1='fifty-five'; +key1 +fifty-five +explain +select key1 from t4 where key1 between 's' and 'u'; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t4 range key1 key1 13 NULL # Using where; Using index +select key1 from t4 where key1 between 's' and 'u'; +key1 +threee +two +drop table t4; +# +# MDEV-4305: RocksDB: Assertion 
`((keypart_map + 1) & keypart_map) == 0' fails in calculate_key_len +# +CREATE TABLE t1 (pk1 INT, pk2 CHAR(32), i INT, PRIMARY KEY(pk1,pk2), KEY(i)) ENGINE=RocksDB; +INSERT INTO t1 VALUES (1,'test1',6),(2,'test2',8); +SELECT * FROM t1 WHERE i != 3 OR pk1 > 9; +pk1 pk2 i +1 test1 6 +2 test2 8 +DROP TABLE t1; +# +# MDEV-4298: RocksDB: Assertion `thd->is_error() || kill_errno' fails in ha_rows filesort +# +call mtr.add_suppression("Sort aborted"); +CREATE TABLE t1 (pk INT PRIMARY KEY, i INT, KEY(i)) ENGINE=RocksDB; +INSERT INTO t1 VALUES (1,1),(2,2); +BEGIN; +UPDATE t1 SET i = 100; +connect con1,localhost,root,,test; +DELETE IGNORE FROM t1 ORDER BY i; +ERROR HY000: Lock wait timeout exceeded; try restarting transaction +disconnect con1; +connection default; +COMMIT; +DROP TABLE t1; +# +# MDEV-4324: RocksDB: Valgrind "Use of uninitialised value" warnings on inserting value into varchar field +# (testcase only) +# +CREATE TABLE t1 (pk INT PRIMARY KEY, c VARCHAR(4)) ENGINE=RocksDB; +INSERT INTO t1 VALUES (1,'foo'), (2,'bar'); +DROP TABLE t1; +# +# MDEV-4304: RocksDB: Index-only scan by a field with utf8_bin collation returns garbage symbols +# +CREATE TABLE t1 (pk INT PRIMARY KEY, c1 CHAR(1), c2 CHAR(1), KEY(c1)) ENGINE=RocksDB CHARSET utf8 COLLATE utf8_bin; +INSERT INTO t1 VALUES (1,'h','h'); +SELECT * FROM t1; +pk c1 c2 +1 h h +SELECT c1 FROM t1; +c1 +h +DROP TABLE t1; +# +# MDEV-4300: RocksDB: Server crashes in inline_mysql_mutex_lock on SELECT .. 
FOR UPDATE +# +CREATE TABLE t2 (pk INT PRIMARY KEY, i INT, KEY (i)) ENGINE=RocksDB; +INSERT INTO t2 VALUES (1,4),(2,5); +SELECT 1 FROM t2 WHERE i < 0 FOR UPDATE; +1 +DROP TABLE t2; +# +# MDEV-4301: RocksDB: Assertion `pack_info != __null' fails in Rdb_key_def::unpack_record +# +CREATE TABLE t1 (pk INT PRIMARY KEY, i INT, c CHAR(1), KEY(c,i)) ENGINE=RocksDB; +INSERT INTO t1 VALUES (1,4,'d'),(2,8,'e'); +SELECT MAX( pk ) FROM t1 WHERE i = 105 AND c = 'h'; +MAX( pk ) +NULL +DROP TABLE t1; +# +# MDEV-4337: RocksDB: Inconsistent results comparing a char field with an int field +# +create table t1 (c char(1), i int, primary key(c), key(i)) engine=RocksDB; +insert into t1 values ('2',2),('6',6); +select * from t1 where c = i; +c i +2 2 +6 6 +select * from t1 ignore index (i) where c = i; +c i +2 2 +6 6 +drop table t1; +# +# Test statement rollback inside a transaction +# +create table t1 (pk varchar(12) primary key) engine=rocksdb; +insert into t1 values ('old-val1'),('old-val2'); +create table t2 (pk varchar(12) primary key) engine=rocksdb; +insert into t2 values ('new-val2'),('old-val1'); +begin; +insert into t1 values ('new-val1'); +insert into t1 select * from t2; +ERROR 23000: Duplicate entry 'old-val1' for key 'PRIMARY' +commit; +select * from t1; +pk +new-val1 +old-val1 +old-val2 +drop table t1, t2; +# +# MDEV-4383: RocksDB: Wrong result of DELETE .. ORDER BY .. 
LIMIT: +# rows that should be deleted remain in the table +# +CREATE TABLE t2 (pk INT AUTO_INCREMENT PRIMARY KEY) ENGINE=RocksDB; +CREATE TABLE t1 (pk INT AUTO_INCREMENT PRIMARY KEY) ENGINE=RocksDB; +INSERT INTO t1 (pk) VALUES (NULL),(NULL); +BEGIN; +INSERT INTO t2 (pk) VALUES (NULL),(NULL); +INSERT INTO t1 (pk) VALUES (NULL),(NULL),(NULL),(NULL),(NULL),(NULL); +SELECT * FROM t1 ORDER BY pk LIMIT 9; +pk +1 +2 +3 +4 +5 +6 +7 +8 +affected rows: 8 +DELETE FROM t1 ORDER BY pk LIMIT 9; +affected rows: 8 +SELECT * FROM t1 ORDER BY pk LIMIT 9; +pk +affected rows: 0 +DROP TABLE t1,t2; +# +# MDEV-4374: RocksDB: Valgrind warnings 'Use of uninitialised value' on +# inserting into a varchar column +# +CREATE TABLE t1 (pk INT PRIMARY KEY, a VARCHAR(32)) ENGINE=RocksDB; +INSERT INTO t1 VALUES (1,'foo'),(2,'bar'); +DROP TABLE t1; +# +# MDEV-4061: RocksDB: Changes from an interrupted query are still applied +# +create table t1 (pk int primary key, a int) engine=RocksDB; +insert into t1 values (1,10),(2,20); +set autocommit = 1; +update t1 set a = sleep(100) where pk = 1; +connect con1,localhost,root,,; +kill query $con_id; +connection default; +ERROR 70100: Query execution was interrupted +select * from t1; +pk a +1 10 +2 20 +disconnect con1; +drop table t1; +# +# MDEV-4099: RocksDB: Wrong results with index and range access after INSERT IGNORE or REPLACE +# +CREATE TABLE t1 (pk INT PRIMARY KEY, a SMALLINT, b INT, KEY (a)) ENGINE=RocksDB; +INSERT IGNORE INTO t1 VALUES (1, 157, 0), (2, 1898, -504403), (1, -14659, 0); +Warnings: +Warning 1062 Duplicate entry '1' for key 'PRIMARY' +SELECT * FROM t1; +pk a b +1 157 0 +2 1898 -504403 +SELECT pk FROM t1; +pk +1 +2 +SELECT * FROM t1 WHERE a != 97; +pk a b +1 157 0 +2 1898 -504403 +DROP TABLE t1; +# +# Test @@rocksdb_max_row_locks +# +CREATE TABLE t1 (pk INT PRIMARY KEY, a int) ENGINE=RocksDB; +set @a=-1; +insert into t1 select (@a:=@a+1), 1234 from information_schema.session_variables limit 100; +set @tmp1= @@rocksdb_max_row_locks; +set 
rocksdb_max_row_locks= 20; +update t1 set a=a+10; +ERROR HY000: Got error 197 'Number of locks held reached @@rocksdb_max_row_locks.' from ROCKSDB +DROP TABLE t1; +# +# Test AUTO_INCREMENT behavior problem, +# "explicit insert into an auto-inc column is not noticed by RocksDB" +# +create table t1 (i int primary key auto_increment) engine=RocksDB; +insert into t1 values (null); +insert into t1 values (null); +select * from t1; +i +1 +2 +drop table t1; +create table t2 (i int primary key auto_increment) engine=RocksDB; +insert into t2 values (1); +select * from t2; +i +1 +# this fails (ie. used to fail), RocksDB engine did not notice use of '1' above +insert into t2 values (null); +select * from t2; +i +1 +2 +# but then this succeeds, so previous statement must have incremented next number counter +insert into t2 values (null); +select * from t2; +i +1 +2 +3 +drop table t2; +# +# Fix Issue#2: AUTO_INCREMENT value doesn't survive server shutdown +# +create table t1 (i int primary key auto_increment) engine=RocksDB; +insert into t1 values (null); +insert into t1 values (null); +SET GLOBAL ROCKSDB_PAUSE_BACKGROUND_WORK = @ORIG_PAUSE_BACKGROUND_WORK; +SET @ORIG_PAUSE_BACKGROUND_WORK = @@ROCKSDB_PAUSE_BACKGROUND_WORK; +SET GLOBAL ROCKSDB_PAUSE_BACKGROUND_WORK = 1; +insert into t1 values (null); +select * from t1; +i +1 +2 +3 +drop table t1; +# +# Fix Issue #3: SHOW TABLE STATUS shows Auto_increment=0 +# +create table t1 (i int primary key auto_increment) engine=RocksDB; +insert into t1 values (null),(null); +show table status like 't1'; +Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length Index_length Data_free Auto_increment Create_time Update_time Check_time Collation Checksum Create_options Comment +t1 ROCKSDB 10 Fixed 1000 0 # 0 0 0 3 NULL NULL NULL latin1_swedish_ci NULL +drop table t1; +# +# Fix Issue #4: Crash when using pseudo-unique keys +# +CREATE TABLE t1 ( +i INT, +t TINYINT, +s SMALLINT, +m MEDIUMINT, +b BIGINT, +pk MEDIUMINT 
AUTO_INCREMENT PRIMARY KEY, +UNIQUE KEY b_t (b,t) +) ENGINE=rocksdb; +INSERT INTO t1 (i,t,s,m,b) VALUES (1,2,3,4,5),(1000,100,10000,1000000,1000000000000000000),(5,100,10000,1000000,100000000000000000),(2,3,4,5,6),(3,4,5,6,7),(101,102,103,104,105),(10001,103,10002,10003,10004),(10,11,12,13,14),(11,12,13,14,15),(12,13,14,15,16); +SELECT b+t FROM t1 WHERE (b,t) IN ( SELECT b, t FROM t1 WHERE i>1 ) ORDER BY b+t; +b+t +9 +11 +25 +27 +29 +207 +10107 +100000000000000100 +1000000000000000100 +DROP TABLE t1; +# +# Fix issue #5: Transaction rollback doesn't undo all changes. +# +create table t0 (a int) engine=myisam; +insert into t0 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9); +create table t1 (id int auto_increment primary key, value int) engine=rocksdb; +set autocommit=0; +begin; +set @a:=0; +insert into t1 select @a:=@a+1, @a from t0 A, t0 B, t0 C, t0 D where D.a<4; +insert into t1 select @a:=@a+1, @a from t0 A, t0 B, t0 C, t0 D where D.a<4; +insert into t1 select @a:=@a+1, @a from t0 A, t0 B, t0 C, t0 D where D.a<4; +rollback; +select count(*) from t1; +count(*) +0 +set autocommit=1; +drop table t0, t1; +# +# Check status variables +# +show status like 'rocksdb%'; +Variable_name Value +Rocksdb_rows_deleted # +Rocksdb_rows_inserted # +Rocksdb_rows_read # +Rocksdb_rows_updated # +Rocksdb_rows_deleted_blind # +Rocksdb_system_rows_deleted # +Rocksdb_system_rows_inserted # +Rocksdb_system_rows_read # +Rocksdb_system_rows_updated # +Rocksdb_block_cache_add # +Rocksdb_block_cache_data_hit # +Rocksdb_block_cache_data_miss # +Rocksdb_block_cache_filter_hit # +Rocksdb_block_cache_filter_miss # +Rocksdb_block_cache_hit # +Rocksdb_block_cache_index_hit # +Rocksdb_block_cache_index_miss # +Rocksdb_block_cache_miss # +Rocksdb_block_cachecompressed_hit # +Rocksdb_block_cachecompressed_miss # +Rocksdb_bloom_filter_prefix_checked # +Rocksdb_bloom_filter_prefix_useful # +Rocksdb_bloom_filter_useful # +Rocksdb_bytes_read # +Rocksdb_bytes_written # +Rocksdb_compact_read_bytes # 
+Rocksdb_compact_write_bytes # +Rocksdb_compaction_key_drop_new # +Rocksdb_compaction_key_drop_obsolete # +Rocksdb_compaction_key_drop_user # +Rocksdb_flush_write_bytes # +Rocksdb_getupdatessince_calls # +Rocksdb_l0_num_files_stall_micros # +Rocksdb_l0_slowdown_micros # +Rocksdb_memtable_compaction_micros # +Rocksdb_memtable_hit # +Rocksdb_memtable_miss # +Rocksdb_no_file_closes # +Rocksdb_no_file_errors # +Rocksdb_no_file_opens # +Rocksdb_num_iterators # +Rocksdb_number_block_not_compressed # +Rocksdb_number_deletes_filtered # +Rocksdb_number_keys_read # +Rocksdb_number_keys_updated # +Rocksdb_number_keys_written # +Rocksdb_number_merge_failures # +Rocksdb_number_multiget_bytes_read # +Rocksdb_number_multiget_get # +Rocksdb_number_multiget_keys_read # +Rocksdb_number_reseeks_iteration # +Rocksdb_number_sst_entry_delete # +Rocksdb_number_sst_entry_merge # +Rocksdb_number_sst_entry_other # +Rocksdb_number_sst_entry_put # +Rocksdb_number_sst_entry_singledelete # +Rocksdb_number_stat_computes # +Rocksdb_number_superversion_acquires # +Rocksdb_number_superversion_cleanups # +Rocksdb_number_superversion_releases # +Rocksdb_rate_limit_delay_millis # +Rocksdb_snapshot_conflict_errors # +Rocksdb_wal_bytes # +Rocksdb_wal_group_syncs # +Rocksdb_wal_synced # +Rocksdb_write_other # +Rocksdb_write_self # +Rocksdb_write_timedout # +Rocksdb_write_wal # +select VARIABLE_NAME from INFORMATION_SCHEMA.global_status where VARIABLE_NAME LIKE 'rocksdb%'; +VARIABLE_NAME +ROCKSDB_ROWS_DELETED +ROCKSDB_ROWS_INSERTED +ROCKSDB_ROWS_READ +ROCKSDB_ROWS_UPDATED +ROCKSDB_ROWS_DELETED_BLIND +ROCKSDB_SYSTEM_ROWS_DELETED +ROCKSDB_SYSTEM_ROWS_INSERTED +ROCKSDB_SYSTEM_ROWS_READ +ROCKSDB_SYSTEM_ROWS_UPDATED +ROCKSDB_BLOCK_CACHE_ADD +ROCKSDB_BLOCK_CACHE_DATA_HIT +ROCKSDB_BLOCK_CACHE_DATA_MISS +ROCKSDB_BLOCK_CACHE_FILTER_HIT +ROCKSDB_BLOCK_CACHE_FILTER_MISS +ROCKSDB_BLOCK_CACHE_HIT +ROCKSDB_BLOCK_CACHE_INDEX_HIT +ROCKSDB_BLOCK_CACHE_INDEX_MISS +ROCKSDB_BLOCK_CACHE_MISS +ROCKSDB_BLOCK_CACHECOMPRESSED_HIT 
+ROCKSDB_BLOCK_CACHECOMPRESSED_MISS +ROCKSDB_BLOOM_FILTER_PREFIX_CHECKED +ROCKSDB_BLOOM_FILTER_PREFIX_USEFUL +ROCKSDB_BLOOM_FILTER_USEFUL +ROCKSDB_BYTES_READ +ROCKSDB_BYTES_WRITTEN +ROCKSDB_COMPACT_READ_BYTES +ROCKSDB_COMPACT_WRITE_BYTES +ROCKSDB_COMPACTION_KEY_DROP_NEW +ROCKSDB_COMPACTION_KEY_DROP_OBSOLETE +ROCKSDB_COMPACTION_KEY_DROP_USER +ROCKSDB_FLUSH_WRITE_BYTES +ROCKSDB_GETUPDATESSINCE_CALLS +ROCKSDB_L0_NUM_FILES_STALL_MICROS +ROCKSDB_L0_SLOWDOWN_MICROS +ROCKSDB_MEMTABLE_COMPACTION_MICROS +ROCKSDB_MEMTABLE_HIT +ROCKSDB_MEMTABLE_MISS +ROCKSDB_NO_FILE_CLOSES +ROCKSDB_NO_FILE_ERRORS +ROCKSDB_NO_FILE_OPENS +ROCKSDB_NUM_ITERATORS +ROCKSDB_NUMBER_BLOCK_NOT_COMPRESSED +ROCKSDB_NUMBER_DELETES_FILTERED +ROCKSDB_NUMBER_KEYS_READ +ROCKSDB_NUMBER_KEYS_UPDATED +ROCKSDB_NUMBER_KEYS_WRITTEN +ROCKSDB_NUMBER_MERGE_FAILURES +ROCKSDB_NUMBER_MULTIGET_BYTES_READ +ROCKSDB_NUMBER_MULTIGET_GET +ROCKSDB_NUMBER_MULTIGET_KEYS_READ +ROCKSDB_NUMBER_RESEEKS_ITERATION +ROCKSDB_NUMBER_SST_ENTRY_DELETE +ROCKSDB_NUMBER_SST_ENTRY_MERGE +ROCKSDB_NUMBER_SST_ENTRY_OTHER +ROCKSDB_NUMBER_SST_ENTRY_PUT +ROCKSDB_NUMBER_SST_ENTRY_SINGLEDELETE +ROCKSDB_NUMBER_STAT_COMPUTES +ROCKSDB_NUMBER_SUPERVERSION_ACQUIRES +ROCKSDB_NUMBER_SUPERVERSION_CLEANUPS +ROCKSDB_NUMBER_SUPERVERSION_RELEASES +ROCKSDB_RATE_LIMIT_DELAY_MILLIS +ROCKSDB_SNAPSHOT_CONFLICT_ERRORS +ROCKSDB_WAL_BYTES +ROCKSDB_WAL_GROUP_SYNCS +ROCKSDB_WAL_SYNCED +ROCKSDB_WRITE_OTHER +ROCKSDB_WRITE_SELF +ROCKSDB_WRITE_TIMEDOUT +ROCKSDB_WRITE_WAL +# RocksDB-SE's status variables are global internally +# but they are shown as both session and global, like InnoDB's status vars. 
+select VARIABLE_NAME from INFORMATION_SCHEMA.session_status where VARIABLE_NAME LIKE 'rocksdb%'; +VARIABLE_NAME +ROCKSDB_ROWS_DELETED +ROCKSDB_ROWS_INSERTED +ROCKSDB_ROWS_READ +ROCKSDB_ROWS_UPDATED +ROCKSDB_ROWS_DELETED_BLIND +ROCKSDB_SYSTEM_ROWS_DELETED +ROCKSDB_SYSTEM_ROWS_INSERTED +ROCKSDB_SYSTEM_ROWS_READ +ROCKSDB_SYSTEM_ROWS_UPDATED +ROCKSDB_BLOCK_CACHE_ADD +ROCKSDB_BLOCK_CACHE_DATA_HIT +ROCKSDB_BLOCK_CACHE_DATA_MISS +ROCKSDB_BLOCK_CACHE_FILTER_HIT +ROCKSDB_BLOCK_CACHE_FILTER_MISS +ROCKSDB_BLOCK_CACHE_HIT +ROCKSDB_BLOCK_CACHE_INDEX_HIT +ROCKSDB_BLOCK_CACHE_INDEX_MISS +ROCKSDB_BLOCK_CACHE_MISS +ROCKSDB_BLOCK_CACHECOMPRESSED_HIT +ROCKSDB_BLOCK_CACHECOMPRESSED_MISS +ROCKSDB_BLOOM_FILTER_PREFIX_CHECKED +ROCKSDB_BLOOM_FILTER_PREFIX_USEFUL +ROCKSDB_BLOOM_FILTER_USEFUL +ROCKSDB_BYTES_READ +ROCKSDB_BYTES_WRITTEN +ROCKSDB_COMPACT_READ_BYTES +ROCKSDB_COMPACT_WRITE_BYTES +ROCKSDB_COMPACTION_KEY_DROP_NEW +ROCKSDB_COMPACTION_KEY_DROP_OBSOLETE +ROCKSDB_COMPACTION_KEY_DROP_USER +ROCKSDB_FLUSH_WRITE_BYTES +ROCKSDB_GETUPDATESSINCE_CALLS +ROCKSDB_L0_NUM_FILES_STALL_MICROS +ROCKSDB_L0_SLOWDOWN_MICROS +ROCKSDB_MEMTABLE_COMPACTION_MICROS +ROCKSDB_MEMTABLE_HIT +ROCKSDB_MEMTABLE_MISS +ROCKSDB_NO_FILE_CLOSES +ROCKSDB_NO_FILE_ERRORS +ROCKSDB_NO_FILE_OPENS +ROCKSDB_NUM_ITERATORS +ROCKSDB_NUMBER_BLOCK_NOT_COMPRESSED +ROCKSDB_NUMBER_DELETES_FILTERED +ROCKSDB_NUMBER_KEYS_READ +ROCKSDB_NUMBER_KEYS_UPDATED +ROCKSDB_NUMBER_KEYS_WRITTEN +ROCKSDB_NUMBER_MERGE_FAILURES +ROCKSDB_NUMBER_MULTIGET_BYTES_READ +ROCKSDB_NUMBER_MULTIGET_GET +ROCKSDB_NUMBER_MULTIGET_KEYS_READ +ROCKSDB_NUMBER_RESEEKS_ITERATION +ROCKSDB_NUMBER_SST_ENTRY_DELETE +ROCKSDB_NUMBER_SST_ENTRY_MERGE +ROCKSDB_NUMBER_SST_ENTRY_OTHER +ROCKSDB_NUMBER_SST_ENTRY_PUT +ROCKSDB_NUMBER_SST_ENTRY_SINGLEDELETE +ROCKSDB_NUMBER_STAT_COMPUTES +ROCKSDB_NUMBER_SUPERVERSION_ACQUIRES +ROCKSDB_NUMBER_SUPERVERSION_CLEANUPS +ROCKSDB_NUMBER_SUPERVERSION_RELEASES +ROCKSDB_RATE_LIMIT_DELAY_MILLIS +ROCKSDB_SNAPSHOT_CONFLICT_ERRORS +ROCKSDB_WAL_BYTES 
+ROCKSDB_WAL_GROUP_SYNCS +ROCKSDB_WAL_SYNCED +ROCKSDB_WRITE_OTHER +ROCKSDB_WRITE_SELF +ROCKSDB_WRITE_TIMEDOUT +ROCKSDB_WRITE_WAL +# +# Fix issue #9: HA_ERR_INTERNAL_ERROR when running linkbench +# +create table t0 (a int) engine=myisam; +insert into t0 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9); +create table t1 ( +pk int primary key, +col1 varchar(255), +key(col1) +) engine=rocksdb; +insert into t1 select a, repeat('123456789ABCDEF-', 15) from t0; +select * from t1 where pk=3; +pk col1 +3 123456789ABCDEF-123456789ABCDEF-123456789ABCDEF-123456789ABCDEF-123456789ABCDEF-123456789ABCDEF-123456789ABCDEF-123456789ABCDEF-123456789ABCDEF-123456789ABCDEF-123456789ABCDEF-123456789ABCDEF-123456789ABCDEF-123456789ABCDEF-123456789ABCDEF- +drop table t0, t1; +# +# Fix issue #10: Segfault in Rdb_key_def::get_primary_key_tuple +# +create table t0 (a int) engine=myisam; +insert into t0 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9); +CREATE TABLE t1 ( +id1 bigint(20) unsigned NOT NULL DEFAULT '0', +id2 bigint(20) unsigned NOT NULL DEFAULT '0', +link_type bigint(20) unsigned NOT NULL DEFAULT '0', +visibility tinyint(3) NOT NULL DEFAULT '0', +data varchar(255) NOT NULL DEFAULT '', +time bigint(20) unsigned NOT NULL DEFAULT '0', +version int(11) unsigned NOT NULL DEFAULT '0', +PRIMARY KEY (link_type,id1,id2) +) engine=rocksdb; +insert into t1 select a,a,a,1,a,a,a from t0; +alter table t1 add index id1_type (id1,link_type,visibility,time,version,data); +select * from t1 where id1 = 3; +id1 id2 link_type visibility data time version +3 3 3 1 3 3 3 +drop table t0,t1; +# +# Test column families +# +create table t1 ( +pk int primary key, +col1 int, +col2 int, +key(col1) comment 'cf3', +key(col2) comment 'cf4' +) engine=rocksdb; +insert into t1 values (1,1,1), (2,2,2), (3,3,3), (4,4,4), (5,5,5); +explain +select * from t1 where col1=2; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 ref col1 col1 5 const # +select * from t1 where col1=2; +pk col1 col2 
+2 2 2 +explain +select * from t1 where col2=3; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 ref col2 col2 5 const # +select * from t1 where col2=3; +pk col1 col2 +3 3 3 +select * from t1 where pk=4; +pk col1 col2 +4 4 4 +drop table t1; +# +# Try primary key in a non-default CF: +# +create table t1 ( +pk int, +col1 int, +col2 int, +key(col1) comment 'cf3', +key(col2) comment 'cf4', +primary key (pk) comment 'cf5' +) engine=rocksdb; +insert into t1 values (1,1,1), (2,2,2), (3,3,3), (4,4,4), (5,5,5); +explain +select * from t1 where col1=2; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 ref col1 col1 5 const # +select * from t1 where col1=2; +pk col1 col2 +2 2 2 +select * from t1 where pk=4; +pk col1 col2 +4 4 4 +drop table t1; +# +# Issue #15: SIGSEGV from reading in blob data +# +CREATE TABLE t1 ( +id int not null, +blob_col text, +PRIMARY KEY (id) +) ENGINE=ROCKSDB CHARSET=latin1; +INSERT INTO t1 SET id=123, blob_col=repeat('z',64000) ON DUPLICATE KEY UPDATE blob_col=VALUES(blob_col); +INSERT INTO t1 SET id=123, blob_col='' ON DUPLICATE KEY UPDATE blob_col=VALUES(blob_col); +DROP TABLE t1; +# +# Issue #17: Automatic per-index column families +# +create table t1 ( +id int not null, +key1 int, +PRIMARY KEY (id), +index (key1) comment '$per_index_cf' +) engine=rocksdb; +#Same CF ids with different CF flags +create table t1_err ( +id int not null, +key1 int, +PRIMARY KEY (id), +index (key1) comment 'test.t1.key1' +) engine=rocksdb; +ERROR HY000: Column family ('test.t1.key1') flag (0) is different from an existing flag (2). Assign a new CF flag, or do not change existing CF flag. 
+create table t1_err ( +id int not null, +key1 int, +PRIMARY KEY (id), +index (key1) comment 'test.t1.key2' +) engine=rocksdb; +drop table t1_err; +# Unfortunately there is no way to check which column family everything goes to +insert into t1 values (1,1); +select * from t1; +id key1 +1 1 +# Check that ALTER and RENAME are disallowed +alter table t1 add col2 int; +ERROR 42000: This version of MariaDB doesn't yet support 'ALTER TABLE on table with per-index CF' +rename table t1 to t2; +ERROR 42000: This version of MariaDB doesn't yet support 'ALTER TABLE on table with per-index CF' +drop table t1; +# Check detection of typos in $per_index_cf +create table t1 ( +id int not null, +key1 int, +PRIMARY KEY (id), +index (key1) comment '$per_idnex_cf' +)engine=rocksdb; +ERROR 42000: This version of MariaDB doesn't yet support 'column family name looks like a typo of $per_index_cf.' +# +# Issue #22: SELECT ... FOR UPDATE takes a long time +# +create table t0 (a int) engine=myisam; +insert into t0 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9); +create table t1 ( +id1 int, +id2 int, +value1 int, +value2 int, +primary key(id1, id2) COMMENT 'new_column_family', +key(id2) +) engine=rocksdb default charset=latin1 collate=latin1_bin; +insert into t1 select A.a, B.a, 31, 1234 from t0 A, t0 B; +explain +select * from t1 where id1=30 and value1=30 for update; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 ref PRIMARY PRIMARY 4 const # Using where +set @var1=(select variable_value +from information_schema.global_status +where variable_name='rocksdb_number_keys_read'); +select * from t1 where id1=3 and value1=3 for update; +id1 id2 value1 value2 +set @var2=(select variable_value +from information_schema.global_status +where variable_name='rocksdb_number_keys_read'); +# The following must return true (before the fix, the difference was 70): +select if((@var2 - @var1) < 30, 1, @var2-@var1); +if((@var2 - @var1) < 30, 1, @var2-@var1) +1 +drop table 
t0,t1; +# +# Issue #33: SELECT ... FROM rocksdb_table ORDER BY primary_key uses sorting +# +create table t1 (id int primary key, value int) engine=rocksdb; +insert into t1 values (1,1),(2,2),(3,3); +# The following must not use 'Using filesort': +explain select * from t1 ORDER BY id; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 index NULL PRIMARY 4 NULL # +drop table t1; +# +# Issue #26: Index-only scans for DATETIME and TIMESTAMP +# +create table t0 (a int) engine=myisam; +insert into t0 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9); +# Try a DATETIME column: +create table t1 ( +pk int auto_increment primary key, +kp1 datetime, +kp2 int, +col1 int, +key(kp1, kp2) +) engine=rocksdb; +insert into t1 (kp1,kp2) +select date_add('2015-01-01 12:34:56', interval a day), a from t0; +select * from t1; +pk kp1 kp2 col1 +1 2015-01-01 12:34:56 0 NULL +2 2015-01-02 12:34:56 1 NULL +3 2015-01-03 12:34:56 2 NULL +4 2015-01-04 12:34:56 3 NULL +5 2015-01-05 12:34:56 4 NULL +6 2015-01-06 12:34:56 5 NULL +7 2015-01-07 12:34:56 6 NULL +8 2015-01-08 12:34:56 7 NULL +9 2015-01-09 12:34:56 8 NULL +10 2015-01-10 12:34:56 9 NULL +# This must show 'Using index' +explain +select kp1,kp2 from t1 force index (kp1) +where kp1 between '2015-01-01 00:00:00' and '2015-01-05 23:59:59'; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 range kp1 kp1 6 NULL # Using where; Using index +select kp1,kp2 from t1 force index (kp1) +where kp1 between '2015-01-01 00:00:00' and '2015-01-05 23:59:59'; +kp1 kp2 +2015-01-01 12:34:56 0 +2015-01-02 12:34:56 1 +2015-01-03 12:34:56 2 +2015-01-04 12:34:56 3 +2015-01-05 12:34:56 4 +# Now, the same with NOT NULL column +create table t2 ( +pk int auto_increment primary key, +kp1 datetime not null, +kp2 int, +col1 int, +key(kp1, kp2) +) engine=rocksdb; +insert into t2 select * from t1; +# This must show 'Using index' +explain +select kp1,kp2 from t2 force index (kp1) +where kp1 between '2015-01-01 
00:00:00' and '2015-01-05 23:59:59'; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t2 range kp1 kp1 5 NULL # Using where; Using index +select kp1,kp2 from t2 force index (kp1) +where kp1 between '2015-01-01 00:00:00' and '2015-01-05 23:59:59'; +kp1 kp2 +2015-01-01 12:34:56 0 +2015-01-02 12:34:56 1 +2015-01-03 12:34:56 2 +2015-01-04 12:34:56 3 +2015-01-05 12:34:56 4 +drop table t1,t2; +# Try a DATE column: +create table t1 ( +pk int auto_increment primary key, +kp1 date, +kp2 int, +col1 int, +key(kp1, kp2) +) engine=rocksdb; +insert into t1 (kp1,kp2) +select date_add('2015-01-01', interval a day), a from t0; +select * from t1; +pk kp1 kp2 col1 +1 2015-01-01 0 NULL +2 2015-01-02 1 NULL +3 2015-01-03 2 NULL +4 2015-01-04 3 NULL +5 2015-01-05 4 NULL +6 2015-01-06 5 NULL +7 2015-01-07 6 NULL +8 2015-01-08 7 NULL +9 2015-01-09 8 NULL +10 2015-01-10 9 NULL +# This must show 'Using index' +explain +select kp1,kp2 from t1 force index (kp1) +where kp1 between '2015-01-01' and '2015-01-05'; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 range kp1 kp1 4 NULL # Using where; Using index +select kp1,kp2 from t1 force index (kp1) +where kp1 between '2015-01-01' and '2015-01-05'; +kp1 kp2 +2015-01-01 0 +2015-01-02 1 +2015-01-03 2 +2015-01-04 3 +2015-01-05 4 +# Now, the same with NOT NULL column +create table t2 ( +pk int auto_increment primary key, +kp1 date not null, +kp2 int, +col1 int, +key(kp1, kp2) +) engine=rocksdb; +insert into t2 select * from t1; +# This must show 'Using index' +explain +select kp1,kp2 from t2 force index (kp1) +where kp1 between '2015-01-01 00:00:00' and '2015-01-05 23:59:59'; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t2 range kp1 kp1 3 NULL # Using where; Using index +select kp1,kp2 from t2 force index (kp1) +where kp1 between '2015-01-01 00:00:00' and '2015-01-05 23:59:59'; +kp1 kp2 +2015-01-01 0 +2015-01-02 1 +2015-01-03 2 +2015-01-04 3 +2015-01-05 4 
+drop table t1,t2; +# +# Try a TIMESTAMP column: +# +create table t1 ( +pk int auto_increment primary key, +kp1 timestamp, +kp2 int, +col1 int, +key(kp1, kp2) +) engine=rocksdb; +insert into t1 (kp1,kp2) +select date_add('2015-01-01 12:34:56', interval a day), a from t0; +select * from t1; +pk kp1 kp2 col1 +1 2015-01-01 12:34:56 0 NULL +2 2015-01-02 12:34:56 1 NULL +3 2015-01-03 12:34:56 2 NULL +4 2015-01-04 12:34:56 3 NULL +5 2015-01-05 12:34:56 4 NULL +6 2015-01-06 12:34:56 5 NULL +7 2015-01-07 12:34:56 6 NULL +8 2015-01-08 12:34:56 7 NULL +9 2015-01-09 12:34:56 8 NULL +10 2015-01-10 12:34:56 9 NULL +# This must show 'Using index' +explain +select kp1,kp2 from t1 force index (kp1) +where kp1 between '2015-01-01 00:00:00' and '2015-01-05 23:59:59'; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 range kp1 kp1 5 NULL # Using where; Using index +select kp1,kp2 from t1 force index (kp1) +where kp1 between '2015-01-01 00:00:00' and '2015-01-05 23:59:59'; +kp1 kp2 +2015-01-01 12:34:56 0 +2015-01-02 12:34:56 1 +2015-01-03 12:34:56 2 +2015-01-04 12:34:56 3 +2015-01-05 12:34:56 4 +# Now, the same with NOT NULL column +create table t2 ( +pk int auto_increment primary key, +kp1 timestamp not null, +kp2 int, +col1 int, +key(kp1, kp2) +) engine=rocksdb; +insert into t2 select * from t1; +# This must show 'Using index' +explain +select kp1,kp2 from t2 force index (kp1) +where kp1 between '2015-01-01 00:00:00' and '2015-01-05 23:59:59'; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t2 range kp1 kp1 4 NULL # Using where; Using index +select kp1,kp2 from t2 force index (kp1) +where kp1 between '2015-01-01 00:00:00' and '2015-01-05 23:59:59'; +kp1 kp2 +2015-01-01 12:34:56 0 +2015-01-02 12:34:56 1 +2015-01-03 12:34:56 2 +2015-01-04 12:34:56 3 +2015-01-05 12:34:56 4 +drop table t1,t2; +# +# Try a TIME column: +# +create table t1 ( +pk int auto_increment primary key, +kp1 time, +kp2 int, +col1 int, +key(kp1, kp2) +) 
engine=rocksdb; +insert into t1 (kp1,kp2) +select date_add('2015-01-01 09:00:00', interval a minute), a from t0; +select * from t1; +pk kp1 kp2 col1 +1 09:00:00 0 NULL +2 09:01:00 1 NULL +3 09:02:00 2 NULL +4 09:03:00 3 NULL +5 09:04:00 4 NULL +6 09:05:00 5 NULL +7 09:06:00 6 NULL +8 09:07:00 7 NULL +9 09:08:00 8 NULL +10 09:09:00 9 NULL +# This must show 'Using index' +explain +select kp1,kp2 from t1 force index (kp1) +where kp1 between '09:01:00' and '09:05:00'; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 range kp1 kp1 4 NULL # Using where; Using index +select kp1,kp2 from t1 force index (kp1) +where kp1 between '09:01:00' and '09:05:00'; +kp1 kp2 +09:01:00 1 +09:02:00 2 +09:03:00 3 +09:04:00 4 +09:05:00 5 +# Now, the same with NOT NULL column +create table t2 ( +pk int auto_increment primary key, +kp1 time not null, +kp2 int, +col1 int, +key(kp1, kp2) +) engine=rocksdb; +insert into t2 select * from t1; +# This must show 'Using index' +explain +select kp1,kp2 from t2 force index (kp1) +where kp1 between '09:01:00' and '09:05:00'; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t2 range kp1 kp1 3 NULL # Using where; Using index +select kp1,kp2 from t2 force index (kp1) +where kp1 between '09:01:00' and '09:05:00'; +kp1 kp2 +09:01:00 1 +09:02:00 2 +09:03:00 3 +09:04:00 4 +09:05:00 5 +drop table t1,t2; +# +# Try a YEAR column: +# +create table t1 ( +pk int auto_increment primary key, +kp1 year, +kp2 int, +col1 int, +key(kp1, kp2) +) engine=rocksdb; +insert into t1 (kp1,kp2) select 2015+a, a from t0; +select * from t1; +pk kp1 kp2 col1 +1 2015 0 NULL +2 2016 1 NULL +3 2017 2 NULL +4 2018 3 NULL +5 2019 4 NULL +6 2020 5 NULL +7 2021 6 NULL +8 2022 7 NULL +9 2023 8 NULL +10 2024 9 NULL +# This must show 'Using index' +explain +select kp1,kp2 from t1 force index (kp1) +where kp1 between '2016' and '2020'; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 range kp1 kp1 2 
NULL # Using where; Using index +select kp1,kp2 from t1 force index (kp1) +where kp1 between '2016' and '2020'; +kp1 kp2 +2016 1 +2017 2 +2018 3 +2019 4 +2020 5 +# Now, the same with NOT NULL column +create table t2 ( +pk int auto_increment primary key, +kp1 year not null, +kp2 int, +col1 int, +key(kp1, kp2) +) engine=rocksdb; +insert into t2 select * from t1; +# This must show 'Using index' +explain +select kp1,kp2 from t2 force index (kp1) +where kp1 between '2016' and '2020'; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t2 range kp1 kp1 1 NULL # Using where; Using index +select kp1,kp2 from t2 force index (kp1) +where kp1 between '2016' and '2020'; +kp1 kp2 +2016 1 +2017 2 +2018 3 +2019 4 +2020 5 +drop table t1,t2; +# +# Issue #57: Release row locks on statement errors +# +create table t1 (id int primary key) engine=rocksdb; +insert into t1 values (1), (2), (3); +begin; +insert into t1 values (4), (5), (6); +insert into t1 values (7), (8), (2), (9); +ERROR 23000: Duplicate entry '2' for key 'PRIMARY' +select * from t1; +id +1 +2 +3 +4 +5 +6 +begin; +select * from t1 where id=4 for update; +ERROR HY000: Lock wait timeout exceeded; try restarting transaction +select * from t1 where id=7 for update; +id +select * from t1 where id=9 for update; +id +drop table t1; +#Index on blob column +SET @old_mode = @@sql_mode; +SET sql_mode = 'strict_all_tables'; +create table t1 (a int, b text, c varchar(400), Primary Key(a), Key(c, b(255))) engine=rocksdb; +drop table t1; +create table t1 (a int, b text, c varchar(400), Primary Key(a), Key(b(1255))) engine=rocksdb; +insert into t1 values (1, '1abcde', '1abcde'), (2, '2abcde', '2abcde'), (3, '3abcde', '3abcde'); +select * from t1; +a b c +1 1abcde 1abcde +2 2abcde 2abcde +3 3abcde 3abcde +explain select * from t1 where b like '1%'; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 range b b 1258 NULL # Using where +explain select b, a from t1 where b like '1%'; 
+id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 range b b 1258 NULL # Using where +update t1 set b= '12345' where b = '2abcde'; +select * from t1; +a b c +1 1abcde 1abcde +2 12345 2abcde +3 3abcde 3abcde +drop table t1; +create table t1 (a int, b text, c varchar(400), Primary Key(a), Key(b(2255))) engine=rocksdb; +Warnings: +Warning 1071 Specified key was too long; max key length is 2048 bytes +drop table t1; +SET sql_mode = @old_mode; +drop table t0; +# +# Fix assertion failure (attempt to overrun the key buffer) for prefix indexes +# +create table t1 ( +pk int primary key, +col1 varchar(100), +key (col1(10)) +) engine=rocksdb; +insert into t1 values (1, repeat('0123456789', 9)); +drop table t1; +# +# Issue #76: Assertion `buf == table->record[0]' fails in virtual int ha_rocksdb::delete_row(const uchar*) +# +CREATE TABLE t1 (pk INT PRIMARY KEY, f1 INT) ENGINE=RocksDB; +CREATE TABLE t2 (pk INT PRIMARY KEY, f1 INT) ENGINE=RocksDB; +CREATE TRIGGER tr AFTER DELETE ON t1 FOR EACH ROW DELETE FROM t2 WHERE pk = old.pk; +INSERT INTO t1 VALUES (1,1); +REPLACE INTO t1 VALUES (1,2); +SELECT * FROM t1; +pk f1 +1 2 +DROP TABLE t1, t2; +# +# Issue #99: UPDATE for table with VARCHAR pk gives "Can't find record" error +# +create table t1(a int primary key); +insert into t1 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9); +create table t2 ( +a varchar(32) primary key, +col1 int +) engine=rocksdb; +insert into t2 +select concat('v-', 100 + A.a*100 + B.a), 12345 from t1 A, t1 B; +update t2 set a=concat('x-', a) where a between 'v-1002' and 'v-1004'; +drop table t1,t2; +# +# Issue #131: Assertion `v->cfd_->internal_comparator().Compare(start, end) <= 0' failed +# +CREATE TABLE t2(c1 INTEGER UNSIGNED NOT NULL, c2 INTEGER NULL, c3 TINYINT, c4 SMALLINT , c5 MEDIUMINT, c6 INT, c7 BIGINT, PRIMARY KEY(c1,c6)) ENGINE=RocksDB; +INSERT INTO t2 VALUES (1,1,1,1,1,1,1); +SELECT * FROM t2 WHERE c1 > 4294967295 ORDER BY c1,c6; +c1 c2 c3 c4 c5 c6 c7 +EXPLAIN SELECT * 
FROM t2 WHERE c1 > 4294967295 ORDER BY c1,c6; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t2 range PRIMARY PRIMARY 4 NULL 50 Using where +drop table t2; +# +# Issue #135: register transaction was not being called for statement +# +DROP DATABASE IF EXISTS test_db; +CREATE DATABASE test_db; +CREATE TABLE test_db.t1(c1 INT PRIMARY KEY); +LOCK TABLES test_db.t1 READ; +SET AUTOCOMMIT=0; +SELECT c1 FROM test_db.t1; +c1 +START TRANSACTION WITH CONSISTENT SNAPSHOT, READ ONLY; +DROP DATABASE test_db; +# +# Issue #143: Split rocksdb_bulk_load option into two +# +CREATE TABLE t1 (id int primary key, value int) engine=RocksDB; +SET unique_checks=0; +INSERT INTO t1 VALUES(1, 1); +INSERT INTO t1 VALUES(1, 2); +INSERT INTO t1 VALUES(1, 3); +SELECT * FROM t1; +id value +REPLACE INTO t1 VALUES(4, 4); +ERROR HY000: When unique checking is disabled in MyRocks, INSERT,UPDATE,LOAD statements with clauses that update or replace the key (i.e. INSERT ON DUPLICATE KEY UPDATE, REPLACE) are not allowed. Query: REPLACE INTO t1 VALUES(4, 4) +INSERT INTO t1 VALUES(5, 5) ON DUPLICATE KEY UPDATE value=value+1; +ERROR HY000: When unique checking is disabled in MyRocks, INSERT,UPDATE,LOAD statements with clauses that update or replace the key (i.e. INSERT ON DUPLICATE KEY UPDATE, REPLACE) are not allowed. 
Query: INSERT INTO t1 VALUES(5, 5) ON DUPLICATE KEY UPDATE value=value+1 +TRUNCATE TABLE t1; +SET @save_rocksdb_bulk_load_size= @@rocksdb_bulk_load_size; +SET unique_checks=1; +SET rocksdb_commit_in_the_middle=1; +SET rocksdb_bulk_load_size=10; +BEGIN; +INSERT INTO t1 (id) VALUES(1),(2),(3),(4),(5),(6),(7),(8),(9),(10), +(11),(12),(13),(14),(15),(16),(17),(18),(19); +ROLLBACK; +SELECT * FROM t1; +id value +1 NULL +2 NULL +3 NULL +4 NULL +5 NULL +6 NULL +7 NULL +8 NULL +9 NULL +10 NULL +INSERT INTO t1 (id) VALUES (11),(12),(13),(14),(15); +BEGIN; +UPDATE t1 SET value=100; +ROLLBACK; +SELECT * FROM t1; +id value +1 100 +2 100 +3 100 +4 100 +5 100 +6 100 +7 100 +8 100 +9 100 +10 100 +11 NULL +12 NULL +13 NULL +14 NULL +15 NULL +BEGIN; +DELETE FROM t1; +ROLLBACK; +SELECT * FROM t1; +id value +11 NULL +12 NULL +13 NULL +14 NULL +15 NULL +SET rocksdb_commit_in_the_middle=0; +SET rocksdb_bulk_load_size= @save_rocksdb_bulk_load_size; +DROP TABLE t1; +# +# Issue #185 Assertion `BaseValid()' failed in void rocksdb::BaseDeltaIterator::Advance() +# +CREATE TABLE t2(id INT NOT NULL PRIMARY KEY, data INT) Engine=MEMORY; +INSERT INTO t2 VALUES (100,NULL),(150,"long varchar"),(200,"varchar"),(250,"long long long varchar"); +Warnings: +Warning 1366 Incorrect integer value: 'long varchar' for column 'data' at row 2 +Warning 1366 Incorrect integer value: 'varchar' for column 'data' at row 3 +Warning 1366 Incorrect integer value: 'long long long varchar' for column 'data' at row 4 +create TABLE t1 (a int not null, b int not null, primary key(a,b)); +INSERT INTO t1 VALUES (1,1); +SELECT a FROM t1, t2 WHERE a=b AND (b NOT IN (SELECT a FROM t1 WHERE a > 4)); +a +1 +1 +1 +1 +DROP TABLE t1, t2; +# +# Issue #189 ha_rocksdb::load_auto_incr_value() creates implicit snapshot and doesn't release +# +create table r1 (id int auto_increment primary key, value int); +insert into r1 (id) values (null), (null), (null), (null), (null); +create table r2 like r1; +show create table r2; +Table Create 
Table +r2 CREATE TABLE `r2` ( + `id` int(11) NOT NULL AUTO_INCREMENT, + `value` int(11) DEFAULT NULL, + PRIMARY KEY (`id`) +) ENGINE=ROCKSDB DEFAULT CHARSET=latin1 +begin; +insert into r1 values (10, 1); +commit; +begin; +select * from r1; +id value +1 NULL +2 NULL +3 NULL +4 NULL +5 NULL +10 1 +commit; +drop table r1, r2; +create table r1 (id int auto_increment, value int, index i(id)); +insert into r1 (id) values (null), (null), (null), (null), (null); +create table r2 like r1; +show create table r2; +Table Create Table +r2 CREATE TABLE `r2` ( + `id` int(11) NOT NULL AUTO_INCREMENT, + `value` int(11) DEFAULT NULL, + KEY `i` (`id`) +) ENGINE=ROCKSDB DEFAULT CHARSET=latin1 +begin; +insert into r1 values (10, 1); +commit; +begin; +select * from r1; +id value +1 NULL +2 NULL +3 NULL +4 NULL +5 NULL +10 1 +commit; +drop table r1, r2; +# +# Issue#211 Crash on LOCK TABLES + START TRANSACTION WITH CONSISTENT SNAPSHOT +# +CREATE TABLE t1(c1 INT); +lock TABLE t1 read local; +SELECT 1 FROM t1 GROUP BY TRIM(LEADING RAND()FROM''); +1 +set AUTOCOMMIT=0; +start transaction with consistent snapshot; +SELECT * FROM t1; +c1 +COMMIT; +UNLOCK TABLES; +DROP TABLE t1; +# +# Issue#213 Crash on LOCK TABLES + partitions +# +CREATE TABLE t1(a INT,b INT,KEY (b)) engine=rocksdb PARTITION BY HASH(a) PARTITIONS 2; +INSERT INTO t1(a)VALUES (20010101101010.999949); +Warnings: +Warning 1264 Out of range value for column 'a' at row 1 +lock tables t1 write,t1 as t0 write,t1 as t2 write; +SELECT a FROM t1 ORDER BY a; +a +2147483647 +truncate t1; +INSERT INTO t1 VALUES(X'042000200020',X'042000200020'),(X'200400200020',X'200400200020'); +Warnings: +Warning 1366 Incorrect integer value: '\x04 \x00 \x00 ' for column 'a' at row 1 +Warning 1366 Incorrect integer value: '\x04 \x00 \x00 ' for column 'b' at row 1 +Warning 1366 Incorrect integer value: ' \x04\x00 \x00 ' for column 'a' at row 2 +Warning 1366 Incorrect integer value: ' \x04\x00 \x00 ' for column 'b' at row 2 +UNLOCK TABLES; +DROP TABLE t1; +# 
+# Issue#250: MyRocks/Innodb different output from query with order by on table with index and decimal type +# (the test was changed to use VARCHAR, because DECIMAL now supports index-only, and this issue +# needs a datype that doesn't support index-inly) +# +CREATE TABLE t1( +c1 varchar(10) character set utf8 collate utf8_general_ci NOT NULL, +c2 varchar(10) character set utf8 collate utf8_general_ci, +c3 INT, +INDEX idx(c1,c2) +); +INSERT INTO t1 VALUES ('c1-val1','c2-val1',5); +INSERT INTO t1 VALUES ('c1-val2','c2-val3',6); +INSERT INTO t1 VALUES ('c1-val3','c2-val3',7); +SELECT * FROM t1 force index(idx) WHERE c1 <> 'c1-val2' ORDER BY c1 DESC; +c1 c2 c3 +c1-val3 c2-val3 7 +c1-val1 c2-val1 5 +explain SELECT * FROM t1 force index(idx) WHERE c1 <> '1' ORDER BY c1 DESC; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 range idx idx 32 NULL # Using where +drop table t1; +# +# Issue#267: MyRocks issue with no matching min/max row and count(*) +# +CREATE TABLE t1(c1 INT UNSIGNED, c2 INT SIGNED, INDEX idx2(c2)); +INSERT INTO t1 VALUES(1,null); +INSERT INTO t1 VALUES(2,null); +SELECT count(*) as total_rows, min(c2) as min_value FROM t1; +total_rows min_value +2 NULL +DROP TABLE t1; +# +# Issue#263: MyRocks auto_increment skips values if you insert a negative value +# +CREATE TABLE t1(a INT AUTO_INCREMENT KEY); +INSERT INTO t1 VALUES(0),(-1),(0); +SHOW TABLE STATUS LIKE 't1'; +Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length Index_length Data_free Auto_increment Create_time Update_time Check_time Collation Checksum Create_options Comment +t1 ROCKSDB 10 Fixed 1000 0 0 0 0 0 3 NULL NULL NULL latin1_swedish_ci NULL +SELECT * FROM t1; +a +-1 +1 +2 +DROP TABLE t1; +CREATE TABLE t1(a INT AUTO_INCREMENT KEY); +INSERT INTO t1 VALUES(0),(10),(0); +SHOW TABLE STATUS LIKE 't1'; +Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length Index_length Data_free Auto_increment Create_time Update_time 
Check_time Collation Checksum Create_options Comment +t1 ROCKSDB 10 Fixed 1000 0 0 0 0 0 12 NULL NULL NULL latin1_swedish_ci NULL +SELECT * FROM t1; +a +1 +10 +11 +DROP TABLE t1; +# +# Issue #411: Setting rocksdb_commit_in_the_middle commits transaction +# without releasing iterator +# +CREATE TABLE t1 (id1 bigint(20), +id2 bigint(20), +id3 bigint(20), +PRIMARY KEY (id1, id2, id3)) +DEFAULT CHARSET=latin1; +CREATE TABLE t2 (id1 bigint(20), +id2 bigint(20), +PRIMARY KEY (id1, id2)) +DEFAULT CHARSET=latin1; +set rocksdb_commit_in_the_middle=1; +SET @save_rocksdb_bulk_load_size= @@rocksdb_bulk_load_size; +set rocksdb_bulk_load_size = 100; +DELETE t2, t1 FROM t2 LEFT JOIN t1 ON t2.id2 = t1.id2 AND t2.id1 = t1.id1 WHERE t2.id1 = 0; +SET rocksdb_bulk_load_size= @save_rocksdb_bulk_load_size; +SET rocksdb_commit_in_the_middle=0; +DROP TABLE t1, t2; +SET GLOBAL ROCKSDB_PAUSE_BACKGROUND_WORK = @ORIG_PAUSE_BACKGROUND_WORK; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_cf_options.result b/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_cf_options.result new file mode 100644 index 0000000000000..6c3d85b760c1b --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_cf_options.result @@ -0,0 +1,61 @@ +create table t1 (a int, +primary key (a) comment 'cf1') engine=rocksdb; +create table t2 (a int, +primary key (a) comment 'cf2') engine=rocksdb; +create table t3 (a int, +primary key (a) comment 'z') engine=rocksdb; +insert into t1 values (1); +insert into t2 values (2); +insert into t3 values (2); + +Default options for all column families: + +select cf_name, option_type, value +from information_schema.rocksdb_cf_options +where option_type in ('WRITE_BUFFER_SIZE', +'TARGET_FILE_SIZE_BASE', +'MAX_BYTES_FOR_LEVEL_MULTIPLIER') +order by cf_name, option_type; +cf_name option_type value +cf1 MAX_BYTES_FOR_LEVEL_MULTIPLIER 10.000000 +cf1 TARGET_FILE_SIZE_BASE 1048576 +cf1 WRITE_BUFFER_SIZE 12582912 +cf2 MAX_BYTES_FOR_LEVEL_MULTIPLIER 10.000000 +cf2 TARGET_FILE_SIZE_BASE 
1048576 +cf2 WRITE_BUFFER_SIZE 12582912 +default MAX_BYTES_FOR_LEVEL_MULTIPLIER 10.000000 +default TARGET_FILE_SIZE_BASE 1048576 +default WRITE_BUFFER_SIZE 12582912 +z MAX_BYTES_FOR_LEVEL_MULTIPLIER 10.000000 +z TARGET_FILE_SIZE_BASE 1048576 +z WRITE_BUFFER_SIZE 12582912 +__system__ MAX_BYTES_FOR_LEVEL_MULTIPLIER 10.000000 +__system__ TARGET_FILE_SIZE_BASE 1048576 +__system__ WRITE_BUFFER_SIZE 12582912 + +Individualized options for column families: + +select cf_name, option_type, value +from information_schema.rocksdb_cf_options +where option_type in ('WRITE_BUFFER_SIZE', +'TARGET_FILE_SIZE_BASE', +'MAX_BYTES_FOR_LEVEL_MULTIPLIER') +order by cf_name, option_type; +cf_name option_type value +cf1 MAX_BYTES_FOR_LEVEL_MULTIPLIER 10.000000 +cf1 TARGET_FILE_SIZE_BASE 2097152 +cf1 WRITE_BUFFER_SIZE 8388608 +cf2 MAX_BYTES_FOR_LEVEL_MULTIPLIER 8.000000 +cf2 TARGET_FILE_SIZE_BASE 1048576 +cf2 WRITE_BUFFER_SIZE 16777216 +default MAX_BYTES_FOR_LEVEL_MULTIPLIER 10.000000 +default TARGET_FILE_SIZE_BASE 1048576 +default WRITE_BUFFER_SIZE 12582912 +z MAX_BYTES_FOR_LEVEL_MULTIPLIER 10.000000 +z TARGET_FILE_SIZE_BASE 4194304 +z WRITE_BUFFER_SIZE 12582912 +__system__ MAX_BYTES_FOR_LEVEL_MULTIPLIER 10.000000 +__system__ TARGET_FILE_SIZE_BASE 1048576 +__system__ WRITE_BUFFER_SIZE 12582912 + +drop table t1,t2,t3; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_cf_per_partition.result b/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_cf_per_partition.result new file mode 100644 index 0000000000000..05ac3f4f62dc6 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_cf_per_partition.result @@ -0,0 +1,409 @@ +DROP TABLE IF EXISTS t1; +DROP TABLE IF EXISTS t2; +CREATE TABLE t1 ( +c1 INT, +c2 INT, +name VARCHAR(25) NOT NULL, +event DATE, +PRIMARY KEY (`c1`, `c2`) COMMENT 'testcomment' +) ENGINE=ROCKSDB +PARTITION BY LIST(c1) ( +PARTITION custom_p0 VALUES IN (1, 4, 7), +PARTITION custom_p1 VALUES IN (2, 5, 8), +PARTITION custom_p2 VALUES IN (3, 6, 9) +); +SELECT 
DISTINCT(cf_name) FROM information_schema.rocksdb_cfstats WHERE cf_name='testcomment'; +cf_name +DROP TABLE t1; +CREATE TABLE t1 ( +c1 INT, +c2 INT, +name VARCHAR(25) NOT NULL, +event DATE, +PRIMARY KEY (`c1`, `c2`) COMMENT 'rev:testrevcomment' +) ENGINE=ROCKSDB +PARTITION BY LIST(c1) ( +PARTITION custom_p0 VALUES IN (1, 4, 7), +PARTITION custom_p1 VALUES IN (2, 5, 8), +PARTITION custom_p2 VALUES IN (3, 6, 9) +); +SELECT DISTINCT(cf_name) FROM information_schema.rocksdb_cfstats WHERE cf_name='rev:testrevcomment'; +cf_name +DROP TABLE t1; +CREATE TABLE t1 ( +c1 INT, +c2 INT, +name VARCHAR(25) NOT NULL, +event DATE, +PRIMARY KEY (`c1`, `c2`) COMMENT 'custom_p0_cfname=foo;custom_p1_cfname=my_custom_cf;custom_p2_cfname=baz' +) ENGINE=ROCKSDB +PARTITION BY LIST(c1) ( +PARTITION custom_p0 VALUES IN (1, 4, 7), +PARTITION custom_p1 VALUES IN (2, 5, 8), +PARTITION custom_p2 VALUES IN (3, 6, 9) +); +set @@global.rocksdb_compact_cf = 'foo'; +set @@global.rocksdb_compact_cf = 'my_custom_cf'; +set @@global.rocksdb_compact_cf = 'baz'; +SELECT DISTINCT(cf_name) FROM information_schema.rocksdb_cfstats WHERE cf_name='foo'; +cf_name +foo +SELECT DISTINCT(cf_name) FROM information_schema.rocksdb_cfstats WHERE cf_name='my_custom_cf'; +cf_name +my_custom_cf +SELECT DISTINCT(cf_name) FROM information_schema.rocksdb_cfstats WHERE cf_name='baz'; +cf_name +baz +DROP TABLE t1; +CREATE TABLE t1 ( +c1 INT, +c2 INT, +name VARCHAR(25) NOT NULL, +event DATE, +PRIMARY KEY (`c1`, `c2`) COMMENT 'custom_p0_cfname=t1-p0;custom_p1_cfname=rev:bar;custom_p2_cfname=t1-p2' +) ENGINE=ROCKSDB +PARTITION BY LIST(c1) ( +PARTITION custom_p0 VALUES IN (1, 4, 7), +PARTITION custom_p1 VALUES IN (2, 5, 8), +PARTITION custom_p2 VALUES IN (3, 6, 9) +); +set @@global.rocksdb_compact_cf = 't1-p0'; +set @@global.rocksdb_compact_cf = 'rev:bar'; +set @@global.rocksdb_compact_cf = 't1-p2'; +SELECT DISTINCT(cf_name) FROM information_schema.rocksdb_cfstats WHERE cf_name='t1-p0'; +cf_name +t1-p0 +SELECT DISTINCT(cf_name) 
FROM information_schema.rocksdb_cfstats WHERE cf_name='rev:bar'; +cf_name +rev:bar +SELECT DISTINCT(cf_name) FROM information_schema.rocksdb_cfstats WHERE cf_name='t1-p2'; +cf_name +t1-p2 +DROP TABLE t1; +CREATE TABLE t1 ( +c1 INT, +c2 INT, +name VARCHAR(25) NOT NULL, +event DATE, +PRIMARY KEY (`c1`, `c2`) COMMENT 'custom_p0_cfname=cf-zero;custom_p1_cfname=cf-one;custom_p2_cfname=cf-zero' +) ENGINE=ROCKSDB +PARTITION BY LIST(c1) ( +PARTITION custom_p0 VALUES IN (1, 4, 7), +PARTITION custom_p1 VALUES IN (2, 5, 8), +PARTITION custom_p2 VALUES IN (3, 6, 9), +PARTITION custom_p3 VALUES IN (10, 20, 30) +); +set @@global.rocksdb_compact_cf = 'cf-zero'; +set @@global.rocksdb_compact_cf = 'cf-one'; +SELECT DISTINCT(cf_name) FROM information_schema.rocksdb_cfstats WHERE cf_name='cf-zero'; +cf_name +cf-zero +SELECT DISTINCT(cf_name) FROM information_schema.rocksdb_cfstats WHERE cf_name='cf-one'; +cf_name +cf-one +DROP TABLE t1; +CREATE TABLE t1 ( +c1 INT, +c2 INT, +name VARCHAR(25) NOT NULL, +event DATE, +PRIMARY KEY (`c1`, `c2`) COMMENT 'custom_p0_cfname=foo;custom_p1_cfname=bar;custom_p2_cfname=baz' +) ENGINE=ROCKSDB +PARTITION BY LIST(c1) ( +PARTITION custom_p0 VALUES IN (1, 4, 7), +PARTITION custom_p1 VALUES IN (2, 5, 8), +PARTITION custom_p2 VALUES IN (3, 6, 9) +); +INSERT INTO t1 VALUES (1, 1, "one", null); +INSERT INTO t1 VALUES (2, 2, "two", null); +INSERT INTO t1 VALUES (3, 3, "three", null); +INSERT INTO t1 VALUES (5, 5, "five", null); +INSERT INTO t1 VALUES (9, 9, "nine", null); +SELECT * FROM t1; +c1 c2 name event +1 1 one NULL +2 2 two NULL +5 5 five NULL +3 3 three NULL +9 9 nine NULL +ALTER TABLE t1 DROP PRIMARY KEY; +SELECT * FROM t1; +c1 c2 name event +1 1 one NULL +2 2 two NULL +5 5 five NULL +3 3 three NULL +9 9 nine NULL +set @@global.rocksdb_compact_cf = 'foo'; +set @@global.rocksdb_compact_cf = 'bar'; +set @@global.rocksdb_compact_cf = 'baz'; +DROP TABLE t1; +CREATE TABLE t1 ( +c1 INT, +c2 INT, +name VARCHAR(25) NOT NULL, +event DATE, +PRIMARY KEY 
(`c1`, `c2`) COMMENT 'custom_p0_cfname=foo;custom_p1_cfname=bar;custom_p2_cfname=baz' +) ENGINE=ROCKSDB +PARTITION BY LIST(c1) ( +PARTITION custom_p0 VALUES IN (1, 4, 7), +PARTITION custom_p1 VALUES IN (2, 5, 8), +PARTITION custom_p2 VALUES IN (3, 6, 9) +); +INSERT INTO t1 VALUES (1, 1, "one", null); +INSERT INTO t1 VALUES (2, 2, "two", null); +INSERT INTO t1 VALUES (3, 3, "three", null); +INSERT INTO t1 VALUES (5, 5, "five", null); +INSERT INTO t1 VALUES (9, 9, "nine", null); +ALTER TABLE t1 DROP PRIMARY KEY; +ALTER TABLE t1 ADD PRIMARY KEY (`c1`, `c2`) COMMENT 'custom_p0_cfname=p0_cf;custom_p1_cfname=p1_cf'; +set @@global.rocksdb_compact_cf = 'p0_cf'; +set @@global.rocksdb_compact_cf = 'p1_cf'; +SELECT DISTINCT(cf_name) FROM information_schema.rocksdb_cfstats WHERE cf_name='p0_cf'; +cf_name +p0_cf +SELECT DISTINCT(cf_name) FROM information_schema.rocksdb_cfstats WHERE cf_name='p1_cf'; +cf_name +p1_cf +DROP TABLE t1; +CREATE TABLE t1 ( +c1 INT, +c2 INT, +name VARCHAR(25) NOT NULL, +event DATE, +PRIMARY KEY (`c1`, `c2`) COMMENT 'custom_p0_cfname=foo;custom_p1_cfname=bar;custom_p2_cfname=baz' +) ENGINE=ROCKSDB +PARTITION BY LIST(c1) ( +PARTITION custom_p0 VALUES IN (1, 4, 7), +PARTITION custom_p1 VALUES IN (2, 5, 8), +PARTITION custom_p2 VALUES IN (3, 6, 9) +); +INSERT INTO t1 VALUES (1, 1, "one", null); +INSERT INTO t1 VALUES (2, 2, "two", null); +INSERT INTO t1 VALUES (3, 3, "three", null); +INSERT INTO t1 VALUES (5, 5, "five", null); +INSERT INTO t1 VALUES (9, 9, "nine", null); +ALTER TABLE t1 PARTITION BY LIST(c1) ( +PARTITION custom_p3 VALUES IN (1, 4, 7), +PARTITION custom_p4 VALUES IN (2, 5, 8, 3, 6, 9) +); +ALTER TABLE t1 DROP PRIMARY KEY; +ALTER TABLE t1 ADD PRIMARY KEY (`c1`, `c2`) COMMENT 'custom_p3_cfname=p3_cf;custom_p4_cfname=p4_cf'; +set @@global.rocksdb_compact_cf = 'p3_cf'; +set @@global.rocksdb_compact_cf = 'p4_cf'; +SELECT DISTINCT(cf_name) FROM information_schema.rocksdb_cfstats WHERE cf_name='p3_cf'; +cf_name +p3_cf +SELECT DISTINCT(cf_name) 
FROM information_schema.rocksdb_cfstats WHERE cf_name='p4_cf'; +cf_name +p4_cf +DROP TABLE t1; +CREATE TABLE t1 ( +c1 INT, +c2 INT, +name VARCHAR(25) NOT NULL, +event DATE, +PRIMARY KEY (`c1`, `c2`) COMMENT 'custom_p0_cfname=foo;custom_p1_cfname=;' +) ENGINE=ROCKSDB +PARTITION BY LIST(c1) ( +PARTITION custom_p0 VALUES IN (1, 4, 7), +PARTITION custom_p1 VALUES IN (2, 5, 8), +PARTITION custom_p2 VALUES IN (3, 6, 9) +); +DROP TABLE t1; +CREATE TABLE `t2` ( +`col1` bigint(20) NOT NULL, +`col2` varbinary(64) NOT NULL, +`col3` varbinary(256) NOT NULL, +`col4` bigint(20) NOT NULL, +`col5` mediumblob NOT NULL, +PRIMARY KEY (`col1`,`col2`,`col3`) +) ENGINE=ROCKSDB DEFAULT CHARSET=latin1 +PARTITION BY LIST COLUMNS (`col2`) ( +PARTITION custom_p0 VALUES IN (0x12345), +PARTITION custom_p1 VALUES IN (0x23456), +PARTITION custom_p2 VALUES IN (0x34567), +PARTITION custom_p3 VALUES IN (0x45678), +PARTITION custom_p4 VALUES IN (0x56789), +PARTITION custom_p5 VALUES IN (0x6789A), +PARTITION custom_p6 VALUES IN (0x789AB), +PARTITION custom_p7 VALUES IN (0x89ABC) +); +DROP TABLE t2; +CREATE TABLE `t2` ( +`col1` bigint(20) NOT NULL, +`col2` varbinary(64) NOT NULL, +`col3` varbinary(256) NOT NULL, +`col4` bigint(20) NOT NULL, +`col5` mediumblob NOT NULL, +PRIMARY KEY (`col1`,`col2`,`col3`) COMMENT 'custom_p0_cfname=my_cf0;custom_p1_cfname=my_cf1' +) ENGINE=ROCKSDB DEFAULT CHARSET=latin1 +PARTITION BY LIST COLUMNS (`col2`) ( +PARTITION custom_p0 VALUES IN (0x12345), +PARTITION custom_p1 VALUES IN (0x23456), +PARTITION custom_p2 VALUES IN (0x34567), +PARTITION custom_p3 VALUES IN (0x45678), +PARTITION custom_p4 VALUES IN (0x56789), +PARTITION custom_p5 VALUES IN (0x6789A), +PARTITION custom_p6 VALUES IN (0x789AB), +PARTITION custom_p7 VALUES IN (0x89ABC) +); +set @@global.rocksdb_compact_cf = 'my_cf0'; +set @@global.rocksdb_compact_cf = 'my_cf1'; +SELECT DISTINCT(cf_name) FROM information_schema.rocksdb_cfstats WHERE cf_name='my_cf0'; +cf_name +my_cf0 +SELECT DISTINCT(cf_name) FROM 
information_schema.rocksdb_cfstats WHERE cf_name='my_cf1'; +cf_name +my_cf1 +INSERT INTO t2 VALUES (100, 0x12345, 0x1, 1, 0x2); +INSERT INTO t2 VALUES (200, 0x12345, 0x1, 1, 0x2); +INSERT INTO t2 VALUES (300, 0x12345, 0x1, 1, 0x2); +INSERT INTO t2 VALUES (100, 0x23456, 0x2, 1, 0x3); +INSERT INTO t2 VALUES (100, 0x34567, 0x4, 1, 0x5); +INSERT INTO t2 VALUES (400, 0x89ABC, 0x4, 1, 0x5); +SELECT col1, HEX(col2), HEX(col3), col4, HEX(col5) FROM t2; +col1 HEX(col2) HEX(col3) col4 HEX(col5) +100 012345 01 1 02 +200 012345 01 1 02 +300 012345 01 1 02 +100 023456 02 1 03 +100 034567 04 1 05 +400 089ABC 04 1 05 +EXPLAIN PARTITIONS SELECT HEX(col2) FROM t2 where col2 = 0x12345; +id select_type table partitions type possible_keys key key_len ref rows Extra +1 SIMPLE t2 custom_p0 index NULL PRIMARY 332 NULL 3 Using where; Using index +EXPLAIN PARTITIONS SELECT HEX(col2) FROM t2 where col2 = 0x23456; +id select_type table partitions type possible_keys key key_len ref rows Extra +1 SIMPLE t2 custom_p1 index NULL PRIMARY 332 NULL 2 Using where; Using index +ALTER TABLE t2 DROP PRIMARY KEY; +ALTER TABLE t2 ADD PRIMARY KEY (`col1`,`col2`,`col3`) COMMENT 'custom_p0_cfname=new_cf0;custom_p1_cfname=new_cf1'; +set @@global.rocksdb_compact_cf = 'new_cf0'; +set @@global.rocksdb_compact_cf = 'new_cf1'; +SELECT DISTINCT(cf_name) FROM information_schema.rocksdb_cfstats WHERE cf_name='new_cf0'; +cf_name +new_cf0 +SELECT DISTINCT(cf_name) FROM information_schema.rocksdb_cfstats WHERE cf_name='new_cf1'; +cf_name +new_cf1 +INSERT INTO t2 VALUES (500, 0x12345, 0x5, 1, 0x2); +INSERT INTO t2 VALUES (700, 0x23456, 0x7, 1, 0x3); +EXPLAIN PARTITIONS SELECT HEX(col2) FROM t2 where col2 = 0x12345; +id select_type table partitions type possible_keys key key_len ref rows Extra +1 SIMPLE t2 custom_p0 index NULL PRIMARY 332 NULL 4 Using where; Using index +EXPLAIN PARTITIONS SELECT HEX(col2) FROM t2 where col2 = 0x23456; +id select_type table partitions type possible_keys key key_len ref rows Extra +1 
SIMPLE t2 custom_p1 index NULL PRIMARY 332 NULL 2 Using where; Using index +SELECT col1, HEX(col2), HEX(col3), col4, HEX(col5) FROM t2; +col1 HEX(col2) HEX(col3) col4 HEX(col5) +100 012345 01 1 02 +200 012345 01 1 02 +300 012345 01 1 02 +500 012345 05 1 02 +100 023456 02 1 03 +700 023456 07 1 03 +100 034567 04 1 05 +400 089ABC 04 1 05 +DROP TABLE t2; +CREATE TABLE `t2` ( +`col1` bigint(20) NOT NULL, +`col2` varbinary(64) NOT NULL, +`col3` varbinary(256) NOT NULL, +`col4` bigint(20) NOT NULL, +`col5` mediumblob NOT NULL, +PRIMARY KEY (`col1`,`col2`,`col3`) COMMENT 'custom_p0_cfname=test_cf0;custom_p1_cfname=test_cf1', +KEY (`col2`, `col4`) COMMENT 'custom_p5_cfname=test_cf5' +) ENGINE=ROCKSDB DEFAULT CHARSET=latin1 +PARTITION BY LIST COLUMNS (`col2`) ( +PARTITION custom_p0 VALUES IN (0x12345), +PARTITION custom_p1 VALUES IN (0x23456), +PARTITION custom_p2 VALUES IN (0x34567), +PARTITION custom_p3 VALUES IN (0x45678), +PARTITION custom_p4 VALUES IN (0x56789), +PARTITION custom_p5 VALUES IN (0x6789A), +PARTITION custom_p6 VALUES IN (0x789AB), +PARTITION custom_p7 VALUES IN (0x89ABC) +); +SELECT DISTINCT(cf_name) FROM information_schema.rocksdb_cfstats WHERE cf_name='test_cf0'; +cf_name +test_cf0 +SELECT DISTINCT(cf_name) FROM information_schema.rocksdb_cfstats WHERE cf_name='test_cf1'; +cf_name +test_cf1 +SELECT DISTINCT(cf_name) FROM information_schema.rocksdb_cfstats WHERE cf_name='test_cf5'; +cf_name +test_cf5 +INSERT INTO t2 VALUES (100, 0x12345, 0x1, 1, 0x2); +INSERT INTO t2 VALUES (200, 0x12345, 0x1, 1, 0x2); +INSERT INTO t2 VALUES (300, 0x12345, 0x1, 1, 0x2); +INSERT INTO t2 VALUES (100, 0x23456, 0x2, 1, 0x3); +INSERT INTO t2 VALUES (100, 0x34567, 0x4, 1, 0x5); +INSERT INTO t2 VALUES (400, 0x89ABC, 0x4, 1, 0x5); +INSERT INTO t2 VALUES (500, 0x6789A, 0x5, 1, 0x7); +EXPLAIN PARTITIONS SELECT * FROM t2 WHERE col2 = 0x6789A AND col4 = 1; +id select_type table partitions type possible_keys key key_len ref rows Extra +1 SIMPLE t2 custom_p5 ref col2 col2 74 
const,const 1 Using where +ALTER TABLE t2 DROP KEY `col2`; +ALTER TABLE t2 ADD KEY (`col3`, `col4`) COMMENT 'custom_p5_cfname=another_cf_for_p5'; +SELECT DISTINCT(cf_name) FROM information_schema.rocksdb_cfstats WHERE cf_name='another_cf_for_p5'; +cf_name +another_cf_for_p5 +EXPLAIN PARTITIONS SELECT * FROM t2 WHERE col3 = 0x4 AND col2 = 0x34567; +id select_type table partitions type possible_keys key key_len ref rows Extra +1 SIMPLE t2 custom_p2 ref col3 col3 258 const 1 Using where +DROP TABLE t2; +CREATE TABLE `t2` ( +`col1` bigint(20) NOT NULL, +`col2` varbinary(64) NOT NULL, +`col3` varbinary(256) NOT NULL, +`col4` bigint(20) NOT NULL, +`col5` mediumblob NOT NULL, +PRIMARY KEY (`col1`,`col2`,`col3`) COMMENT 'custom_p0_cfname=test_cf0;custom_p1_cfname=test_cf1', +UNIQUE KEY (`col2`, `col4`) COMMENT 'custom_p5_cfname=unique_test_cf5' +) ENGINE=ROCKSDB DEFAULT CHARSET=latin1 +PARTITION BY LIST COLUMNS (`col2`) ( +PARTITION custom_p0 VALUES IN (0x12345), +PARTITION custom_p1 VALUES IN (0x23456), +PARTITION custom_p2 VALUES IN (0x34567), +PARTITION custom_p3 VALUES IN (0x45678), +PARTITION custom_p4 VALUES IN (0x56789), +PARTITION custom_p5 VALUES IN (0x6789A), +PARTITION custom_p6 VALUES IN (0x789AB), +PARTITION custom_p7 VALUES IN (0x89ABC) +); +SELECT DISTINCT(cf_name) FROM information_schema.rocksdb_cfstats WHERE cf_name='unique_test_cf5'; +cf_name +unique_test_cf5 +INSERT INTO t2 VALUES (100, 0x12345, 0x1, 1, 0x2); +INSERT INTO t2 VALUES (200, 0x12345, 0x1, 1, 0x2); +ERROR 23000: Duplicate entry '\x01#E-1' for key 'col2' +INSERT INTO t2 VALUES (300, 0x12345, 0x1, 1, 0x2); +ERROR 23000: Duplicate entry '\x01#E-1' for key 'col2' +INSERT INTO t2 VALUES (100, 0x23456, 0x2, 1, 0x3); +INSERT INTO t2 VALUES (100, 0x34567, 0x4, 1, 0x5); +INSERT INTO t2 VALUES (400, 0x89ABC, 0x4, 1, 0x5); +INSERT INTO t2 VALUES (500, 0x6789A, 0x5, 1, 0x7); +DROP TABLE t2; +CREATE TABLE t1 ( +`a` int, +PRIMARY KEY (a) COMMENT "sharedcf" +) ENGINE=ROCKSDB; +SELECT DISTINCT(cf_name) FROM 
information_schema.rocksdb_cfstats WHERE cf_name='sharedcf'; +cf_name +sharedcf +CREATE TABLE t2 ( +`a` INT, +`b` DATE, +`c` VARCHAR(42), +PRIMARY KEY (`a`) COMMENT "custom_p0_cfname=sharedcf;custom_p2_cfname=notsharedcf" +) ENGINE=ROCKSDB +PARTITION BY LIST(`a`) ( +PARTITION custom_p0 VALUES IN (1, 4, 7), +PARTITION custom_p1 VALUES IN (2, 5, 8), +PARTITION custom_p2 VALUES IN (3, 6, 9) +); +SELECT DISTINCT(cf_name) FROM information_schema.rocksdb_cfstats WHERE cf_name='notsharedcf'; +cf_name +notsharedcf +DROP TABLE IF EXISTS t1; +DROP TABLE IF EXISTS t2; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_cf_reverse.result b/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_cf_reverse.result new file mode 100644 index 0000000000000..1c85343cabb4b --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_cf_reverse.result @@ -0,0 +1,120 @@ +create table t0 (a int) engine=myisam; +insert into t0 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9); +create table t1 ( +pk int primary key, +a int not null, +b int not null, +key(a) comment 'rev:foo', +key(b) comment 'bar' +) engine=rocksdb; +insert into t1 select a,a,a from t0; +insert into t1 select a+10,a+10,a+10 from t0; +# Primary key is not in a reverse-ordered CF, so full table scan +# returns rows in ascending order: +select * from t1; +pk a b +0 0 0 +1 1 1 +2 2 2 +3 3 3 +4 4 4 +5 5 5 +6 6 6 +7 7 7 +8 8 8 +9 9 9 +10 10 10 +11 11 11 +12 12 12 +13 13 13 +14 14 14 +15 15 15 +16 16 16 +17 17 17 +18 18 18 +19 19 19 +explain +select a from t1 order by a limit 5; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 index NULL a 4 NULL # Using index +select a from t1 order by a limit 5; +a +0 +1 +2 +3 +4 +explain +select b from t1 order by b limit 5; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 index NULL b 4 NULL # Using index +select a from t1 order by a limit 5; +a +0 +1 +2 +3 +4 +explain +select a from t1 order by a desc limit 5; +id select_type 
table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 index NULL a 4 NULL # Using index +select a from t1 order by a desc limit 5; +a +19 +18 +17 +16 +15 +explain +select b from t1 order by b desc limit 5; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 index NULL b 4 NULL # Using index +select b from t1 order by b desc limit 5; +b +19 +18 +17 +16 +15 +drop table t1; +# +# Try a primary key in a reverse-ordered CF. +# +create table t2 ( +pk int, +a int not null, +primary key(pk) comment 'rev:cf1' +) engine=rocksdb; +insert into t2 select a,a from t0; +# Primary key is in a reverse-ordered CF, so full table scan +# returns rows in descending order: +select * from t2; +pk a +9 9 +8 8 +7 7 +6 6 +5 5 +4 4 +3 3 +2 2 +1 1 +0 0 +set autocommit=0; +begin; +delete from t2 where a=3 or a=7; +select * from t2; +pk a +9 9 +8 8 +6 6 +5 5 +4 4 +2 2 +1 1 +0 0 +rollback; +set autocommit=1; +drop table t2; +drop table t0; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_checksums.result b/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_checksums.result new file mode 100644 index 0000000000000..0ec2540e8dd5d --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_checksums.result @@ -0,0 +1,113 @@ +set @save_rocksdb_store_row_debug_checksums=@@global.rocksdb_store_row_debug_checksums; +set @save_rocksdb_verify_row_debug_checksums=@@global.rocksdb_verify_row_debug_checksums; +set @save_rocksdb_checksums_pct=@@global.rocksdb_checksums_pct; +show variables like 'rocksdb_%checksum%'; +Variable_name Value +rocksdb_checksums_pct 100 +rocksdb_store_row_debug_checksums OFF +rocksdb_verify_row_debug_checksums OFF +create table t1 (pk int primary key, a int, b int, key(a), key(b)) engine=rocksdb; +insert into t1 values (1,1,1),(2,2,2),(3,3,3); +check table t1; +Table Op Msg_type Msg_text +test.t1 check status OK +FOUND 1 /0 table records had checksums/ in mysqld.1.err +drop table t1; +set session rocksdb_store_row_debug_checksums=on; 
+create table t2 (pk int primary key, a int, b int, key(a), key(b)) engine=rocksdb; +insert into t2 values (1,1,1),(2,2,2),(3,3,3); +check table t2; +Table Op Msg_type Msg_text +test.t2 check status OK +FOUND 1 /3 table records had checksums/ in mysqld.1.err +# Now, make a table that has both rows with checksums and without +create table t3 (pk int primary key, a int, b int, key(a), key(b)) engine=rocksdb; +insert into t3 values (1,1,1),(2,2,2),(3,3,3); +set session rocksdb_store_row_debug_checksums=off; +update t3 set b=3 where a=2; +set session rocksdb_store_row_debug_checksums=on; +check table t3; +Table Op Msg_type Msg_text +test.t3 check status OK +FOUND 1 /2 table records had checksums/ in mysqld.1.err +set session rocksdb_store_row_debug_checksums=on; +set session rocksdb_checksums_pct=5; +create table t4 (pk int primary key, a int, b int, key(a), key(b)) engine=rocksdb; +check table t4; +Table Op Msg_type Msg_text +test.t4 check status OK +10000 index entries had around 500 checksums +10000 index entries had around 500 checksums +Around 500 table records had checksums +set session rocksdb_checksums_pct=100; +# +# Ok, table t2 has all rows with checksums. Simulate a few checksum mismatches. +# +insert into mtr.test_suppressions values +('Checksum mismatch in key of key-value pair for index'), +('Checksum mismatch in value of key-value pair for index'), +('Data with incorrect checksum'); +# 1. Start with mismatch in key checksum of the PK. +set session debug_dbug= "+d,myrocks_simulate_bad_pk_checksum1"; +set session rocksdb_verify_row_debug_checksums=off; +select * from t3; +pk a b +1 1 1 +2 2 3 +3 3 3 +set session rocksdb_verify_row_debug_checksums=on; +select * from t3; +ERROR HY000: Internal error: Record checksum mismatch +select * from t4; +ERROR HY000: Internal error: Record checksum mismatch +set session debug_dbug= "-d,myrocks_simulate_bad_pk_checksum1"; +# 2. Continue with mismatch in pk value checksum. 
+set session debug_dbug= "+d,myrocks_simulate_bad_pk_checksum2"; +set session rocksdb_verify_row_debug_checksums=off; +select * from t3; +pk a b +1 1 1 +2 2 3 +3 3 3 +set session rocksdb_verify_row_debug_checksums=on; +select * from t3; +ERROR HY000: Internal error: Record checksum mismatch +select * from t4; +ERROR HY000: Internal error: Record checksum mismatch +set session debug_dbug= "-d,myrocks_simulate_bad_pk_checksum2"; +# 3. Check if we catch checksum mismatches for secondary indexes +explain +select * from t3 force index(a) where a<4; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t3 range a a 5 NULL # Using index condition +select * from t3 force index(a) where a<4; +pk a b +1 1 1 +2 2 3 +3 3 3 +set session debug_dbug= "+d,myrocks_simulate_bad_key_checksum1"; +select * from t3 force index(a) where a<4; +ERROR HY000: Internal error: Record checksum mismatch +select * from t4 force index(a) where a<1000000; +ERROR HY000: Internal error: Record checksum mismatch +set session debug_dbug= "-d,myrocks_simulate_bad_key_checksum1"; +# 4. The same for index-only reads? 
+explain +select a from t3 force index(a) where a<4; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t3 index a a 5 NULL # Using where; Using index +select a from t3 force index(a) where a<4; +a +1 +2 +3 +set session debug_dbug= "+d,myrocks_simulate_bad_key_checksum1"; +select a from t3 force index(a) where a<4; +ERROR HY000: Internal error: Record checksum mismatch +select a from t4 force index(a) where a<1000000; +ERROR HY000: Internal error: Record checksum mismatch +set session debug_dbug= "-d,myrocks_simulate_bad_key_checksum1"; +set @@global.rocksdb_store_row_debug_checksums=@save_rocksdb_store_row_debug_checksums; +set @@global.rocksdb_verify_row_debug_checksums=@save_rocksdb_verify_row_debug_checksums; +set @@global.rocksdb_checksums_pct=@save_rocksdb_checksums_pct; +drop table t2,t3,t4; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_concurrent_delete.result b/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_concurrent_delete.result new file mode 100644 index 0000000000000..ea9114c14d1b7 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_concurrent_delete.result @@ -0,0 +1,84 @@ +connect con, localhost, root,,; +connection default; +SET debug_sync='RESET'; +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 (pk INT PRIMARY KEY COMMENT "", a INT); +INSERT INTO t1 VALUES(1,1), (2,2), (3,3); +connection con; +SET debug_sync='rocksdb_concurrent_delete SIGNAL parked WAIT_FOR go'; +SELECT * FROM t1 order by t1.pk ASC FOR UPDATE; +connection default; +SET debug_sync='now WAIT_FOR parked'; +DELETE FROM t1 WHERE pk = 1; +SET debug_sync='now SIGNAL go'; +connection con; +pk a +2 2 +3 3 +connection default; +disconnect con; +set debug_sync='RESET'; +drop table t1; +connect con, localhost, root,,; +connection default; +SET debug_sync='RESET'; +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 (pk INT PRIMARY KEY COMMENT "", a INT); +INSERT INTO t1 VALUES(1,1), (2,2), (3,3); +connection con; +SET debug_sync='rocksdb_concurrent_delete 
SIGNAL parked WAIT_FOR go'; +SELECT * FROM t1 order by t1.pk DESC FOR UPDATE; +connection default; +SET debug_sync='now WAIT_FOR parked'; +DELETE FROM t1 WHERE pk = 3; +SET debug_sync='now SIGNAL go'; +connection con; +pk a +2 2 +1 1 +connection default; +disconnect con; +set debug_sync='RESET'; +drop table t1; +connect con, localhost, root,,; +connection default; +SET debug_sync='RESET'; +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 (pk INT PRIMARY KEY COMMENT "rev:cf2", a INT); +INSERT INTO t1 VALUES(1,1), (2,2), (3,3); +connection con; +SET debug_sync='rocksdb_concurrent_delete SIGNAL parked WAIT_FOR go'; +SELECT * FROM t1 order by t1.pk ASC FOR UPDATE; +connection default; +SET debug_sync='now WAIT_FOR parked'; +DELETE FROM t1 WHERE pk = 1; +SET debug_sync='now SIGNAL go'; +connection con; +pk a +2 2 +3 3 +connection default; +disconnect con; +set debug_sync='RESET'; +drop table t1; +connect con, localhost, root,,; +connection default; +SET debug_sync='RESET'; +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 (pk INT PRIMARY KEY COMMENT "rev:cf2", a INT); +INSERT INTO t1 VALUES(1,1), (2,2), (3,3); +connection con; +SET debug_sync='rocksdb_concurrent_delete SIGNAL parked WAIT_FOR go'; +SELECT * FROM t1 order by t1.pk DESC FOR UPDATE; +connection default; +SET debug_sync='now WAIT_FOR parked'; +DELETE FROM t1 WHERE pk = 3; +SET debug_sync='now SIGNAL go'; +connection con; +pk a +2 2 +1 1 +connection default; +disconnect con; +set debug_sync='RESET'; +drop table t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_datadir.result b/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_datadir.result new file mode 100644 index 0000000000000..40c53f6fd8a9b --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_datadir.result @@ -0,0 +1,2 @@ +Check for MANIFEST files +MANIFEST-000006 diff --git a/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_deadlock_detect_rc.result b/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_deadlock_detect_rc.result new file mode 100644 index 
0000000000000..e52f495e7d544 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_deadlock_detect_rc.result @@ -0,0 +1,70 @@ +set @prior_rocksdb_lock_wait_timeout = @@rocksdb_lock_wait_timeout; +set @prior_rocksdb_deadlock_detect = @@rocksdb_deadlock_detect; +set global rocksdb_lock_wait_timeout = 100000; +set global rocksdb_deadlock_detect = ON; +create table t (i int primary key); +create table r1 (id int primary key, value int); +insert into r1 values (1,1),(2,2),(3,3),(4,4),(5,5),(6,6),(7,7),(8,8),(9,9),(10,10); +create table r2 like r1; +insert into r2 select * from r1; +connect con1,localhost,root,,; +begin; +update r2 set value=100 where id=9; +connect con2,localhost,root,,; +begin; +update r1 set value=100 where id=8; +select * from r2 for update;; +connection con1; +select * from r1 for update; +ERROR 40001: Deadlock found when trying to get lock; try restarting transaction +rollback; +connection con2; +id value +1 1 +2 2 +3 3 +4 4 +5 5 +6 6 +7 7 +8 8 +9 9 +10 10 +rollback; +connection con1; +begin; +insert into t values (1); +connection con2; +begin; +insert into t values (2); +connect con3,localhost,root,,; +begin; +insert into t values (3); +connection con1; +select * from t where i = 2 for update; +connection con2; +select * from t where i = 3 for update; +connection con3; +select * from t; +i +3 +insert into t values (4), (1); +ERROR 40001: Deadlock found when trying to get lock; try restarting transaction +# Statement should be rolled back +select * from t; +i +3 +rollback; +connection con2; +i +rollback; +connection con1; +i +rollback; +connection default; +disconnect con1; +disconnect con2; +disconnect con3; +set global rocksdb_lock_wait_timeout = @prior_rocksdb_lock_wait_timeout; +set global rocksdb_deadlock_detect = @prior_rocksdb_deadlock_detect; +drop table t,r1,r2; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_deadlock_detect_rr.result b/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_deadlock_detect_rr.result new file 
mode 100644 index 0000000000000..e52f495e7d544 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_deadlock_detect_rr.result @@ -0,0 +1,70 @@ +set @prior_rocksdb_lock_wait_timeout = @@rocksdb_lock_wait_timeout; +set @prior_rocksdb_deadlock_detect = @@rocksdb_deadlock_detect; +set global rocksdb_lock_wait_timeout = 100000; +set global rocksdb_deadlock_detect = ON; +create table t (i int primary key); +create table r1 (id int primary key, value int); +insert into r1 values (1,1),(2,2),(3,3),(4,4),(5,5),(6,6),(7,7),(8,8),(9,9),(10,10); +create table r2 like r1; +insert into r2 select * from r1; +connect con1,localhost,root,,; +begin; +update r2 set value=100 where id=9; +connect con2,localhost,root,,; +begin; +update r1 set value=100 where id=8; +select * from r2 for update;; +connection con1; +select * from r1 for update; +ERROR 40001: Deadlock found when trying to get lock; try restarting transaction +rollback; +connection con2; +id value +1 1 +2 2 +3 3 +4 4 +5 5 +6 6 +7 7 +8 8 +9 9 +10 10 +rollback; +connection con1; +begin; +insert into t values (1); +connection con2; +begin; +insert into t values (2); +connect con3,localhost,root,,; +begin; +insert into t values (3); +connection con1; +select * from t where i = 2 for update; +connection con2; +select * from t where i = 3 for update; +connection con3; +select * from t; +i +3 +insert into t values (4), (1); +ERROR 40001: Deadlock found when trying to get lock; try restarting transaction +# Statement should be rolled back +select * from t; +i +3 +rollback; +connection con2; +i +rollback; +connection con1; +i +rollback; +connection default; +disconnect con1; +disconnect con2; +disconnect con3; +set global rocksdb_lock_wait_timeout = @prior_rocksdb_lock_wait_timeout; +set global rocksdb_deadlock_detect = @prior_rocksdb_deadlock_detect; +drop table t,r1,r2; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_deadlock_stress_rc.result 
b/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_deadlock_stress_rc.result new file mode 100644 index 0000000000000..f97da0099fe50 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_deadlock_stress_rc.result @@ -0,0 +1,8 @@ +create table t1 (a int primary key, b int) engine=rocksdb; +set @prior_rocksdb_lock_wait_timeout = @@rocksdb_lock_wait_timeout; +set @prior_rocksdb_deadlock_detect = @@rocksdb_deadlock_detect; +set global rocksdb_lock_wait_timeout = 100000; +set global rocksdb_deadlock_detect = ON; +set global rocksdb_lock_wait_timeout = @prior_rocksdb_lock_wait_timeout; +set global rocksdb_deadlock_detect = @prior_rocksdb_deadlock_detect; +drop table t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_deadlock_stress_rr.result b/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_deadlock_stress_rr.result new file mode 100644 index 0000000000000..f97da0099fe50 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_deadlock_stress_rr.result @@ -0,0 +1,8 @@ +create table t1 (a int primary key, b int) engine=rocksdb; +set @prior_rocksdb_lock_wait_timeout = @@rocksdb_lock_wait_timeout; +set @prior_rocksdb_deadlock_detect = @@rocksdb_deadlock_detect; +set global rocksdb_lock_wait_timeout = 100000; +set global rocksdb_deadlock_detect = ON; +set global rocksdb_lock_wait_timeout = @prior_rocksdb_lock_wait_timeout; +set global rocksdb_deadlock_detect = @prior_rocksdb_deadlock_detect; +drop table t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_icp.result b/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_icp.result new file mode 100644 index 0000000000000..b2b6d7cdde9f9 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_icp.result @@ -0,0 +1,257 @@ +select * from information_schema.engines where engine = 'rocksdb'; +ENGINE SUPPORT COMMENT TRANSACTIONS XA SAVEPOINTS +ROCKSDB DEFAULT RocksDB storage engine YES YES YES +drop table if exists t0,t1,t2,t3; +create table t0 (a int) engine=myisam; +insert into t0 values 
(0),(1),(2),(3),(4),(5),(6),(7),(8),(9); +create table t1(a int) engine=myisam; +insert into t1 select A.a + B.a* 10 + C.a * 100 from t0 A, t0 B, t0 C; +create table t2 ( +pk int primary key, +kp1 int, +kp2 int, +col1 int, +key (kp1,kp2) comment 'cf1' +) engine=rocksdb; +insert into t2 select a,a,a,a from t1; +# Try a basic case: +explain +select * from t2 where kp1 between 1 and 10 and mod(kp2,2)=0; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t2 range kp1 kp1 5 NULL # Using index condition +select * from t2 where kp1 between 1 and 10 and mod(kp2,2)=0; +pk kp1 kp2 col1 +2 2 2 2 +4 4 4 4 +6 6 6 6 +8 8 8 8 +10 10 10 10 +# Check that ICP doesnt work for columns where column value +# cant be restored from mem-comparable form: +create table t3 ( +pk int primary key, +kp1 int, +kp2 varchar(10) collate utf8_general_ci, +col1 int, +key (kp1,kp2) comment 'cf1' +) engine=rocksdb; +insert into t3 select a,a/10,a,a from t1; +# This must not use ICP: +explain +select * from t3 where kp1=3 and kp2 like '%foo%'; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t3 ref kp1 kp1 5 const # Using where +explain format=json +select * from t3 where kp1 between 2 and 4 and mod(kp1,3)=0 and kp2 like '%foo%'; +EXPLAIN +{ + "query_block": { + "select_id": 1, + "table": { + "table_name": "t3", + "access_type": "range", + "possible_keys": ["kp1"], + "key": "kp1", + "key_length": "5", + "used_key_parts": ["kp1"], + "rows": 1000, + "filtered": 100, + "index_condition": "t3.kp1 between 2 and 4 and t3.kp1 % 3 = 0", + "attached_condition": "t3.kp2 like '%foo%'" + } + } +} +# Check that we handle the case where out-of-range is encountered sooner +# than matched index condition +explain +select * from t2 where kp1< 3 and kp2+1>50000; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t2 range kp1 kp1 5 NULL # Using index condition +select * from t2 where kp1< 3 and kp2+1>50000; +pk kp1 kp2 col1 +explain +select 
* from t2 where kp1< 3 and kp2+1>50000; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t2 range kp1 kp1 5 NULL # Using index condition +select * from t2 where kp1< 3 and kp2+1>50000; +pk kp1 kp2 col1 +# Try doing backwards scans +# MariaDB: ICP is not supported for reverse scans. +explain +select * from t2 where kp1 between 1 and 10 and mod(kp2,2)=0 order by kp1 desc; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t2 range kp1 kp1 5 NULL # Using where +select * from t2 where kp1 between 1 and 10 and mod(kp2,2)=0 order by kp1 desc; +pk kp1 kp2 col1 +10 10 10 10 +8 8 8 8 +6 6 6 6 +4 4 4 4 +2 2 2 2 +explain +select * from t2 where kp1 >990 and mod(kp2,2)=0 order by kp1 desc; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t2 range kp1 kp1 5 NULL # Using where +select * from t2 where kp1 >990 and mod(kp2,2)=0 order by kp1 desc; +pk kp1 kp2 col1 +998 998 998 998 +996 996 996 996 +994 994 994 994 +992 992 992 992 +explain +select * from t2 where kp1< 3 and kp2+1>50000 order by kp1 desc; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t2 range kp1 kp1 5 NULL # Using where +select * from t2 where kp1< 3 and kp2+1>50000 order by kp1 desc; +pk kp1 kp2 col1 +drop table t0,t1,t2,t3; +# +# Check how ICP affects counters +# +# First, some preparations +# +# in facebook/mysql-5.6, it was: +# select ROWS_READ, ROWS_REQUESTED, ROWS_INDEX_FIRST, ROWS_INDEX_NEXT +# +# In MariaDB, we do: +create procedure save_read_stats() +begin +set @rr=(select ROWS_READ +from information_schema.table_statistics +where table_name='t4' and table_schema=database()); +set @rif= (select VARIABLE_VALUE +from information_schema.session_status +where VARIABLE_NAME='Handler_read_first'); +set @rin=(select VARIABLE_VALUE +from information_schema.session_status +where VARIABLE_NAME='Handler_read_next'); +set @icp_attempts=(select VARIABLE_VALUE +from information_schema.session_status 
+where VARIABLE_NAME='Handler_icp_attempts'); +set @icp_matches=(select VARIABLE_VALUE +from information_schema.session_status +where VARIABLE_NAME='Handler_icp_match'); +end| +create procedure get_read_stats() +begin +select +(select ROWS_READ +from information_schema.table_statistics +where table_name='t4' and table_schema=database() +) - @rr as ROWS_READ_DIFF, +(select VARIABLE_VALUE - @rif +from information_schema.session_status +where VARIABLE_NAME='Handler_read_first') as ROWS_INDEX_FIRST, +(select VARIABLE_VALUE - @rin +from information_schema.session_status +where VARIABLE_NAME='Handler_read_next') as ROWS_INDEX_NEXT, +(select VARIABLE_VALUE - @icp_attempts +from information_schema.session_status +where VARIABLE_NAME='Handler_icp_attempts') as ICP_ATTEMPTS, +(select VARIABLE_VALUE - @icp_matches +from information_schema.session_status +where VARIABLE_NAME='Handler_icp_match') as ICP_MATCHES; +end| +create table t4 ( +id int, +id1 int, +id2 int, +value int, +value2 varchar(100), +primary key (id), +key id1_id2 (id1, id2) comment 'cf1' +) engine=rocksdb charset=latin1 collate latin1_bin; +insert into t4 values +(1,1,1,1,1), (2,1,2,2,2), (3,1,3,3,3),(4,1,4,4,4),(5,1,5,5,5), +(6,1,6,6,6), (7,1,7,7,7), (8,1,8,8,8),(9,1,9,9,9),(10,1,10,10,10); +# +# Now, the test itself +# +call save_read_stats(); +call get_read_stats(); +ROWS_READ_DIFF ROWS_INDEX_FIRST ROWS_INDEX_NEXT ICP_ATTEMPTS ICP_MATCHES +0 0 0 0 0 +# ============== index-only query ============== +explain +select id1,id2 from t4 force index (id1_id2) where id1=1 and id2 % 10 = 1; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t4 ref id1_id2 id1_id2 5 const # Using where; Using index +call save_read_stats(); +select id1,id2 from t4 force index (id1_id2) where id1=1 and id2 % 10 = 1; +id1 id2 +1 1 +call get_read_stats(); +ROWS_READ_DIFF 10 +ROWS_INDEX_FIRST 0 +ROWS_INDEX_NEXT 10 +ICP_ATTEMPTS 0 +ICP_MATCHES 0 +# ============== Query without ICP ============== +set 
optimizer_switch='index_condition_pushdown=off'; +explain +select * from t4 force index (id1_id2) where id1=1 and id2 % 10 = 1; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t4 ref id1_id2 id1_id2 5 const # Using where +call save_read_stats(); +select * from t4 force index (id1_id2) where id1=1 and id2 % 10 = 1; +id id1 id2 value value2 +1 1 1 1 1 +call get_read_stats(); +ROWS_READ_DIFF 10 +ROWS_INDEX_FIRST 0 +ROWS_INDEX_NEXT 10 +ICP_ATTEMPTS 0 +ICP_MATCHES 0 +# ============== Query with ICP ============== +set optimizer_switch='index_condition_pushdown=on'; +explain +select * from t4 force index (id1_id2) where id1=1 and id2 % 10 = 1; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t4 ref id1_id2 id1_id2 5 const # Using index condition +call save_read_stats(); +select * from t4 force index (id1_id2) where id1=1 and id2 % 10 = 1; +id id1 id2 value value2 +1 1 1 1 1 +call get_read_stats(); +ROWS_READ_DIFF 1 +ROWS_INDEX_FIRST 0 +ROWS_INDEX_NEXT 1 +ICP_ATTEMPTS 10 +ICP_MATCHES 1 +drop table t4; +drop procedure save_read_stats; +drop procedure get_read_stats; +# +# Issue #67: Inefficient index condition pushdown +# +create table t0 (a int) engine=myisam; +insert into t0 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9); +create table t1 ( +pk int not null primary key, +key1 bigint(20) unsigned, +col1 int, +key (key1) +) engine=rocksdb; +insert into t1 +select +A.a+10*B.a+100*C.a, +A.a+10*B.a+100*C.a, +1234 +from t0 A, t0 B, t0 C; +set @count=0; +explain +select * from t1 where key1=1; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 ref key1 key1 9 const # +set @count_diff =(select (value - @count) from information_schema.rocksdb_perf_context +where table_schema=database() and table_name='t1' and stat_type='INTERNAL_KEY_SKIPPED_COUNT'); +select * from t1 where key1=1; +pk key1 col1 +1 1 1234 +set @count_diff =(select (value - @count) from information_schema.rocksdb_perf_context 
+where table_schema=database() and table_name='t1' and stat_type='INTERNAL_KEY_SKIPPED_COUNT'); +# The following must be =1, or in any case not 999: +select @count_diff as "INTERNAL_KEY_SKIPPED_COUNT increment"; +INTERNAL_KEY_SKIPPED_COUNT increment +1 +drop table t0,t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_icp_rev.result b/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_icp_rev.result new file mode 100644 index 0000000000000..9c4b2d22ad74f --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_icp_rev.result @@ -0,0 +1,223 @@ +select * from information_schema.engines where engine = 'rocksdb'; +ENGINE SUPPORT COMMENT TRANSACTIONS XA SAVEPOINTS +ROCKSDB DEFAULT RocksDB storage engine YES YES YES +drop table if exists t0,t1,t2,t3; +create table t0 (a int) engine=myisam; +insert into t0 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9); +create table t1(a int) engine=myisam; +insert into t1 select A.a + B.a* 10 + C.a * 100 from t0 A, t0 B, t0 C; +create table t2 ( +pk int primary key, +kp1 int, +kp2 int, +col1 int, +key (kp1,kp2) comment 'rev:cf1' +) engine=rocksdb; +insert into t2 select a,a,a,a from t1; +# Try a basic case: +explain +select * from t2 where kp1 between 1 and 10 and mod(kp2,2)=0; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t2 range kp1 kp1 5 NULL # Using index condition +select * from t2 where kp1 between 1 and 10 and mod(kp2,2)=0; +pk kp1 kp2 col1 +2 2 2 2 +4 4 4 4 +6 6 6 6 +8 8 8 8 +10 10 10 10 +# Check that ICP doesnt work for columns where column value +# cant be restored from mem-comparable form: +create table t3 ( +pk int primary key, +kp1 int, +kp2 varchar(10) collate utf8_general_ci, +col1 int, +key (kp1,kp2) comment 'rev:cf1' +) engine=rocksdb; +insert into t3 select a,a/10,a,a from t1; +# This must not use ICP: +explain +select * from t3 where kp1=3 and kp2 like '%foo%'; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t3 ref kp1 kp1 5 const # Using where 
+explain format=json +select * from t3 where kp1 between 2 and 4 and mod(kp1,3)=0 and kp2 like '%foo%'; +EXPLAIN +{ + "query_block": { + "select_id": 1, + "table": { + "table_name": "t3", + "access_type": "range", + "possible_keys": ["kp1"], + "key": "kp1", + "key_length": "5", + "used_key_parts": ["kp1"], + "rows": 1000, + "filtered": 100, + "index_condition": "t3.kp1 between 2 and 4 and t3.kp1 % 3 = 0", + "attached_condition": "t3.kp2 like '%foo%'" + } + } +} +# Check that we handle the case where out-of-range is encountered sooner +# than matched index condition +explain +select * from t2 where kp1< 3 and kp2+1>50000; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t2 range kp1 kp1 5 NULL # Using index condition +select * from t2 where kp1< 3 and kp2+1>50000; +pk kp1 kp2 col1 +explain +select * from t2 where kp1< 3 and kp2+1>50000; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t2 range kp1 kp1 5 NULL # Using index condition +select * from t2 where kp1< 3 and kp2+1>50000; +pk kp1 kp2 col1 +# Try doing backwards scans +# MariaDB: ICP is not supported for reverse scans. 
+explain +select * from t2 where kp1 between 1 and 10 and mod(kp2,2)=0 order by kp1 desc; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t2 range kp1 kp1 5 NULL # Using where +select * from t2 where kp1 between 1 and 10 and mod(kp2,2)=0 order by kp1 desc; +pk kp1 kp2 col1 +10 10 10 10 +8 8 8 8 +6 6 6 6 +4 4 4 4 +2 2 2 2 +explain +select * from t2 where kp1 >990 and mod(kp2,2)=0 order by kp1 desc; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t2 range kp1 kp1 5 NULL # Using where +select * from t2 where kp1 >990 and mod(kp2,2)=0 order by kp1 desc; +pk kp1 kp2 col1 +998 998 998 998 +996 996 996 996 +994 994 994 994 +992 992 992 992 +explain +select * from t2 where kp1< 3 and kp2+1>50000 order by kp1 desc; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t2 range kp1 kp1 5 NULL # Using where +select * from t2 where kp1< 3 and kp2+1>50000 order by kp1 desc; +pk kp1 kp2 col1 +drop table t0,t1,t2,t3; +# +# Check how ICP affects counters +# +# First, some preparations +# +# in facebook/mysql-5.6, it was: +# select ROWS_READ, ROWS_REQUESTED, ROWS_INDEX_FIRST, ROWS_INDEX_NEXT +# +# In MariaDB, we do: +create procedure save_read_stats() +begin +set @rr=(select ROWS_READ +from information_schema.table_statistics +where table_name='t4' and table_schema=database()); +set @rif= (select VARIABLE_VALUE +from information_schema.session_status +where VARIABLE_NAME='Handler_read_first'); +set @rin=(select VARIABLE_VALUE +from information_schema.session_status +where VARIABLE_NAME='Handler_read_next'); +set @icp_attempts=(select VARIABLE_VALUE +from information_schema.session_status +where VARIABLE_NAME='Handler_icp_attempts'); +set @icp_matches=(select VARIABLE_VALUE +from information_schema.session_status +where VARIABLE_NAME='Handler_icp_match'); +end| +create procedure get_read_stats() +begin +select +(select ROWS_READ +from information_schema.table_statistics +where table_name='t4' and 
table_schema=database() +) - @rr as ROWS_READ_DIFF, +(select VARIABLE_VALUE - @rif +from information_schema.session_status +where VARIABLE_NAME='Handler_read_first') as ROWS_INDEX_FIRST, +(select VARIABLE_VALUE - @rin +from information_schema.session_status +where VARIABLE_NAME='Handler_read_next') as ROWS_INDEX_NEXT, +(select VARIABLE_VALUE - @icp_attempts +from information_schema.session_status +where VARIABLE_NAME='Handler_icp_attempts') as ICP_ATTEMPTS, +(select VARIABLE_VALUE - @icp_matches +from information_schema.session_status +where VARIABLE_NAME='Handler_icp_match') as ICP_MATCHES; +end| +create table t4 ( +id int, +id1 int, +id2 int, +value int, +value2 varchar(100), +primary key (id), +key id1_id2 (id1, id2) comment 'rev:cf1' +) engine=rocksdb charset=latin1 collate latin1_bin; +insert into t4 values +(1,1,1,1,1), (2,1,2,2,2), (3,1,3,3,3),(4,1,4,4,4),(5,1,5,5,5), +(6,1,6,6,6), (7,1,7,7,7), (8,1,8,8,8),(9,1,9,9,9),(10,1,10,10,10); +# +# Now, the test itself +# +call save_read_stats(); +call get_read_stats(); +ROWS_READ_DIFF ROWS_INDEX_FIRST ROWS_INDEX_NEXT ICP_ATTEMPTS ICP_MATCHES +0 0 0 0 0 +# ============== index-only query ============== +explain +select id1,id2 from t4 force index (id1_id2) where id1=1 and id2 % 10 = 1; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t4 ref id1_id2 id1_id2 5 const # Using where; Using index +call save_read_stats(); +select id1,id2 from t4 force index (id1_id2) where id1=1 and id2 % 10 = 1; +id1 id2 +1 1 +call get_read_stats(); +ROWS_READ_DIFF 10 +ROWS_INDEX_FIRST 0 +ROWS_INDEX_NEXT 10 +ICP_ATTEMPTS 0 +ICP_MATCHES 0 +# ============== Query without ICP ============== +set optimizer_switch='index_condition_pushdown=off'; +explain +select * from t4 force index (id1_id2) where id1=1 and id2 % 10 = 1; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t4 ref id1_id2 id1_id2 5 const # Using where +call save_read_stats(); +select * from t4 force index (id1_id2) where 
id1=1 and id2 % 10 = 1; +id id1 id2 value value2 +1 1 1 1 1 +call get_read_stats(); +ROWS_READ_DIFF 10 +ROWS_INDEX_FIRST 0 +ROWS_INDEX_NEXT 10 +ICP_ATTEMPTS 0 +ICP_MATCHES 0 +# ============== Query with ICP ============== +set optimizer_switch='index_condition_pushdown=on'; +explain +select * from t4 force index (id1_id2) where id1=1 and id2 % 10 = 1; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t4 ref id1_id2 id1_id2 5 const # Using index condition +call save_read_stats(); +select * from t4 force index (id1_id2) where id1=1 and id2 % 10 = 1; +id id1 id2 value value2 +1 1 1 1 1 +call get_read_stats(); +ROWS_READ_DIFF 1 +ROWS_INDEX_FIRST 0 +ROWS_INDEX_NEXT 1 +ICP_ATTEMPTS 10 +ICP_MATCHES 1 +drop table t4; +drop procedure save_read_stats; +drop procedure get_read_stats; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_locks.result b/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_locks.result new file mode 100644 index 0000000000000..925cd2c60dbdd --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_locks.result @@ -0,0 +1,64 @@ +create table t1 (pk int not null primary key) engine=rocksdb; +insert into t1 values (1),(2),(3); +set autocommit=0; +begin; +select * from t1 where pk=1 for update; +pk +1 +connect con1,localhost,root,,; +connection con1; +call mtr.add_suppression("Got snapshot conflict errors"); +### Connection con1 +set @@rocksdb_lock_wait_timeout=500; +set autocommit=0; +begin; +select * from t1 where pk=1 for update;; +connection default; +### Connection default +rollback; +connection con1; +pk +1 +rollback; +connection default; +begin; +select * from t1 where pk=1 for update; +pk +1 +connection con1; +### Connection con1 +set @@rocksdb_lock_wait_timeout=2; +set autocommit=0; +begin; +select * from t1 where pk=1 for update; +ERROR HY000: Lock wait timeout exceeded; try restarting transaction +connection default; +rollback; +set autocommit=1; +connection con1; +drop table t1; +connection default; +# +# 
Now, test what happens if another transaction modified the record and committed +# +CREATE TABLE t1 ( +id int primary key, +value int +) engine=rocksdb collate latin1_bin; +insert into t1 values (1,1),(2,2),(3,3),(4,4),(5,5),(6,6),(7,7),(8,8),(9,9),(10,10); +connection con1; +BEGIN; +SELECT * FROM t1 WHERE id=3; +id value +3 3 +connection default; +BEGIN; +UPDATE t1 SET value=30 WHERE id=3; +COMMIT; +connection con1; +SELECT * FROM t1 WHERE id=3 FOR UPDATE; +ERROR 40001: Deadlock found when trying to get lock; try restarting transaction +ROLLBACK; +disconnect con1; +connection default; +drop table t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_parts.result b/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_parts.result new file mode 100644 index 0000000000000..7bebbbec2052b --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_parts.result @@ -0,0 +1,123 @@ +drop table if exists t1,t2; +# Tests for MyRocks + partitioning +# +# MyRocks Issue #70: Server crashes in Rdb_key_def::get_primary_key_tuple +# +CREATE TABLE t1 (pk INT PRIMARY KEY, f1 INT, f2 INT, KEY(f2)) ENGINE=RocksDB +PARTITION BY HASH(pk) PARTITIONS 2; +INSERT INTO t1 VALUES (1, 6, NULL), (2, NULL, 1); +CREATE TABLE t2 (pk INT PRIMARY KEY, f1 INT) ENGINE=RocksDB; +INSERT INTO t2 VALUES (1, 1), (2, 1); +SELECT f1 FROM t1 WHERE f2 = ( SELECT f1 FROM t2 WHERE pk = 2 ); +f1 +NULL +drop table t1,t2; +# +# Issue#105: key_info[secondary_key].actual_key_parts does not include primary key on partitioned tables +# +CREATE TABLE t1 ( +id INT PRIMARY KEY, +a set ('a','b','c','d','e','f','g','h','i','j','k','l','m','n','o','p','q','r','s','t','u','v','w','x','y','z') CHARACTER SET utf8, +b set ('a','b','c','d','e','f','g','h','i','j','k','l','m','n','o','p','q','r','s','t','u','v','w','x','y','z') CHARACTER SET utf8 default null, +c set ('a','b','c','d','e','f','g','h','i','j','k','l','m','n','o','p','q','r','s','t','u','v','w','x','y','z') CHARACTER SET utf8 not null, +INDEX (a), +INDEX (b), 
+INDEX (c) +) ENGINE=RocksDB PARTITION BY key (id) partitions 2; +INSERT INTO t1 (id, b) VALUES (28, 3); +Warnings: +Warning 1364 Field 'c' doesn't have a default value +UPDATE t1 SET id=8 WHERE c < 8 LIMIT 1; +check table t1; +Table Op Msg_type Msg_text +test.t1 check status OK +drop table t1; +# +# Issue #105, another testcase +# +create table t1 ( +pk int primary key, +col1 int, +col2 int, +key (col1) comment 'rev:cf_issue105' +) engine=rocksdb partition by hash(pk) partitions 2; +insert into t1 values (1,10,10); +insert into t1 values (2,10,10); +insert into t1 values (11,20,20); +insert into t1 values (12,20,20); +explain select * from t1 force index(col1) where col1=10; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 ref col1 col1 5 const 2000 +select * from t1 force index(col1) where col1=10; +pk col1 col2 +1 10 10 +2 10 10 +select * from t1 use index () where col1=10; +pk col1 col2 +2 10 10 +1 10 10 +drop table t1; +# +# Issue #108: Index-only scans do not work for partitioned tables and extended keys +# +create table t1 ( +pk int primary key, +col1 int, +col2 int, +key (col1) +) engine=rocksdb partition by hash(pk) partitions 2; +insert into t1 values (1,10,10); +insert into t1 values (2,10,10); +insert into t1 values (11,20,20); +insert into t1 values (12,20,20); +# The following must use "Using index" +explain select pk from t1 force index(col1) where col1=10; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 ref col1 col1 5 const 2000 Using index +drop table t1; +# +# Issue #214: subqueries cause crash +# +create TABLE t1(a int,b int,c int,primary key(a,b)) +partition by list (b*a) (partition x1 values in (1) tablespace ts1, +partition x2 values in (3,11,5,7) tablespace ts2, +partition x3 values in (16,8,5+19,70-43) tablespace ts3); +create table t2(b binary(2)); +set session optimizer_switch='materialization=off'; +insert into t1(a,b) values(1,7); +select a from t1 where a in (select a 
from t1 where a in (select b from t2)); +a +drop table t1, t2; +# +# Issue #260: altering name to invalid value leaves table unaccessible +# +CREATE TABLE t1 (c1 INT NOT NULL, c2 CHAR(5)) PARTITION BY HASH(c1) PARTITIONS 4; +INSERT INTO t1 VALUES(1,'a'); +RENAME TABLE t1 TO db3.t3; +ERROR HY000: Error on rename of './test/t1' to './db3/t3' (errno: 122 "Internal (unspecified) error in handler") +SELECT * FROM t1; +c1 c2 +1 a +SHOW TABLES; +Tables_in_test +t1 +RENAME TABLE t1 TO test.t3; +SELECT * FROM t3; +c1 c2 +1 a +SHOW TABLES; +Tables_in_test +t3 +CREATE DATABASE db3; +USE test; +RENAME TABLE t3 to db3.t2; +USE db3; +SELECT * FROM t2; +c1 c2 +1 a +SHOW TABLES; +Tables_in_db3 +t2 +DROP TABLE t2; +use test; +DROP DATABASE db3; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_qcache.result b/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_qcache.result new file mode 100644 index 0000000000000..57c5b897462e4 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_qcache.result @@ -0,0 +1,39 @@ +create table t1 (pk int primary key, c char(8)) engine=RocksDB; +insert into t1 values (1,'new'),(2,'new'); +select * from t1; +pk c +1 new +2 new +connect con1,localhost,root,,; +update t1 set c = 'updated'; +connection default; +flush status; +show status like 'Qcache_hits'; +Variable_name Value +Qcache_hits 0 +show global status like 'Qcache_hits'; +Variable_name Value +Qcache_hits 0 +select * from t1; +pk c +1 updated +2 updated +select sql_no_cache * from t1; +pk c +1 updated +2 updated +select * from t1 where pk = 1; +pk c +1 updated +show status like 'Qcache_hits'; +Variable_name Value +Qcache_hits 0 +# MariaDB: Qcache_not_cached is not incremented for select sql_no_cache queries +# so the following query produces 2, not 3: +show status like 'Qcache_not_cached'; +Variable_name Value +Qcache_not_cached 2 +show global status like 'Qcache_hits'; +Variable_name Value +Qcache_hits 0 +drop table t1; diff --git 
a/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_range.result b/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_range.result new file mode 100644 index 0000000000000..918859ea0365f --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_range.result @@ -0,0 +1,293 @@ +select * from information_schema.engines where engine = 'rocksdb'; +ENGINE SUPPORT COMMENT TRANSACTIONS XA SAVEPOINTS +ROCKSDB DEFAULT RocksDB storage engine YES YES YES +drop table if exists t0,t1,t2,t3,t4,t5; +create table t0 (a int) engine=myisam; +insert into t0 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9); +create table t1(a int) engine=myisam; +insert into t1 select A.a + B.a* 10 + C.a * 100 from t0 A, t0 B, t0 C; +create table t2 ( +pk int not null, +a int not null, +b int not null, +primary key(pk), +key(a) comment 'rev:cf1' +) engine=rocksdb; +insert into t2 select A.a, FLOOR(A.a/10), A.a from t1 A; +# +# HA_READ_KEY_EXACT tests +# +# Original failure was here: +explain +select * from t2 force index (a) where a=0; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t2 ref a a 4 const # +select * from t2 force index (a) where a=0; +pk a b +0 0 0 +1 0 1 +2 0 2 +3 0 3 +4 0 4 +5 0 5 +6 0 6 +7 0 7 +8 0 8 +9 0 9 +# The rest are for code coverage: +explain +select * from t2 force index (a) where a=2; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t2 ref a a 4 const # +select * from t2 force index (a) where a=2; +pk a b +20 2 20 +21 2 21 +22 2 22 +23 2 23 +24 2 24 +25 2 25 +26 2 26 +27 2 27 +28 2 28 +29 2 29 +explain +select * from t2 force index (a) where a=3 and pk=33; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t2 const a a 8 const,const # +select * from t2 force index (a) where a=3 and pk=33; +pk a b +33 3 33 +select * from t2 force index (a) where a=99 and pk=99; +pk a b +select * from t2 force index (a) where a=0 and pk=0; +pk a b +0 0 0 +select * from t2 force index (a) where a=-1; +pk a b +select 
* from t2 force index (a) where a=-1 and pk in (101,102); +pk a b +select * from t2 force index (a) where a=100 and pk in (101,102); +pk a b +# +# #36: Range in form tbl.key >= const doesn't work in reverse column family +# +explain +select count(*) from t2 force index (a) where a>=0 and a <=1; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t2 range a a 4 NULL # Using where; Using index +select count(*) from t2 force index (a) where a>=0 and a <=1; +count(*) +20 +explain +select count(*) from t2 force index (a) where a>=-1 and a <=1; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t2 range a a 4 NULL # Using where; Using index +select count(*) from t2 force index (a) where a>=-1 and a <=1; +count(*) +20 +explain +select * from t2 force index (a) where a=0 and pk>=3; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t2 range a a 8 NULL # Using index condition +select * from t2 force index (a) where a=0 and pk>=3; +pk a b +3 0 3 +4 0 4 +5 0 5 +6 0 6 +7 0 7 +8 0 8 +9 0 9 +# Try edge cases where we fall over the end of the table +create table t3 like t2; +insert into t3 select * from t2; +select * from t3 where pk>=1000000; +pk a b +select * from t2 where pk>=1000000; +pk a b +# +# #42: Range in form tbl.key > const doesn't work in reverse column family +# +explain +select count(*) from t2 force index (a) where a>0; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t2 range a a 4 NULL # Using where; Using index +select count(*) from t2 force index (a) where a>0; +count(*) +990 +explain +select count(*) from t2 force index (a) where a>99; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t2 range a a 4 NULL # Using where; Using index +select count(*) from t2 force index (a) where a>99; +count(*) +0 +select * from t2 where pk>1000000; +pk a b +select * from t3 where pk>1000000; +pk a b +explain +select count(*) from t2 force 
index (a) where a=2 and pk>25; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t2 range a a 8 NULL # Using where; Using index +select count(*) from t2 force index (a) where a=2 and pk>25; +count(*) +4 +select * from t2 force index (a) where a>-10 and a < 1; +pk a b +0 0 0 +1 0 1 +2 0 2 +3 0 3 +4 0 4 +5 0 5 +6 0 6 +7 0 7 +8 0 8 +9 0 9 +select * from t3 force index (a) where a>-10 and a < 1; +pk a b +0 0 0 +1 0 1 +2 0 2 +3 0 3 +4 0 4 +5 0 5 +6 0 6 +7 0 7 +8 0 8 +9 0 9 +# +# #46: index_read_map(HA_READ_BEFORE_KEY) does not work in reverse column family +# +select max(a) from t2 where a < 2; +max(a) +1 +select max(a) from t2 where a < -1; +max(a) +NULL +select max(pk) from t2 where a=3 and pk < 6; +max(pk) +NULL +select max(pk) from t2 where pk < 200000; +max(pk) +999 +select max(pk) from t2 where pk < 20; +max(pk) +19 +select max(a) from t3 where a < 2; +max(a) +1 +select max(a) from t3 where a < -1; +max(a) +NULL +select max(pk) from t3 where pk < 200000; +max(pk) +999 +select max(pk) from t3 where pk < 20; +max(pk) +19 +select max(pk) from t2 where a=3 and pk < 33; +max(pk) +32 +select max(pk) from t3 where a=3 and pk < 33; +max(pk) +32 +# +# #48: index_read_map(HA_READ_PREFIX_LAST) does not work in reverse CF +# +# Tests for search_flag=HA_READ_PREFIX_LAST_OR_PREV +# Note: the next explain has "Using index condition" in fb/mysql-5.6 +# but "Using where" in MariaDB because the latter does not +# support ICP over reverse scans. 
+explain +select * from t2 where a between 99 and 2000 order by a desc; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t2 range a a 4 NULL # Using where +select * from t2 where a between 99 and 2000 order by a desc; +pk a b +999 99 999 +998 99 998 +997 99 997 +996 99 996 +995 99 995 +994 99 994 +993 99 993 +992 99 992 +991 99 991 +990 99 990 +select max(a) from t2 where a <=10; +max(a) +10 +select max(a) from t2 where a <=-4; +max(a) +NULL +select max(pk) from t2 where a=5 and pk <=55; +max(pk) +55 +select max(pk) from t2 where a=5 and pk <=55555; +max(pk) +59 +select max(pk) from t2 where a=5 and pk <=0; +max(pk) +NULL +select max(pk) from t2 where pk <=-1; +max(pk) +NULL +select max(pk) from t2 where pk <=999999; +max(pk) +999 +select max(pk) from t3 where pk <=-1; +max(pk) +NULL +select max(pk) from t3 where pk <=999999; +max(pk) +999 +# +# Tests for search_flag=HA_READ_PREFIX_LAST +# +create table t4 ( +pk int primary key, +a int, +b int, +c int, +key(a,b,c) +) engine=rocksdb; +insert into t4 select pk,pk,pk,pk from t2 where pk < 100; +explain +select * from t4 where a=1 and b in (1) order by c desc; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t4 ref a a 10 const,const # Using where; Using index +select * from t4 where a=1 and b in (1) order by c desc; +pk a b c +1 1 1 1 +explain +select * from t4 where a=5 and b in (4) order by c desc; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t4 ref a a 10 const,const # Using where; Using index +select * from t4 where a=5 and b in (4) order by c desc; +pk a b c +# HA_READ_PREFIX_LAST for reverse-ordered CF +create table t5 ( +pk int primary key, +a int, +b int, +c int, +key(a,b,c) comment 'rev:cf2' +) engine=rocksdb; +insert into t5 select pk,pk,pk,pk from t2 where pk < 100; +explain +select * from t5 where a=1 and b in (1) order by c desc; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t5 ref a 
a 10 const,const # Using where; Using index +select * from t5 where a=1 and b in (1) order by c desc; +pk a b c +1 1 1 1 +explain +select * from t5 where a=5 and b in (4) order by c desc; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t5 ref a a 10 const,const # Using where; Using index +select * from t5 where a=5 and b in (4) order by c desc; +pk a b c +drop table t0,t1,t2,t3,t4,t5; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_range2.result b/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_range2.result new file mode 100644 index 0000000000000..d7a4f9dd06527 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_range2.result @@ -0,0 +1,11 @@ +create table t1 (id1 bigint, id2 bigint, c1 bigint, c2 bigint, c3 bigint, c4 bigint, c5 bigint, c6 bigint, c7 bigint, primary key (id1, id2), index i(c1, c2)); +analyze table t1; +Table Op Msg_type Msg_text +test.t1 analyze status OK +select count(*) from t1; +count(*) +10000 +explain select c1 from t1 where c1 > 5 limit 10; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 range i i 9 NULL 9900 Using where; Using index +drop table t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_row_stats.result b/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_row_stats.result new file mode 100644 index 0000000000000..8c02de98c907b --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_row_stats.result @@ -0,0 +1,66 @@ +create table t1 (a int primary key) engine=rocksdb; +Verify rocksdb_rows_inserted +select variable_value into @old_rows_inserted from information_schema.global_status where variable_name = 'rocksdb_rows_inserted'; +insert into t1 values(1); +select variable_value into @new_rows_inserted from information_schema.global_status where variable_name = 'rocksdb_rows_inserted'; +select @new_rows_inserted - @old_rows_inserted; +@new_rows_inserted - @old_rows_inserted +1 +Verify rocksdb_rows_updated +select variable_value into 
@old_rows_updated from information_schema.global_status where variable_name = 'rocksdb_rows_updated'; +update t1 set a=2 where a=1; +select variable_value into @new_rows_updated from information_schema.global_status where variable_name = 'rocksdb_rows_updated'; +select @new_rows_updated - @old_rows_updated; +@new_rows_updated - @old_rows_updated +1 +Verify rocksdb_rows_read +select variable_value into @old_rows_read from information_schema.global_status where variable_name = 'rocksdb_rows_read'; +select * from t1; +a +2 +select variable_value into @new_rows_read from information_schema.global_status where variable_name = 'rocksdb_rows_read'; +select @new_rows_read - @old_rows_read; +@new_rows_read - @old_rows_read +1 +Verify rocksdb_rows_deleted +select variable_value into @old_rows_deleted from information_schema.global_status where variable_name = 'rocksdb_rows_deleted'; +delete from t1; +select variable_value into @new_rows_deleted from information_schema.global_status where variable_name = 'rocksdb_rows_deleted'; +select @new_rows_deleted - @old_rows_deleted; +@new_rows_deleted - @old_rows_deleted +1 +use mysql; +create table t1(a int primary key) engine=rocksdb; +Verify rocksdb_system_rows_inserted +select variable_value into @old_system_rows_inserted from information_schema.global_status where variable_name = 'rocksdb_system_rows_inserted'; +insert into t1 values(1); +select variable_value into @new_system_rows_inserted from information_schema.global_status where variable_name = 'rocksdb_system_rows_inserted'; +select @new_system_rows_inserted - @old_system_rows_inserted; +@new_system_rows_inserted - @old_system_rows_inserted +1 +Verify rocksdb_system_rows_updated +select variable_value into @old_system_rows_updated from information_schema.global_status where variable_name = 'rocksdb_system_rows_updated'; +update t1 set a=2 where a=1; +select variable_value into @new_system_rows_updated from information_schema.global_status where variable_name = 
'rocksdb_system_rows_updated'; +select @new_system_rows_updated - @old_system_rows_updated; +@new_system_rows_updated - @old_system_rows_updated +1 +Verify rocksdb_system_rows_read +select variable_value into @old_system_rows_read from information_schema.global_status where variable_name = 'rocksdb_system_rows_read'; +select * from t1; +a +2 +select variable_value into @new_system_rows_read from information_schema.global_status where variable_name = 'rocksdb_system_rows_read'; +select @new_system_rows_read - @old_system_rows_read; +@new_system_rows_read - @old_system_rows_read +1 +Verify rocksdb_system_rows_deleted +select variable_value into @old_system_rows_deleted from information_schema.global_status where variable_name = 'rocksdb_system_rows_deleted'; +delete from t1; +select variable_value into @new_system_rows_deleted from information_schema.global_status where variable_name = 'rocksdb_system_rows_deleted'; +select @new_system_rows_deleted - @old_system_rows_deleted; +@new_system_rows_deleted - @old_system_rows_deleted +1 +drop table t1; +use test; +drop table t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_table_stats_sampling_pct_change.result b/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_table_stats_sampling_pct_change.result new file mode 100644 index 0000000000000..1e8aa5787a6f9 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_table_stats_sampling_pct_change.result @@ -0,0 +1,23 @@ +drop table if exists t1; +SET @ORIG_PCT = @@ROCKSDB_TABLE_STATS_SAMPLING_PCT; +SET @@global.ROCKSDB_TABLE_STATS_SAMPLING_PCT = 100; +create table t1 (pk int primary key) engine=rocksdb; +set global rocksdb_force_flush_memtable_now = true; +select table_rows from information_schema.tables +where table_schema = database() and table_name = 't1'; +table_rows +10000 +drop table t1; +drop table if exists t2; +SET @@global.ROCKSDB_TABLE_STATS_SAMPLING_PCT = 10; +create table t2 (pk int primary key) engine=rocksdb; +set global 
rocksdb_force_flush_memtable_now = true; +select table_rows from information_schema.tables +where table_schema = database() and table_name = 't2'; +table_rows +10000 +select table_name from information_schema.tables where table_schema = database() and table_name = 't2'; +table_name +t2 +drop table t2; +SET GLOBAL ROCKSDB_TABLE_STATS_SAMPLING_PCT = @ORIG_PCT; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/rollback_savepoint.result b/storage/rocksdb/mysql-test/rocksdb/r/rollback_savepoint.result new file mode 100644 index 0000000000000..18b76818e6abf --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/rollback_savepoint.result @@ -0,0 +1,29 @@ +DROP TABLE IF EXISTS t1, t2; +CREATE TABLE t1 (a INT, b CHAR(8), pk INT AUTO_INCREMENT PRIMARY KEY) ENGINE=rocksdb; +INSERT INTO t1 (a,b) VALUES (1,'a'),(2,'b'),(3,'a'); +CREATE TABLE t2 LIKE t1; +INSERT INTO t2 SELECT * FROM t1; +connect con1,localhost,root,,; +connect con2,localhost,root,,; +connection con1; +START TRANSACTION WITH CONSISTENT SNAPSHOT; +SAVEPOINT a; +SELECT * FROM t1 ORDER BY pk; +a b pk +1 a 1 +2 b 2 +3 a 3 +ROLLBACK TO SAVEPOINT a; +SAVEPOINT a; +SELECT * FROM t2 ORDER BY pk; +a b pk +1 a 1 +2 b 2 +3 a 3 +ROLLBACK TO SAVEPOINT a; +connection con2; +ALTER TABLE t1 RENAME TO t3; +connection default; +DROP TABLE t2, t3; +disconnect con1; +disconnect con2; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/rpl_read_free.result b/storage/rocksdb/mysql-test/rocksdb/r/rpl_read_free.result new file mode 100644 index 0000000000000..82609f464234a --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/rpl_read_free.result @@ -0,0 +1,321 @@ +include/master-slave.inc +Warnings: +Note #### Sending passwords in plain text without SSL/TLS is extremely insecure. +Note #### Storing MySQL user name or password information in the master info repository is not secure and is therefore not recommended. 
Please consider using the USER and PASSWORD connection options for START SLAVE; see the 'START SLAVE Syntax' in the MySQL Manual for more information. +[connection master] +drop table if exists t1; +create procedure save_read_stats() +begin +select rows_requested into @rq from information_schema.table_statistics +where table_schema=database() and table_name='t1'; +select variable_value into @rr from information_schema.global_status +where variable_name='rocksdb_rows_read'; +select variable_value into @ru from information_schema.global_status +where variable_name='rocksdb_rows_updated'; +select variable_value into @rd from information_schema.global_status +where variable_name='rocksdb_rows_deleted'; +end// +create procedure get_read_stats() +begin +select rows_requested - @rq as rows_requested from +information_schema.table_statistics +where table_schema=database() and table_name='t1'; +select variable_value - @rr as rows_read from +information_schema.global_status +where variable_name='rocksdb_rows_read'; +select variable_value - @ru as rows_updated from +information_schema.global_status +where variable_name='rocksdb_rows_updated'; +select variable_value - @rd as rows_deleted from +information_schema.global_status +where variable_name='rocksdb_rows_deleted'; +end// +create table t1 (id int primary key, value int); +insert into t1 values (1,1), (2,2), (3,3), (4,4); +include/sync_slave_sql_with_master.inc + +# regular update/delete. 
With rocks_read_free_rpl_tables=.*, rocksdb_rows_read does not increase on slaves + +call save_read_stats(); +update t1 set value=value+1 where id=1; +delete from t1 where id=4; +select * from t1; +id value +1 2 +2 2 +3 3 +include/sync_slave_sql_with_master.inc +call get_read_stats(); +rows_requested +0 +rows_read +0 +rows_updated +1 +rows_deleted +1 +select * from t1; +id value +1 2 +2 2 +3 3 + +# "rocks_read_free_rpl_tables=.*" makes "row not found error" not happen anymore + +include/stop_slave.inc +delete from t1 where id in (2, 3); +include/start_slave.inc +call save_read_stats(); +update t1 set value=value+1 where id=3; +delete from t1 where id=2; +select * from t1; +id value +1 2 +3 4 +include/sync_slave_sql_with_master.inc +call get_read_stats(); +rows_requested +0 +rows_read +0 +rows_updated +1 +rows_deleted +1 +select * from t1; +id value +1 2 +3 4 + +## tables without primary key -- read free replication should be disabled + + +#no index + +drop table t1; +create table t1 (c1 int, c2 int); +insert into t1 values (1,1), (2,2),(3,3),(4,4),(5,5); +include/sync_slave_sql_with_master.inc +call save_read_stats(); +update t1 set c2=100 where c1=3; +delete from t1 where c1 <= 2; +include/sync_slave_sql_with_master.inc +call get_read_stats(); +rows_requested +5 +rows_read +5 +rows_updated +1 +rows_deleted +2 +select * from t1; +c1 c2 +3 100 +4 4 +5 5 + +#secondary index only + +drop table t1; +create table t1 (c1 int, c2 int, index i(c1)); +insert into t1 values (1,1), (2,2),(3,3),(4,4),(5,5); +include/sync_slave_sql_with_master.inc +call save_read_stats(); +update t1 set c2=100 where c1=3; +delete from t1 where c1 <= 2; +include/sync_slave_sql_with_master.inc +call get_read_stats(); +rows_requested +3 +rows_read +3 +rows_updated +1 +rows_deleted +2 +select * from t1; +c1 c2 +3 100 +4 4 +5 5 + +## large row operations -- primary key modification, secondary key modification + +drop table t1; +create table t1 (id1 bigint, id2 bigint, c1 bigint, c2 bigint, c3 
bigint, c4 bigint, c5 bigint, c6 bigint, c7 bigint, primary key (id1, id2), index i(c1, c2)); +include/sync_slave_sql_with_master.inc +call save_read_stats(); + +#updating all seconary keys by 1 + +include/sync_slave_sql_with_master.inc +call get_read_stats(); +rows_requested +0 +rows_read +0 +rows_updated +10000 +rows_deleted +0 +include/diff_tables.inc [master:t1, slave:t1] + +#updating all primary keys by 2 + +call save_read_stats(); +include/sync_slave_sql_with_master.inc +call get_read_stats(); +rows_requested +0 +rows_read +0 +rows_updated +10000 +rows_deleted +0 +include/diff_tables.inc [master:t1, slave:t1] + +#updating secondary keys after truncating t1 on slave + +truncate table t1; +call save_read_stats(); +update t1 set c2=c2+10; +include/sync_slave_sql_with_master.inc +call get_read_stats(); +rows_requested +0 +rows_read +0 +rows_updated +10000 +rows_deleted +0 +include/diff_tables.inc [master:t1, slave:t1] + +#updating primary keys after truncating t1 on slave + +truncate table t1; +call save_read_stats(); +update t1 set id2=id2+10; +include/sync_slave_sql_with_master.inc +call get_read_stats(); +rows_requested +0 +rows_read +0 +rows_updated +10000 +rows_deleted +0 +include/diff_tables.inc [master:t1, slave:t1] + +#deleting half rows + +call save_read_stats(); +delete from t1 where id1 <= 5000; +include/sync_slave_sql_with_master.inc +call get_read_stats(); +rows_requested +0 +rows_read +0 +rows_updated +0 +rows_deleted +5000 +include/diff_tables.inc [master:t1, slave:t1] +[on master] +create table t2 (id int primary key, i1 int, i2 int, value int, index(i1), index(i2)); +create table u2 (id int primary key, i1 int, i2 int, value int, index(i1), index(i2)); +insert into t2 values (1,1,1,1),(2,2,2,2),(3,3,3,3); +insert into u2 values (1,1,1,1),(2,2,2,2),(3,3,3,3); +include/sync_slave_sql_with_master.inc +[on slave] +delete from t2 where id <= 2; +delete from u2 where id <= 2; +[on master] +update t2 set i2=100, value=100 where id=1; +update u2 set 
i2=100, value=100 where id=1; +[on slave] +call mtr.add_suppression("Slave SQL.*Could not execute Update_rows event on table test.u2.*Error_code.*"); +call mtr.add_suppression("Slave: Can't find record in 'u2'.*"); +include/wait_for_slave_sql_error.inc [errno=1032] +select count(*) from t2 force index(primary); +count(*) +2 +select count(*) from t2 force index(i1); +count(*) +1 +select count(*) from t2 force index(i2); +count(*) +2 +select * from t2 where id=1; +id i1 i2 value +1 1 100 100 +select i1 from t2 where i1=1; +i1 +select i2 from t2 where i2=100; +i2 +100 +select count(*) from u2 force index(primary); +count(*) +1 +select count(*) from u2 force index(i1); +count(*) +1 +select count(*) from u2 force index(i2); +count(*) +1 +select * from u2 where id=1; +id i1 i2 value +select i1 from u2 where i1=1; +i1 +select i2 from u2 where i2=100; +i2 +include/wait_for_slave_sql_to_start.inc + +# some tables with read-free replication on and some with it off +# secondary keys have extra rows + +[on master] +create table t3 (id int primary key, i1 int, i2 int, value int, index(i1), index(i2)); +create table u3 (id int primary key, i1 int, i2 int, value int, index(i1), index(i2)); +insert into t3 values (1,1,1,1),(2,2,2,2),(3,3,3,3); +insert into u3 values (1,1,1,1),(2,2,2,2),(3,3,3,3); +include/sync_slave_sql_with_master.inc +[on slave] +update t3 set i1=100 where id=1; +update u3 set i1=100 where id=1; +[on master] +delete from t3 where id=1; +delete from u3 where id=1; +include/sync_slave_sql_with_master.inc +[on slave] +select count(*) from t3 force index(primary); +count(*) +2 +select count(*) from t3 force index(i1); +count(*) +3 +select count(*) from t3 force index(i2); +count(*) +2 +select i1 from t3 where i1=100; +i1 +100 +select count(*) from u3 force index(primary); +count(*) +2 +select count(*) from u3 force index(i1); +count(*) +2 +select count(*) from u3 force index(i2); +count(*) +2 +select i1 from u3 where i1=100; +i1 +drop table t1, t2, t3, u2, u3; +drop 
procedure save_read_stats; +drop procedure get_read_stats; +include/rpl_end.inc diff --git a/storage/rocksdb/mysql-test/rocksdb/r/rpl_row_not_found.result b/storage/rocksdb/mysql-test/rocksdb/r/rpl_row_not_found.result new file mode 100644 index 0000000000000..8cdfa9107392c --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/rpl_row_not_found.result @@ -0,0 +1,56 @@ +include/master-slave.inc +Warnings: +Note #### Sending passwords in plain text without SSL/TLS is extremely insecure. +Note #### Storing MySQL user name or password information in the master info repository is not secure and is therefore not recommended. Please consider using the USER and PASSWORD connection options for START SLAVE; see the 'START SLAVE Syntax' in the MySQL Manual for more information. +[connection master] +drop table if exists t1; +create table t0 (a int) engine=myisam; +insert into t0 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9); +create table t1(a int) engine=myisam; +insert into t1 select A.a + B.a* 10 + C.a * 100 from t0 A, t0 B, t0 C; +create table t2 ( +pk int primary key, +kp1 int, +kp2 int, +col1 int, +key (kp1,kp2) +) engine=rocksdb; +insert into t2 select a,a,a,a from t1; +create table t3 like t2; +insert into t3 select * from t2; +include/sync_slave_sql_with_master.inc +set global debug= 'd,dbug.rocksdb.get_row_by_rowid'; +include/stop_slave.inc +include/start_slave.inc +update t2 set col1=100 where kp1 between 1 and 3 and mod(kp2,2)=0; +set debug_sync= 'now WAIT_FOR Reached'; +set global debug = ''; +set sql_log_bin=0; +delete from t2 where pk=2; +delete from t2 where pk=3; +set debug_sync= 'now SIGNAL signal.rocksdb.get_row_by_rowid_let_running'; +include/sync_slave_sql_with_master.inc +select * from t2 where pk < 5; +pk kp1 kp2 col1 +0 0 0 0 +1 1 1 1 +4 4 4 4 +set global debug= 'd,dbug.rocksdb.get_row_by_rowid'; +include/stop_slave.inc +include/start_slave.inc +update t3 set col1=100 where kp1 between 1 and 4 and mod(kp2,2)=0; +call mtr.add_suppression("Deadlock 
found when trying to get lock"); +set debug_sync= 'now WAIT_FOR Reached'; +set global debug = ''; +set sql_log_bin=0; +delete from t3 where pk=2; +delete from t3 where pk=3; +set debug_sync= 'now SIGNAL signal.rocksdb.get_row_by_rowid_let_running'; +include/sync_slave_sql_with_master.inc +select * from t3 where pk < 5; +pk kp1 kp2 col1 +0 0 0 0 +1 1 1 1 +4 4 4 100 +drop table t0, t1, t2, t3; +include/rpl_end.inc diff --git a/storage/rocksdb/mysql-test/rocksdb/r/rpl_row_rocksdb.result b/storage/rocksdb/mysql-test/rocksdb/r/rpl_row_rocksdb.result new file mode 100644 index 0000000000000..de47f3b39b0b9 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/rpl_row_rocksdb.result @@ -0,0 +1,45 @@ +include/master-slave.inc +[connection master] +connection master; +drop table if exists t1; +connection master; +select @@binlog_format; +@@binlog_format +ROW +create table t1 (pk int primary key) engine=rocksdb; +insert into t1 values (1),(2),(3); +include/sync_slave_sql_with_master.inc +connection slave; +select * from t1; +pk +1 +2 +3 +connection master; +drop table t1; +# +# Issue #18: slave crash on update with row based binary logging +# +create table t1 (id int primary key, value int, value2 int, index(value)) engine=rocksdb; +insert into t1 values (1,1,1); +insert into t1 values (2,1,1); +insert into t1 values (3,1,1); +insert into t1 values (4,1,1); +insert into t1 values (5,1,1); +update t1 set value2=100 where id=1; +update t1 set value2=200 where id=2; +update t1 set value2=300 where id=3; +include/sync_slave_sql_with_master.inc +connection slave; +select * from t1 where id=1; +id value value2 +1 1 100 +select * from t1 where id=2; +id value value2 +2 1 200 +select * from t1 where id=3; +id value value2 +3 1 300 +connection master; +drop table t1; +include/rpl_end.inc diff --git a/storage/rocksdb/mysql-test/rocksdb/r/rpl_row_stats.result b/storage/rocksdb/mysql-test/rocksdb/r/rpl_row_stats.result new file mode 100644 index 0000000000000..a14d2693ad3f9 --- 
/dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/rpl_row_stats.result @@ -0,0 +1,98 @@ +include/master-slave.inc +[connection master] +connection master; +drop table if exists t1; +connection master; +create procedure save_read_stats() +begin +/*select rows_requested into @rq from information_schema.table_statistics +where table_schema=database() and table_name='t1';*/ +select rows_read into @rr_is from information_schema.table_statistics +where table_schema=database() and table_name='t1'; +select variable_value into @rr from information_schema.global_status +where variable_name='rocksdb_rows_read'; +select variable_value into @ru from information_schema.global_status +where variable_name='rocksdb_rows_updated'; +select variable_value into @rd from information_schema.global_status +where variable_name='rocksdb_rows_deleted'; +end// +create procedure get_read_stats() +begin +/*select rows_requested - @rq as rows_requested from +information_schema.table_statistics +where table_schema=database() and table_name='t1';*/ +select rows_read - @rr_is as rows_read_userstat from +information_schema.table_statistics +where table_schema=database() and table_name='t1'; +select variable_value - @rr as rows_read from +information_schema.global_status +where variable_name='rocksdb_rows_read'; +select variable_value - @ru as rows_updated from +information_schema.global_status +where variable_name='rocksdb_rows_updated'; +select variable_value - @rd as rows_deleted from +information_schema.global_status +where variable_name='rocksdb_rows_deleted'; +end// +create table t1 (id int primary key, value int); +insert into t1 values (1,1), (2,2), (3,3), (4,4), (5,5); +include/sync_slave_sql_with_master.inc +connection slave; +call save_read_stats(); +connection master; +update t1 set value=value+1 where id=1; +update t1 set value=value+1 where id=3; +select * from t1; +id value +1 2 +2 2 +3 4 +4 4 +5 5 +include/sync_slave_sql_with_master.inc +connection slave; +call get_read_stats(); 
+rows_read_userstat +2 +rows_read +2 +rows_updated +2 +rows_deleted +0 +select * from t1; +id value +1 2 +2 2 +3 4 +4 4 +5 5 +call save_read_stats(); +connection master; +delete from t1 where id in (4,5); +select * from t1; +id value +1 2 +2 2 +3 4 +include/sync_slave_sql_with_master.inc +connection slave; +call get_read_stats(); +rows_read_userstat +2 +rows_read +2 +rows_updated +0 +rows_deleted +2 +select * from t1; +id value +1 2 +2 2 +3 4 +connection master; +drop table t1; +drop procedure save_read_stats; +drop procedure get_read_stats; +include/rpl_end.inc diff --git a/storage/rocksdb/mysql-test/rocksdb/r/rpl_row_triggers.result b/storage/rocksdb/mysql-test/rocksdb/r/rpl_row_triggers.result new file mode 100644 index 0000000000000..1d3cd7db641e0 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/rpl_row_triggers.result @@ -0,0 +1,242 @@ +include/master-slave.inc +Warnings: +Note #### Sending passwords in plain text without SSL/TLS is extremely insecure. +Note #### Storing MySQL user name or password information in the master info repository is not secure and is therefore not recommended. Please consider using the USER and PASSWORD connection options for START SLAVE; see the 'START SLAVE Syntax' in the MySQL Manual for more information. 
+[connection master] +# Test of row replication with triggers on the slave side +CREATE TABLE t1 (C1 CHAR(1) primary key, C2 CHAR(1)); +SELECT * FROM t1; +C1 C2 +SET @old_slave_exec_mode= @@global.slave_exec_mode; +SET @old_slave_run_triggers_for_rbr= @@global.slave_run_triggers_for_rbr; +SET @@global.slave_exec_mode= IDEMPOTENT; +SET @@global.slave_run_triggers_for_rbr= YES; +SELECT * FROM t1; +C1 C2 +create table t2 (id char(2) primary key, cnt int, o char(1), n char(1)); +insert into t2 values +('u0', 0, ' ', ' '),('u1', 0, ' ', ' '), +('d0', 0, ' ', ' '),('d1', 0, ' ', ' '), +('i0', 0, ' ', ' '),('i1', 0, ' ', ' '); +create trigger t1_cnt_b before update on t1 for each row +update t2 set cnt=cnt+1, o=old.C1, n=new.C1 where id = 'u0'; +create trigger t1_cnt_db before delete on t1 for each row +update t2 set cnt=cnt+1, o=old.C1, n=' ' where id = 'd0'; +create trigger t1_cnt_ib before insert on t1 for each row +update t2 set cnt=cnt+1, n=new.C1, o=' ' where id = 'i0'; +create trigger t1_cnt_a after update on t1 for each row +update t2 set cnt=cnt+1, o=old.C1, n=new.C1 where id = 'u1'; +create trigger t1_cnt_da after delete on t1 for each row +update t2 set cnt=cnt+1, o=old.C1, n=' ' where id = 'd1'; +create trigger t1_cnt_ia after insert on t1 for each row +update t2 set cnt=cnt+1, n=new.C1, o=' ' where id = 'i1'; +SELECT * FROM t2 order by id; +id cnt o n +d0 0 +d1 0 +i0 0 +i1 0 +u0 0 +u1 0 +# INSERT triggers test +insert into t1 values ('a','b'); +SELECT * FROM t2 order by id; +id cnt o n +d0 0 +d1 0 +i0 1 a +i1 1 a +u0 0 +u1 0 +# UPDATE triggers test +update t1 set C1= 'd'; +SELECT * FROM t2 order by id; +id cnt o n +d0 0 +d1 0 +i0 1 a +i1 1 a +u0 1 a d +u1 1 a d +# DELETE triggers test +delete from t1 where C1='d'; +SELECT * FROM t2 order by id; +id cnt o n +d0 1 d +d1 1 d +i0 1 a +i1 1 a +u0 1 a d +u1 1 a d +# INSERT triggers which cause also UPDATE test (insert duplicate row) +insert into t1 values ('0','1'); +SELECT * FROM t2 order by id; +id cnt o n +d0 1 
d +d1 1 d +i0 2 0 +i1 2 0 +u0 1 a d +u1 1 a d +insert into t1 values ('0','1'); +SELECT * FROM t2 order by id; +id cnt o n +d0 1 d +d1 1 d +i0 3 0 +i1 3 0 +u0 2 0 0 +u1 2 0 0 +# INSERT triggers which cause also DELETE test +# (insert duplicate row in table referenced by foreign key) +insert into t1 values ('1','1'); +drop table if exists t1; +SET @@global.slave_exec_mode= @old_slave_exec_mode; +SET @@global.slave_run_triggers_for_rbr= @old_slave_run_triggers_for_rbr; +drop table t2; +CREATE TABLE t1 (i INT); +CREATE TABLE t2 (i INT); +SET @old_slave_run_triggers_for_rbr= @@global.slave_run_triggers_for_rbr; +SET GLOBAL slave_run_triggers_for_rbr=YES; +CREATE TRIGGER tr AFTER INSERT ON t1 FOR EACH ROW +INSERT INTO t2 VALUES (new.i); +BEGIN; +INSERT INTO t1 VALUES (1); +INSERT INTO t1 VALUES (2); +COMMIT; +select * from t2; +i +1 +2 +SET @@global.slave_run_triggers_for_rbr= @old_slave_run_triggers_for_rbr; +drop tables t2,t1; +# Triggers on slave do not work if master has some +CREATE TABLE t1 (C1 CHAR(1) primary key, C2 CHAR(1)); +SELECT * FROM t1; +C1 C2 +create trigger t1_dummy before delete on t1 for each row +set @dummy= 1; +SET @old_slave_exec_mode= @@global.slave_exec_mode; +SET @old_slave_run_triggers_for_rbr= @@global.slave_run_triggers_for_rbr; +SET @@global.slave_exec_mode= IDEMPOTENT; +SET @@global.slave_run_triggers_for_rbr= YES; +SELECT * FROM t1; +C1 C2 +create table t2 (id char(2) primary key, cnt int, o char(1), n char(1)); +insert into t2 values +('u0', 0, ' ', ' '),('u1', 0, ' ', ' '), +('d0', 0, ' ', ' '),('d1', 0, ' ', ' '), +('i0', 0, ' ', ' '),('i1', 0, ' ', ' '); +create trigger t1_cnt_b before update on t1 for each row +update t2 set cnt=cnt+1, o=old.C1, n=new.C1 where id = 'u0'; +create trigger t1_cnt_ib before insert on t1 for each row +update t2 set cnt=cnt+1, n=new.C1, o=' ' where id = 'i0'; +create trigger t1_cnt_a after update on t1 for each row +update t2 set cnt=cnt+1, o=old.C1, n=new.C1 where id = 'u1'; +create trigger t1_cnt_da 
after delete on t1 for each row +update t2 set cnt=cnt+1, o=old.C1, n=' ' where id = 'd1'; +create trigger t1_cnt_ia after insert on t1 for each row +update t2 set cnt=cnt+1, n=new.C1, o=' ' where id = 'i1'; +SELECT * FROM t2 order by id; +id cnt o n +d0 0 +d1 0 +i0 0 +i1 0 +u0 0 +u1 0 +# INSERT triggers test +insert into t1 values ('a','b'); +SELECT * FROM t2 order by id; +id cnt o n +d0 0 +d1 0 +i0 0 +i1 0 +u0 0 +u1 0 +# UPDATE triggers test +update t1 set C1= 'd'; +SELECT * FROM t2 order by id; +id cnt o n +d0 0 +d1 0 +i0 0 +i1 0 +u0 0 +u1 0 +# DELETE triggers test +delete from t1 where C1='d'; +SELECT * FROM t2 order by id; +id cnt o n +d0 0 +d1 0 +i0 0 +i1 0 +u0 0 +u1 0 +# INSERT triggers which cause also UPDATE test (insert duplicate row) +insert into t1 values ('0','1'); +SELECT * FROM t2 order by id; +id cnt o n +d0 0 +d1 0 +i0 1 0 +i1 1 0 +u0 0 +u1 0 +insert into t1 values ('0','1'); +SELECT * FROM t2 order by id; +id cnt o n +d0 0 +d1 0 +i0 1 0 +i1 1 0 +u0 0 +u1 0 +# INSERT triggers which cause also DELETE test +# (insert duplicate row in table referenced by foreign key) +insert into t1 values ('1','1'); +drop table if exists t1; +SET @@global.slave_exec_mode= @old_slave_exec_mode; +SET @@global.slave_run_triggers_for_rbr= @old_slave_run_triggers_for_rbr; +drop table t2; +# +# MDEV-5513: Trigger is applied to the rows after first one +# +create table t1 (a int, b int); +create table tlog (a int auto_increment primary key); +set sql_log_bin=0; +create trigger tr1 after insert on t1 for each row insert into tlog values (null); +set sql_log_bin=1; +set @slave_run_triggers_for_rbr.saved = @@slave_run_triggers_for_rbr; +set global slave_run_triggers_for_rbr=1; +create trigger tr2 before insert on t1 for each row set new.b = new.a; +insert into t1 values (1,10),(2,20),(3,30); +select * from t1; +a b +1 10 +2 20 +3 30 +# +# Verify slave skips running triggers if master ran and logged the row events for triggers +# +create table t4(a int, b int); +delete from 
tlog; +create trigger tr4 before insert on t4 for each row insert into tlog values (null); +insert into t4 values (1, 10),(2, 20); +select * from tlog; +a +4 +5 +select * from t4; +a b +1 10 +2 20 +select * from tlog; +a +4 +5 +set global slave_run_triggers_for_rbr = @slave_run_triggers_for_rbr.saved; +drop table t1, tlog, t4; +include/rpl_end.inc diff --git a/storage/rocksdb/mysql-test/rocksdb/r/rpl_savepoint.result b/storage/rocksdb/mysql-test/rocksdb/r/rpl_savepoint.result new file mode 100644 index 0000000000000..5746119efacfb --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/rpl_savepoint.result @@ -0,0 +1,110 @@ +include/master-slave.inc +[connection master] +connection master; +drop table if exists t1; +connection master; +create table t1 (id int primary key, value int); +insert into t1 values (1,1), (2,2), (3,3); +begin; +insert into t1 values (11, 1); +savepoint a; +insert into t1 values (12, 1); +rollback to savepoint a; +ERROR HY000: MyRocks currently does not support ROLLBACK TO SAVEPOINT if modifying rows. +commit; +ERROR HY000: This transaction was rolled back and cannot be committed. Only supported operation is to roll it back, so all pending changes will be discarded. Please restart another transaction. +commit; +select * from t1; +id value +1 1 +2 2 +3 3 +include/sync_slave_sql_with_master.inc +connection slave; +select * from t1; +id value +1 1 +2 2 +3 3 +connection master; +begin; +insert into t1 values (21, 1); +savepoint a; +insert into t1 values (22, 1); +rollback to savepoint a; +ERROR HY000: MyRocks currently does not support ROLLBACK TO SAVEPOINT if modifying rows. +insert into t1 values (23, 1); +ERROR HY000: This transaction was rolled back and cannot be committed. Only supported operation is to roll it back, so all pending changes will be discarded. Please restart another transaction. +commit; +ERROR HY000: This transaction was rolled back and cannot be committed. 
Only supported operation is to roll it back, so all pending changes will be discarded. Please restart another transaction. +commit; +select * from t1; +id value +1 1 +2 2 +3 3 +include/sync_slave_sql_with_master.inc +connection slave; +select * from t1; +id value +1 1 +2 2 +3 3 +connection master; +begin; +insert into t1 values (31, 1); +savepoint a; +insert into t1 values (32, 1); +savepoint b; +insert into t1 values (33, 1); +rollback to savepoint a; +ERROR HY000: MyRocks currently does not support ROLLBACK TO SAVEPOINT if modifying rows. +insert into t1 values (34, 1); +ERROR HY000: This transaction was rolled back and cannot be committed. Only supported operation is to roll it back, so all pending changes will be discarded. Please restart another transaction. +rollback; +select * from t1; +id value +1 1 +2 2 +3 3 +include/sync_slave_sql_with_master.inc +connection slave; +select * from t1; +id value +1 1 +2 2 +3 3 +connection master; +SET autocommit=off; +select * from t1; +id value +1 1 +2 2 +3 3 +SAVEPOINT A; +select * from t1; +id value +1 1 +2 2 +3 3 +SAVEPOINT A; +insert into t1 values (35, 35); +ROLLBACK TO SAVEPOINT A; +ERROR HY000: MyRocks currently does not support ROLLBACK TO SAVEPOINT if modifying rows. +START TRANSACTION; +ERROR HY000: This transaction was rolled back and cannot be committed. Only supported operation is to roll it back, so all pending changes will be discarded. Please restart another transaction. 
+select * from t1; +id value +1 1 +2 2 +3 3 +include/sync_slave_sql_with_master.inc +connection slave; +select * from t1; +id value +1 1 +2 2 +3 3 +connection master; +drop table t1; +include/rpl_end.inc diff --git a/storage/rocksdb/mysql-test/rocksdb/r/rpl_statement.result b/storage/rocksdb/mysql-test/rocksdb/r/rpl_statement.result new file mode 100644 index 0000000000000..cdf0c37e33926 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/rpl_statement.result @@ -0,0 +1,57 @@ +include/master-slave.inc +[connection master] +connection master; +drop table if exists t1; +connection master; +select @@binlog_format; +@@binlog_format +STATEMENT +create table t1 (pk int primary key) engine=rocksdb; +insert into t1 values (1),(2),(3); +ERROR HY000: Can't execute updates on master with binlog_format != ROW. +set session rocksdb_unsafe_for_binlog=on; +insert into t1 values (1),(2),(3); +select * from t1; +pk +1 +2 +3 +delete from t1; +set session rocksdb_unsafe_for_binlog=off; +insert into t1 values (1),(2),(3); +ERROR HY000: Can't execute updates on master with binlog_format != ROW. 
+set binlog_format=row; +insert into t1 values (1),(2),(3); +include/sync_slave_sql_with_master.inc +connection slave; +select * from t1; +pk +1 +2 +3 +connection master; +drop table t1; +create table t1 (id int primary key, value int, value2 int, index(value)) engine=rocksdb; +insert into t1 values (1,1,1); +insert into t1 values (2,1,1); +insert into t1 values (3,1,1); +insert into t1 values (4,1,1); +insert into t1 values (5,1,1); +update t1 set value2=100 where id=1; +update t1 set value2=200 where id=2; +update t1 set value2=300 where id=3; +include/sync_slave_sql_with_master.inc +connection slave; +select * from t1 where id=1; +id value value2 +1 1 100 +select * from t1 where id=2; +id value value2 +2 1 200 +select * from t1 where id=3; +id value value2 +3 1 300 +connection master; +drop table t1; +set binlog_format=row; +include/rpl_end.inc diff --git a/storage/rocksdb/mysql-test/rocksdb/r/rpl_statement_not_found.result b/storage/rocksdb/mysql-test/rocksdb/r/rpl_statement_not_found.result new file mode 100644 index 0000000000000..9e71ffa72f0aa --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/rpl_statement_not_found.result @@ -0,0 +1,70 @@ +include/master-slave.inc +[connection master] +connection master; +drop table if exists t1; +connection master; +create table t0 (a int) engine=myisam; +insert into t0 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9); +create table t1(a int) engine=myisam; +insert into t1 select A.a + B.a* 10 + C.a * 100 from t0 A, t0 B, t0 C; +create table t2 ( +pk int primary key, +kp1 int, +kp2 int, +col1 int, +key (kp1,kp2) +) engine=rocksdb; +set @tmp_binlog_format=@@binlog_format; +set @@binlog_format=ROW; +insert into t2 select a,a,a,a from t1; +create table t3 like t2; +insert into t3 select * from t2; +set binlog_format=@tmp_binlog_format; +include/sync_slave_sql_with_master.inc +connection slave; +set global debug_dbug= 'd,dbug.rocksdb.get_row_by_rowid'; +include/stop_slave.inc +include/start_slave.inc +connection master; 
+update t2 set col1=100 where kp1 between 1 and 3 and mod(kp2,2)=0; +connection slave; +set debug_sync= 'now WAIT_FOR Reached'; +set global debug_dbug = ''; +set sql_log_bin=0; +delete from t2 where pk=2; +delete from t2 where pk=3; +set debug_sync= 'now SIGNAL signal.rocksdb.get_row_by_rowid_let_running'; +connection master; +include/sync_slave_sql_with_master.inc +connection slave; +select * from t2 where pk < 5; +pk kp1 kp2 col1 +0 0 0 0 +1 1 1 1 +4 4 4 4 +connection slave; +set global debug_dbug= 'd,dbug.rocksdb.get_row_by_rowid'; +include/stop_slave.inc +include/start_slave.inc +connection master; +update t3 set col1=100 where kp1 between 1 and 4 and mod(kp2,2)=0; +connection slave; +call mtr.add_suppression("Deadlock found when trying to get lock"); +set debug_sync= 'now WAIT_FOR Reached'; +set global debug_dbug = ''; +set sql_log_bin=0; +delete from t3 where pk=2; +delete from t3 where pk=3; +set debug_sync= 'now SIGNAL signal.rocksdb.get_row_by_rowid_let_running'; +connection master; +include/sync_slave_sql_with_master.inc +connection slave; +select * from t3 where pk < 5; +pk kp1 kp2 col1 +0 0 0 0 +1 1 1 1 +4 4 4 100 +set debug_sync='RESET'; +connection master; +drop table t0, t1, t2, t3; +include/rpl_end.inc diff --git a/storage/rocksdb/mysql-test/rocksdb/r/rqg_examples.result b/storage/rocksdb/mysql-test/rocksdb/r/rqg_examples.result new file mode 100644 index 0000000000000..766795932b001 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/rqg_examples.result @@ -0,0 +1,3 @@ +CREATE DATABASE IF NOT EXISTS rqg_examples; +Running test with grammar file example.yy +DROP DATABASE rqg_examples; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/rqg_runtime.result b/storage/rocksdb/mysql-test/rocksdb/r/rqg_runtime.result new file mode 100644 index 0000000000000..b0a1c4080066b --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/rqg_runtime.result @@ -0,0 +1,29 @@ +call mtr.add_suppression("Did not write failed "); +call mtr.add_suppression("Can't open 
and lock privilege tables"); +SET @ORIG_EVENT_SCHEDULER = @@EVENT_SCHEDULER; +CREATE TABLE mysql.user_temp LIKE mysql.user; +INSERT mysql.user_temp SELECT * FROM mysql.user; +CREATE TABLE mysql.tables_priv_temp LIKE mysql.tables_priv; +INSERT mysql.tables_priv_temp SELECT * FROM mysql.tables_priv_temp; +CREATE DATABASE IF NOT EXISTS rqg_runtime; +Running test with grammar file alter_online.yy +DROP DATABASE rqg_runtime; +CREATE DATABASE IF NOT EXISTS rqg_runtime; +Running test with grammar file concurrency_1.yy +DROP DATABASE rqg_runtime; +CREATE DATABASE IF NOT EXISTS rqg_runtime; +Running test with grammar file connect_kill_sql.yy +DROP DATABASE rqg_runtime; +CREATE DATABASE IF NOT EXISTS rqg_runtime; +Running test with grammar file metadata_stability.yy +DROP DATABASE rqg_runtime; +DELETE FROM mysql.tables_priv; +DELETE FROM mysql.user; +INSERT mysql.user SELECT * FROM mysql.user_temp; +INSERT mysql.tables_priv SELECT * FROM mysql.tables_priv_temp; +DROP TABLE mysql.user_temp; +DROP TABLE mysql.tables_priv_temp; +DROP TABLE IF EXISTS test.executors; +DROP DATABASE IF EXISTS testdb_N; +DROP DATABASE IF EXISTS testdb_S; +SET GLOBAL EVENT_SCHEDULER = @ORIG_EVENT_SCHEDULER; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/rqg_transactions.result b/storage/rocksdb/mysql-test/rocksdb/r/rqg_transactions.result new file mode 100644 index 0000000000000..23705d493e779 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/rqg_transactions.result @@ -0,0 +1,11 @@ +call mtr.add_suppression("Deadlock found when trying to get lock"); +CREATE DATABASE IF NOT EXISTS rqg_transactions; +Running test with grammar file transactions.yy +Running test with grammar file repeatable_read.yy +Running test with grammar file transaction_durability.yy +Running test with grammar file transactions-flat.yy +Running test with grammar file combinations.yy +Running test with grammar file repeatable_read.yy +Running test with grammar file transaction_durability.yy +Running test with grammar file 
transactions-flat.yy +DROP DATABASE rqg_transactions; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/select.result b/storage/rocksdb/mysql-test/rocksdb/r/select.result new file mode 100644 index 0000000000000..22a6ca9bc8743 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/select.result @@ -0,0 +1,373 @@ +DROP TABLE IF EXISTS t1, t2; +CREATE TABLE t1 (a INT, b CHAR(8), pk INT AUTO_INCREMENT PRIMARY KEY) ENGINE=rocksdb; +INSERT INTO t1 (a,b) VALUES (100,'foobar'),(1,'z'),(200,'bar'); +CREATE TABLE t2 (a INT, b CHAR(8), pk INT AUTO_INCREMENT PRIMARY KEY) ENGINE=rocksdb; +INSERT INTO t2 (a,b) SELECT a, b FROM t1; +INSERT INTO t1 (a,b) SELECT a, b FROM t2; +SELECT * FROM t1; +a b pk +1 z 2 +1 z 5 +100 foobar 1 +100 foobar 4 +200 bar 3 +200 bar 6 +SELECT DISTINCT a FROM t1; +a +1 +100 +200 +SELECT ALL b, a FROM t1; +b a +bar 200 +bar 200 +foobar 100 +foobar 100 +z 1 +z 1 +SELECT STRAIGHT_JOIN SQL_CACHE t1.* FROM t2, t1 WHERE t1.a <> t2.a; +a b pk +1 z 2 +1 z 2 +1 z 5 +1 z 5 +100 foobar 1 +100 foobar 1 +100 foobar 4 +100 foobar 4 +200 bar 3 +200 bar 3 +200 bar 6 +200 bar 6 +SELECT SQL_SMALL_RESULT SQL_NO_CACHE t1.a FROM t1, t2; +a +1 +1 +1 +1 +1 +1 +100 +100 +100 +100 +100 +100 +200 +200 +200 +200 +200 +200 +SELECT SQL_BIG_RESULT SQL_CALC_FOUND_ROWS DISTINCT(t2.a) +FROM t1 t1_1, t2, t1 t1_2; +a +1 +100 +200 +SELECT FOUND_ROWS(); +FOUND_ROWS() +3 +SET GLOBAL query_cache_size = 1024*1024; +SELECT SQL_CACHE * FROM t1, t2; +a b pk a b pk +1 z 2 1 z 2 +1 z 2 100 foobar 1 +1 z 2 200 bar 3 +1 z 5 1 z 2 +1 z 5 100 foobar 1 +1 z 5 200 bar 3 +100 foobar 1 1 z 2 +100 foobar 1 100 foobar 1 +100 foobar 1 200 bar 3 +100 foobar 4 1 z 2 +100 foobar 4 100 foobar 1 +100 foobar 4 200 bar 3 +200 bar 3 1 z 2 +200 bar 3 100 foobar 1 +200 bar 3 200 bar 3 +200 bar 6 1 z 2 +200 bar 6 100 foobar 1 +200 bar 6 200 bar 3 +SET GLOBAL query_cache_size = 1048576; +SELECT a+10 AS field1, CONCAT(b,':',b) AS field2 FROM t1 +WHERE b > 'b' AND a IS NOT NULL +GROUP BY 2 DESC, field1 ASC +HAVING 
field1 < 1000 +ORDER BY field2, 1 DESC, field1*2 +LIMIT 5 OFFSET 1; +field1 field2 +11 z:z +110 foobar:foobar +SELECT SUM(a), MAX(a), b FROM t1 GROUP BY b WITH ROLLUP; +SUM(a) MAX(a) b +2 1 z +200 100 foobar +400 200 bar +602 200 NULL +SELECT * FROM t2 WHERE a>0 PROCEDURE ANALYSE(); +Field_name Min_value Max_value Min_length Max_length Empties_or_zeros Nulls Avg_value_or_avg_length Std Optimal_fieldtype +test.t2.a 1 200 1 3 0 0 100.3333 81.2418 ENUM('1','100','200') NOT NULL +test.t2.b bar z 1 6 0 0 3.3333 NULL ENUM('bar','foobar','z') NOT NULL +test.t2.pk 1 3 1 1 0 0 2.0000 0.8165 ENUM('1','2','3') NOT NULL +SELECT t1.a, t2.b FROM t2, t1 WHERE t1.a = t2.a ORDER BY t2.b, t1.a +INTO OUTFILE '/select.out' +CHARACTER SET utf8 +FIELDS TERMINATED BY ',' OPTIONALLY ENCLOSED BY ''''; +200,'bar' +200,'bar' +100,'foobar' +100,'foobar' +1,'z' +1,'z' +SELECT t1.a, t2.b FROM t2, t1 WHERE t1.a = t2.a ORDER BY t2.b, t1.a +INTO DUMPFILE '/select.dump'; +ERROR 42000: Result consisted of more than one row +SELECT t1.*, t2.* FROM t1, t2 ORDER BY t2.b, t1.a, t2.a, t1.b, t1.pk, t2.pk LIMIT 1 +INTO DUMPFILE '/select.dump'; +1z2200bar3 +SELECT MIN(a), MAX(a) FROM t1 INTO @min, @max; +SELECT @min, @max; +@min @max +1 200 +SELECT t1_1.*, t2.* FROM t2, t1 AS t1_1, t1 AS t1_2 +WHERE t1_1.a = t1_2.a AND t2.a = t1_1.a; +a b pk a b pk +1 z 2 1 z 2 +1 z 2 1 z 2 +1 z 5 1 z 2 +1 z 5 1 z 2 +100 foobar 1 100 foobar 1 +100 foobar 1 100 foobar 1 +100 foobar 4 100 foobar 1 +100 foobar 4 100 foobar 1 +200 bar 3 200 bar 3 +200 bar 3 200 bar 3 +200 bar 6 200 bar 3 +200 bar 6 200 bar 3 +SELECT alias1.* FROM ( SELECT a,b FROM t1 ) alias1, t2 WHERE t2.a IN (100,200); +a b +1 z +1 z +1 z +1 z +100 foobar +100 foobar +100 foobar +100 foobar +200 bar +200 bar +200 bar +200 bar +SELECT t1.a FROM { OJ t1 LEFT OUTER JOIN t2 ON t1.a = t2.a+10 }; +a +1 +1 +100 +100 +200 +200 +SELECT t1.* FROM t2 INNER JOIN t1; +a b pk +1 z 2 +1 z 2 +1 z 2 +1 z 5 +1 z 5 +1 z 5 +100 foobar 1 +100 foobar 1 +100 foobar 1 +100 foobar 4 
+100 foobar 4 +100 foobar 4 +200 bar 3 +200 bar 3 +200 bar 3 +200 bar 6 +200 bar 6 +200 bar 6 +SELECT t1_2.* FROM t1 t1_1 CROSS JOIN t1 t1_2 ON t1_1.b = t1_2.b; +a b pk +1 z 2 +1 z 2 +1 z 5 +1 z 5 +100 foobar 1 +100 foobar 1 +100 foobar 4 +100 foobar 4 +200 bar 3 +200 bar 3 +200 bar 6 +200 bar 6 +SELECT t1.a, t2.b FROM t2 STRAIGHT_JOIN t1 WHERE t1.b > t2.b; +a b +1 bar +1 bar +1 foobar +1 foobar +100 bar +100 bar +SELECT t1.a, t2.b FROM t2 STRAIGHT_JOIN t1 ON t1.b > t2.b ORDER BY t1.a, t2.b; +a b +1 bar +1 bar +1 foobar +1 foobar +100 bar +100 bar +SELECT t2.* FROM t1 LEFT JOIN t2 USING (a) ORDER BY t2.a, t2.b LIMIT 1; +a b pk +1 z 2 +SELECT t2.* FROM t2 LEFT OUTER JOIN t1 ON t1.a = t2.a WHERE t1.a IS NOT NULL; +a b pk +1 z 2 +1 z 2 +100 foobar 1 +100 foobar 1 +200 bar 3 +200 bar 3 +SELECT SUM(t2.a) FROM t1 RIGHT JOIN t2 ON t2.b = t1.b; +SUM(t2.a) +602 +SELECT MIN(t2.a) FROM t1 RIGHT OUTER JOIN t2 USING (b,a); +MIN(t2.a) +1 +SELECT alias.b FROM t1 NATURAL JOIN ( SELECT a,b FROM t1 ) alias WHERE b > ''; +b +bar +bar +bar +bar +foobar +foobar +foobar +foobar +z +z +z +z +SELECT t2.b FROM ( SELECT a,b FROM t1 ) alias NATURAL LEFT JOIN t2 WHERE b IS NOT NULL; +b +bar +bar +foobar +foobar +z +z +SELECT t1.*, t2.* FROM t1 NATURAL LEFT OUTER JOIN t2; +a b pk a b pk +1 z 2 1 z 2 +1 z 5 NULL NULL NULL +100 foobar 1 100 foobar 1 +100 foobar 4 NULL NULL NULL +200 bar 3 200 bar 3 +200 bar 6 NULL NULL NULL +SELECT t2_2.* FROM t2 t2_1 NATURAL RIGHT JOIN t2 t2_2 WHERE t2_1.a IN ( SELECT a FROM t1 ); +a b pk +1 z 2 +100 foobar 1 +200 bar 3 +SELECT t1_2.b FROM t1 t1_1 NATURAL RIGHT OUTER JOIN t1 t1_2 INNER JOIN t2; +b +bar +bar +bar +bar +bar +bar +foobar +foobar +foobar +foobar +foobar +foobar +z +z +z +z +z +z +SELECT ( SELECT MIN(a) FROM ( SELECT a,b FROM t1 ) alias1 ) AS min_a FROM t2; +min_a +1 +1 +1 +SELECT a,b FROM t2 WHERE a = ( SELECT MIN(a) FROM t1 ); +a b +1 z +SELECT a,b FROM t2 WHERE b LIKE ( SELECT b FROM t1 ORDER BY b LIMIT 1 ); +a b +200 bar +SELECT t2.* FROM t1 
t1_outer, t2 WHERE ( t1_outer.a, t2.b ) IN ( SELECT a, b FROM t2 WHERE a = t1_outer.a ); +a b pk +1 z 2 +1 z 2 +100 foobar 1 +100 foobar 1 +200 bar 3 +200 bar 3 +SELECT a,b FROM t2 WHERE b = ANY ( SELECT b FROM t1 WHERE a > 1 ); +a b +100 foobar +200 bar +SELECT a,b FROM t2 WHERE b > ALL ( SELECT b FROM t1 WHERE b < 'foo' ); +a b +1 z +100 foobar +SELECT a,b FROM t1 WHERE ROW(a, b) = ( SELECT a, b FROM t2 ORDER BY a, b LIMIT 1 ); +a b +1 z +1 z +SELECT a,b FROM t1 WHERE EXISTS ( SELECT a,b FROM t2 WHERE t2.b > t1.b ); +a b +100 foobar +100 foobar +200 bar +200 bar +SELECT t1.* FROM t1, t2 ORDER BY ( SELECT b FROM t1 WHERE a IS NULL ORDER BY b LIMIT 1 ) DESC; +a b pk +1 z 2 +1 z 2 +1 z 2 +1 z 5 +1 z 5 +1 z 5 +100 foobar 1 +100 foobar 1 +100 foobar 1 +100 foobar 4 +100 foobar 4 +100 foobar 4 +200 bar 3 +200 bar 3 +200 bar 3 +200 bar 6 +200 bar 6 +200 bar 6 +SELECT a, b FROM t1 HAVING a IN ( SELECT a FROM t2 WHERE b = t1.b ); +a b +1 z +1 z +100 foobar +100 foobar +200 bar +200 bar +SELECT a,b FROM t1 UNION SELECT a,b FROM t2 UNION DISTINCT SELECT a,b FROM t1; +a b +1 z +100 foobar +200 bar +SELECT a,b FROM t1 UNION SELECT a,b FROM t2 UNION ALL SELECT a,b FROM t1; +a b +1 z +1 z +1 z +100 foobar +100 foobar +100 foobar +200 bar +200 bar +200 bar +DROP TABLE t1, t2; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/select_for_update.result b/storage/rocksdb/mysql-test/rocksdb/r/select_for_update.result new file mode 100644 index 0000000000000..2890941a1b931 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/select_for_update.result @@ -0,0 +1,35 @@ +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 (a INT, b CHAR(8), pk INT AUTO_INCREMENT PRIMARY KEY) ENGINE=rocksdb; +INSERT INTO t1 (a,b) VALUES (1,'a'),(2,'b'),(3,'a'); +connect con1,localhost,root,,; +BEGIN; +SELECT a,b FROM t1 WHERE b='a' FOR UPDATE; +a b +1 a +3 a +connection default; +SET lock_wait_timeout = 1; +SELECT a,b FROM t1 WHERE b='a'; +a b +1 a +3 a +SELECT a,b FROM t1 WHERE b='a' LOCK IN SHARE MODE; +ERROR 
HY000: Lock wait timeout exceeded; try restarting transaction +UPDATE t1 SET b='c' WHERE b='a'; +ERROR HY000: Lock wait timeout exceeded; try restarting transaction +connection con1; +COMMIT; +SELECT a,b FROM t1; +a b +1 a +2 b +3 a +disconnect con1; +connection default; +UPDATE t1 SET b='c' WHERE b='a'; +SELECT a,b FROM t1; +a b +1 c +2 b +3 c +DROP TABLE t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/select_for_update_skip_locked_nowait.result b/storage/rocksdb/mysql-test/rocksdb/r/select_for_update_skip_locked_nowait.result new file mode 100644 index 0000000000000..044aa4d6fc782 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/select_for_update_skip_locked_nowait.result @@ -0,0 +1,28 @@ +drop table if exists t1; +create table t1 (a int primary key) engine=rocksdb; +insert into t1 values (1), (2), (3); +Should succeed since no table gets involved +select 1 for update skip locked; +1 +1 +select * from nonexistence for update skip locked; +ERROR 42S02: Table 'test.nonexistence' doesn't exist +select * from t1 for update skip locked; +ERROR HY000: Table storage engine for 't1' doesn't have this option +select * from t1 where a > 1 and a < 3 for update skip locked; +ERROR HY000: Table storage engine for 't1' doesn't have this option +insert into t1 select * from t1 for update skip locked; +ERROR HY000: Table storage engine for 't1' doesn't have this option +Should succeed since no table gets involved +select 1 for update nowait; +1 +1 +select * from nonexistence for update nowait; +ERROR 42S02: Table 'test.nonexistence' doesn't exist +select * from t1 for update nowait; +ERROR HY000: Table storage engine for 't1' doesn't have this option +select * from t1 where a > 1 and a < 3 for update nowait; +ERROR HY000: Table storage engine for 't1' doesn't have this option +insert into t1 select * from t1 for update nowait; +ERROR HY000: Table storage engine for 't1' doesn't have this option +drop table t1; diff --git 
a/storage/rocksdb/mysql-test/rocksdb/r/select_lock_in_share_mode.result b/storage/rocksdb/mysql-test/rocksdb/r/select_lock_in_share_mode.result new file mode 100644 index 0000000000000..b073b8871154a --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/select_lock_in_share_mode.result @@ -0,0 +1,37 @@ +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 (a INT, b CHAR(8), pk INT AUTO_INCREMENT PRIMARY KEY) ENGINE=rocksdb; +INSERT INTO t1 (a,b) VALUES (1,'a'),(2,'b'),(3,'a'); +connect con1,localhost,root,,; +BEGIN; +SELECT a,b FROM t1 WHERE b='a' LOCK IN SHARE MODE; +a b +1 a +3 a +connection default; +SET lock_wait_timeout = 1; +SELECT a,b FROM t1 WHERE b='a'; +a b +1 a +3 a +SELECT a,b FROM t1 WHERE b='a' LOCK IN SHARE MODE; +a b +1 a +3 a +UPDATE t1 SET b='c' WHERE b='a'; +ERROR HY000: Lock wait timeout exceeded; try restarting transaction +connection con1; +COMMIT; +SELECT a,b FROM t1; +a b +1 a +2 b +3 a +disconnect con1; +connection default; +UPDATE t1 SET b='c' WHERE b='a'; +SELECT a,b FROM t1; +a b +1 c +2 b +3 c +DROP TABLE t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/show_engine.result b/storage/rocksdb/mysql-test/rocksdb/r/show_engine.result new file mode 100644 index 0000000000000..19d794da848eb --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/show_engine.result @@ -0,0 +1,343 @@ +DROP TABLE IF EXISTS t1; +DROP TABLE IF EXISTS t2; +DROP TABLE IF EXISTS t3; +DROP TABLE IF EXISTS t4; +CREATE TABLE t1 (i INT, PRIMARY KEY (i) COMMENT 'cf_t1') ENGINE = ROCKSDB; +CREATE TABLE t2 (j INT, PRIMARY KEY (j) COMMENT 'rev:cf_t2') ENGINE = ROCKSDB; +CREATE TABLE t3 (k INT, PRIMARY KEY (k) COMMENT 'cf_t1') ENGINE = ROCKSDB; +CREATE TABLE t4 (l INT, PRIMARY KEY (l) COMMENT 'cf_t4') ENGINE = ROCKSDB +PARTITION BY KEY(l) PARTITIONS 4; +SHOW ENGINE rocksdb STATUS; +Type Name Status +DBSTATS rocksdb # +CF_COMPACTION __system__ # +CF_COMPACTION cf_t1 # +CF_COMPACTION default # +CF_COMPACTION rev:cf_t2 # +Memory_Stats rocksdb # +INSERT INTO t1 VALUES (1), (2), (3); 
+SELECT COUNT(*) FROM t1; +COUNT(*) +3 +INSERT INTO t2 VALUES (1), (2), (3), (4); +SELECT COUNT(*) FROM t2; +COUNT(*) +4 +INSERT INTO t4 VALUES (1), (2), (3), (4), (5); +SELECT COUNT(*) FROM t4; +COUNT(*) +5 +SELECT * FROM INFORMATION_SCHEMA.ROCKSDB_CFSTATS; +CF_NAME STAT_TYPE VALUE +__system__ NUM_IMMUTABLE_MEM_TABLE # +__system__ MEM_TABLE_FLUSH_PENDING # +__system__ COMPACTION_PENDING # +__system__ CUR_SIZE_ACTIVE_MEM_TABLE # +__system__ CUR_SIZE_ALL_MEM_TABLES # +__system__ NUM_ENTRIES_ACTIVE_MEM_TABLE # +__system__ NUM_ENTRIES_IMM_MEM_TABLES # +__system__ NON_BLOCK_CACHE_SST_MEM_USAGE # +__system__ NUM_LIVE_VERSIONS # +cf_t1 NUM_IMMUTABLE_MEM_TABLE # +cf_t1 MEM_TABLE_FLUSH_PENDING # +cf_t1 COMPACTION_PENDING # +cf_t1 CUR_SIZE_ACTIVE_MEM_TABLE # +cf_t1 CUR_SIZE_ALL_MEM_TABLES # +cf_t1 NUM_ENTRIES_ACTIVE_MEM_TABLE # +cf_t1 NUM_ENTRIES_IMM_MEM_TABLES # +cf_t1 NON_BLOCK_CACHE_SST_MEM_USAGE # +cf_t1 NUM_LIVE_VERSIONS # +default NUM_IMMUTABLE_MEM_TABLE # +default MEM_TABLE_FLUSH_PENDING # +default COMPACTION_PENDING # +default CUR_SIZE_ACTIVE_MEM_TABLE # +default CUR_SIZE_ALL_MEM_TABLES # +default NUM_ENTRIES_ACTIVE_MEM_TABLE # +default NUM_ENTRIES_IMM_MEM_TABLES # +default NON_BLOCK_CACHE_SST_MEM_USAGE # +default NUM_LIVE_VERSIONS # +rev:cf_t2 NUM_IMMUTABLE_MEM_TABLE # +rev:cf_t2 MEM_TABLE_FLUSH_PENDING # +rev:cf_t2 COMPACTION_PENDING # +rev:cf_t2 CUR_SIZE_ACTIVE_MEM_TABLE # +rev:cf_t2 CUR_SIZE_ALL_MEM_TABLES # +rev:cf_t2 NUM_ENTRIES_ACTIVE_MEM_TABLE # +rev:cf_t2 NUM_ENTRIES_IMM_MEM_TABLES # +rev:cf_t2 NON_BLOCK_CACHE_SST_MEM_USAGE # +rev:cf_t2 NUM_LIVE_VERSIONS # +SELECT * FROM INFORMATION_SCHEMA.ROCKSDB_DBSTATS; +STAT_TYPE VALUE +DB_BACKGROUND_ERRORS # +DB_NUM_SNAPSHOTS # +DB_OLDEST_SNAPSHOT_TIME # +DB_BLOCK_CACHE_USAGE # +SELECT TABLE_SCHEMA, TABLE_NAME, PARTITION_NAME, COUNT(STAT_TYPE) +FROM INFORMATION_SCHEMA.ROCKSDB_PERF_CONTEXT +WHERE TABLE_SCHEMA = 'test' +GROUP BY TABLE_NAME, PARTITION_NAME; +TABLE_SCHEMA TABLE_NAME PARTITION_NAME COUNT(STAT_TYPE) +test t1 
NULL 43 +test t2 NULL 43 +test t4 p0 43 +test t4 p1 43 +test t4 p2 43 +test t4 p3 43 +SELECT * FROM INFORMATION_SCHEMA.ROCKSDB_CF_OPTIONS; +CF_NAME OPTION_TYPE VALUE +__system__ COMPARATOR # +__system__ MERGE_OPERATOR # +__system__ COMPACTION_FILTER # +__system__ COMPACTION_FILTER_FACTORY # +__system__ WRITE_BUFFER_SIZE # +__system__ MAX_WRITE_BUFFER_NUMBER # +__system__ MIN_WRITE_BUFFER_NUMBER_TO_MERGE # +__system__ NUM_LEVELS # +__system__ LEVEL0_FILE_NUM_COMPACTION_TRIGGER # +__system__ LEVEL0_SLOWDOWN_WRITES_TRIGGER # +__system__ LEVEL0_STOP_WRITES_TRIGGER # +__system__ MAX_MEM_COMPACTION_LEVEL # +__system__ TARGET_FILE_SIZE_BASE # +__system__ TARGET_FILE_SIZE_MULTIPLIER # +__system__ MAX_BYTES_FOR_LEVEL_BASE # +__system__ LEVEL_COMPACTION_DYNAMIC_LEVEL_BYTES # +__system__ MAX_BYTES_FOR_LEVEL_MULTIPLIER # +__system__ SOFT_RATE_LIMIT # +__system__ HARD_RATE_LIMIT # +__system__ RATE_LIMIT_DELAY_MAX_MILLISECONDS # +__system__ ARENA_BLOCK_SIZE # +__system__ DISABLE_AUTO_COMPACTIONS # +__system__ PURGE_REDUNDANT_KVS_WHILE_FLUSH # +__system__ MAX_SEQUENTIAL_SKIP_IN_ITERATIONS # +__system__ MEMTABLE_FACTORY # +__system__ INPLACE_UPDATE_SUPPORT # +__system__ INPLACE_UPDATE_NUM_LOCKS # +__system__ MEMTABLE_PREFIX_BLOOM_BITS_RATIO # +__system__ MEMTABLE_PREFIX_BLOOM_HUGE_PAGE_TLB_SIZE # +__system__ BLOOM_LOCALITY # +__system__ MAX_SUCCESSIVE_MERGES # +__system__ OPTIMIZE_FILTERS_FOR_HITS # +__system__ MAX_BYTES_FOR_LEVEL_MULTIPLIER_ADDITIONAL # +__system__ COMPRESSION_TYPE # +__system__ COMPRESSION_PER_LEVEL # +__system__ COMPRESSION_OPTS # +__system__ BOTTOMMOST_COMPRESSION # +__system__ PREFIX_EXTRACTOR # +__system__ COMPACTION_STYLE # +__system__ COMPACTION_OPTIONS_UNIVERSAL # +__system__ COMPACTION_OPTION_FIFO::MAX_TABLE_FILES_SIZE # +__system__ BLOCK_BASED_TABLE_FACTORY::CACHE_INDEX_AND_FILTER_BLOCKS # +__system__ BLOCK_BASED_TABLE_FACTORY::INDEX_TYPE # +__system__ BLOCK_BASED_TABLE_FACTORY::HASH_INDEX_ALLOW_COLLISION # +__system__ 
BLOCK_BASED_TABLE_FACTORY::CHECKSUM # +__system__ BLOCK_BASED_TABLE_FACTORY::NO_BLOCK_CACHE # +__system__ BLOCK_BASED_TABLE_FACTORY::FILTER_POLICY # +__system__ BLOCK_BASED_TABLE_FACTORY::WHOLE_KEY_FILTERING # +__system__ BLOCK_BASED_TABLE_FACTORY::BLOCK_CACHE # +__system__ BLOCK_BASED_TABLE_FACTORY::BLOCK_CACHE_COMPRESSED # +__system__ BLOCK_BASED_TABLE_FACTORY::BLOCK_SIZE # +__system__ BLOCK_BASED_TABLE_FACTORY::BLOCK_SIZE_DEVIATION # +__system__ BLOCK_BASED_TABLE_FACTORY::BLOCK_RESTART_INTERVAL # +__system__ BLOCK_BASED_TABLE_FACTORY::FORMAT_VERSION # +cf_t1 COMPARATOR # +cf_t1 MERGE_OPERATOR # +cf_t1 COMPACTION_FILTER # +cf_t1 COMPACTION_FILTER_FACTORY # +cf_t1 WRITE_BUFFER_SIZE # +cf_t1 MAX_WRITE_BUFFER_NUMBER # +cf_t1 MIN_WRITE_BUFFER_NUMBER_TO_MERGE # +cf_t1 NUM_LEVELS # +cf_t1 LEVEL0_FILE_NUM_COMPACTION_TRIGGER # +cf_t1 LEVEL0_SLOWDOWN_WRITES_TRIGGER # +cf_t1 LEVEL0_STOP_WRITES_TRIGGER # +cf_t1 MAX_MEM_COMPACTION_LEVEL # +cf_t1 TARGET_FILE_SIZE_BASE # +cf_t1 TARGET_FILE_SIZE_MULTIPLIER # +cf_t1 MAX_BYTES_FOR_LEVEL_BASE # +cf_t1 LEVEL_COMPACTION_DYNAMIC_LEVEL_BYTES # +cf_t1 MAX_BYTES_FOR_LEVEL_MULTIPLIER # +cf_t1 SOFT_RATE_LIMIT # +cf_t1 HARD_RATE_LIMIT # +cf_t1 RATE_LIMIT_DELAY_MAX_MILLISECONDS # +cf_t1 ARENA_BLOCK_SIZE # +cf_t1 DISABLE_AUTO_COMPACTIONS # +cf_t1 PURGE_REDUNDANT_KVS_WHILE_FLUSH # +cf_t1 MAX_SEQUENTIAL_SKIP_IN_ITERATIONS # +cf_t1 MEMTABLE_FACTORY # +cf_t1 INPLACE_UPDATE_SUPPORT # +cf_t1 INPLACE_UPDATE_NUM_LOCKS # +cf_t1 MEMTABLE_PREFIX_BLOOM_BITS_RATIO # +cf_t1 MEMTABLE_PREFIX_BLOOM_HUGE_PAGE_TLB_SIZE # +cf_t1 BLOOM_LOCALITY # +cf_t1 MAX_SUCCESSIVE_MERGES # +cf_t1 OPTIMIZE_FILTERS_FOR_HITS # +cf_t1 MAX_BYTES_FOR_LEVEL_MULTIPLIER_ADDITIONAL # +cf_t1 COMPRESSION_TYPE # +cf_t1 COMPRESSION_PER_LEVEL # +cf_t1 COMPRESSION_OPTS # +cf_t1 BOTTOMMOST_COMPRESSION # +cf_t1 PREFIX_EXTRACTOR # +cf_t1 COMPACTION_STYLE # +cf_t1 COMPACTION_OPTIONS_UNIVERSAL # +cf_t1 COMPACTION_OPTION_FIFO::MAX_TABLE_FILES_SIZE # +cf_t1 
BLOCK_BASED_TABLE_FACTORY::CACHE_INDEX_AND_FILTER_BLOCKS # +cf_t1 BLOCK_BASED_TABLE_FACTORY::INDEX_TYPE # +cf_t1 BLOCK_BASED_TABLE_FACTORY::HASH_INDEX_ALLOW_COLLISION # +cf_t1 BLOCK_BASED_TABLE_FACTORY::CHECKSUM # +cf_t1 BLOCK_BASED_TABLE_FACTORY::NO_BLOCK_CACHE # +cf_t1 BLOCK_BASED_TABLE_FACTORY::FILTER_POLICY # +cf_t1 BLOCK_BASED_TABLE_FACTORY::WHOLE_KEY_FILTERING # +cf_t1 BLOCK_BASED_TABLE_FACTORY::BLOCK_CACHE # +cf_t1 BLOCK_BASED_TABLE_FACTORY::BLOCK_CACHE_COMPRESSED # +cf_t1 BLOCK_BASED_TABLE_FACTORY::BLOCK_SIZE # +cf_t1 BLOCK_BASED_TABLE_FACTORY::BLOCK_SIZE_DEVIATION # +cf_t1 BLOCK_BASED_TABLE_FACTORY::BLOCK_RESTART_INTERVAL # +cf_t1 BLOCK_BASED_TABLE_FACTORY::FORMAT_VERSION # +default COMPARATOR # +default MERGE_OPERATOR # +default COMPACTION_FILTER # +default COMPACTION_FILTER_FACTORY # +default WRITE_BUFFER_SIZE # +default MAX_WRITE_BUFFER_NUMBER # +default MIN_WRITE_BUFFER_NUMBER_TO_MERGE # +default NUM_LEVELS # +default LEVEL0_FILE_NUM_COMPACTION_TRIGGER # +default LEVEL0_SLOWDOWN_WRITES_TRIGGER # +default LEVEL0_STOP_WRITES_TRIGGER # +default MAX_MEM_COMPACTION_LEVEL # +default TARGET_FILE_SIZE_BASE # +default TARGET_FILE_SIZE_MULTIPLIER # +default MAX_BYTES_FOR_LEVEL_BASE # +default LEVEL_COMPACTION_DYNAMIC_LEVEL_BYTES # +default MAX_BYTES_FOR_LEVEL_MULTIPLIER # +default SOFT_RATE_LIMIT # +default HARD_RATE_LIMIT # +default RATE_LIMIT_DELAY_MAX_MILLISECONDS # +default ARENA_BLOCK_SIZE # +default DISABLE_AUTO_COMPACTIONS # +default PURGE_REDUNDANT_KVS_WHILE_FLUSH # +default MAX_SEQUENTIAL_SKIP_IN_ITERATIONS # +default MEMTABLE_FACTORY # +default INPLACE_UPDATE_SUPPORT # +default INPLACE_UPDATE_NUM_LOCKS # +default MEMTABLE_PREFIX_BLOOM_BITS_RATIO # +default MEMTABLE_PREFIX_BLOOM_HUGE_PAGE_TLB_SIZE # +default BLOOM_LOCALITY # +default MAX_SUCCESSIVE_MERGES # +default OPTIMIZE_FILTERS_FOR_HITS # +default MAX_BYTES_FOR_LEVEL_MULTIPLIER_ADDITIONAL # +default COMPRESSION_TYPE # +default COMPRESSION_PER_LEVEL # +default COMPRESSION_OPTS # +default 
BOTTOMMOST_COMPRESSION # +default PREFIX_EXTRACTOR # +default COMPACTION_STYLE # +default COMPACTION_OPTIONS_UNIVERSAL # +default COMPACTION_OPTION_FIFO::MAX_TABLE_FILES_SIZE # +default BLOCK_BASED_TABLE_FACTORY::CACHE_INDEX_AND_FILTER_BLOCKS # +default BLOCK_BASED_TABLE_FACTORY::INDEX_TYPE # +default BLOCK_BASED_TABLE_FACTORY::HASH_INDEX_ALLOW_COLLISION # +default BLOCK_BASED_TABLE_FACTORY::CHECKSUM # +default BLOCK_BASED_TABLE_FACTORY::NO_BLOCK_CACHE # +default BLOCK_BASED_TABLE_FACTORY::FILTER_POLICY # +default BLOCK_BASED_TABLE_FACTORY::WHOLE_KEY_FILTERING # +default BLOCK_BASED_TABLE_FACTORY::BLOCK_CACHE # +default BLOCK_BASED_TABLE_FACTORY::BLOCK_CACHE_COMPRESSED # +default BLOCK_BASED_TABLE_FACTORY::BLOCK_SIZE # +default BLOCK_BASED_TABLE_FACTORY::BLOCK_SIZE_DEVIATION # +default BLOCK_BASED_TABLE_FACTORY::BLOCK_RESTART_INTERVAL # +default BLOCK_BASED_TABLE_FACTORY::FORMAT_VERSION # +rev:cf_t2 COMPARATOR # +rev:cf_t2 MERGE_OPERATOR # +rev:cf_t2 COMPACTION_FILTER # +rev:cf_t2 COMPACTION_FILTER_FACTORY # +rev:cf_t2 WRITE_BUFFER_SIZE # +rev:cf_t2 MAX_WRITE_BUFFER_NUMBER # +rev:cf_t2 MIN_WRITE_BUFFER_NUMBER_TO_MERGE # +rev:cf_t2 NUM_LEVELS # +rev:cf_t2 LEVEL0_FILE_NUM_COMPACTION_TRIGGER # +rev:cf_t2 LEVEL0_SLOWDOWN_WRITES_TRIGGER # +rev:cf_t2 LEVEL0_STOP_WRITES_TRIGGER # +rev:cf_t2 MAX_MEM_COMPACTION_LEVEL # +rev:cf_t2 TARGET_FILE_SIZE_BASE # +rev:cf_t2 TARGET_FILE_SIZE_MULTIPLIER # +rev:cf_t2 MAX_BYTES_FOR_LEVEL_BASE # +rev:cf_t2 LEVEL_COMPACTION_DYNAMIC_LEVEL_BYTES # +rev:cf_t2 MAX_BYTES_FOR_LEVEL_MULTIPLIER # +rev:cf_t2 SOFT_RATE_LIMIT # +rev:cf_t2 HARD_RATE_LIMIT # +rev:cf_t2 RATE_LIMIT_DELAY_MAX_MILLISECONDS # +rev:cf_t2 ARENA_BLOCK_SIZE # +rev:cf_t2 DISABLE_AUTO_COMPACTIONS # +rev:cf_t2 PURGE_REDUNDANT_KVS_WHILE_FLUSH # +rev:cf_t2 MAX_SEQUENTIAL_SKIP_IN_ITERATIONS # +rev:cf_t2 MEMTABLE_FACTORY # +rev:cf_t2 INPLACE_UPDATE_SUPPORT # +rev:cf_t2 INPLACE_UPDATE_NUM_LOCKS # +rev:cf_t2 MEMTABLE_PREFIX_BLOOM_BITS_RATIO # +rev:cf_t2 
MEMTABLE_PREFIX_BLOOM_HUGE_PAGE_TLB_SIZE # +rev:cf_t2 BLOOM_LOCALITY # +rev:cf_t2 MAX_SUCCESSIVE_MERGES # +rev:cf_t2 OPTIMIZE_FILTERS_FOR_HITS # +rev:cf_t2 MAX_BYTES_FOR_LEVEL_MULTIPLIER_ADDITIONAL # +rev:cf_t2 COMPRESSION_TYPE # +rev:cf_t2 COMPRESSION_PER_LEVEL # +rev:cf_t2 COMPRESSION_OPTS # +rev:cf_t2 BOTTOMMOST_COMPRESSION # +rev:cf_t2 PREFIX_EXTRACTOR # +rev:cf_t2 COMPACTION_STYLE # +rev:cf_t2 COMPACTION_OPTIONS_UNIVERSAL # +rev:cf_t2 COMPACTION_OPTION_FIFO::MAX_TABLE_FILES_SIZE # +rev:cf_t2 BLOCK_BASED_TABLE_FACTORY::CACHE_INDEX_AND_FILTER_BLOCKS # +rev:cf_t2 BLOCK_BASED_TABLE_FACTORY::INDEX_TYPE # +rev:cf_t2 BLOCK_BASED_TABLE_FACTORY::HASH_INDEX_ALLOW_COLLISION # +rev:cf_t2 BLOCK_BASED_TABLE_FACTORY::CHECKSUM # +rev:cf_t2 BLOCK_BASED_TABLE_FACTORY::NO_BLOCK_CACHE # +rev:cf_t2 BLOCK_BASED_TABLE_FACTORY::FILTER_POLICY # +rev:cf_t2 BLOCK_BASED_TABLE_FACTORY::WHOLE_KEY_FILTERING # +rev:cf_t2 BLOCK_BASED_TABLE_FACTORY::BLOCK_CACHE # +rev:cf_t2 BLOCK_BASED_TABLE_FACTORY::BLOCK_CACHE_COMPRESSED # +rev:cf_t2 BLOCK_BASED_TABLE_FACTORY::BLOCK_SIZE # +rev:cf_t2 BLOCK_BASED_TABLE_FACTORY::BLOCK_SIZE_DEVIATION # +rev:cf_t2 BLOCK_BASED_TABLE_FACTORY::BLOCK_RESTART_INTERVAL # +rev:cf_t2 BLOCK_BASED_TABLE_FACTORY::FORMAT_VERSION # +DROP TABLE t1; +DROP TABLE t2; +DROP TABLE t3; +DROP TABLE t4; +SHOW ENGINE rocksdb MUTEX; +Type Name Status +SHOW ENGINE ALL MUTEX; +SHOW ENGINE rocksdb TRANSACTION STATUS; +Type Name Status +SNAPSHOTS rocksdb +============================================================ +TIMESTAMP ROCKSDB TRANSACTION MONITOR OUTPUT +============================================================ +--------- +SNAPSHOTS +--------- +LIST OF SNAPSHOTS FOR EACH SESSION: +----------------------------------------- +END OF ROCKSDB TRANSACTION MONITOR OUTPUT +========================================= + +START TRANSACTION WITH CONSISTENT SNAPSHOT; +SHOW ENGINE rocksdb TRANSACTION STATUS; +Type Name Status +SNAPSHOTS rocksdb 
+============================================================ +TIMESTAMP ROCKSDB TRANSACTION MONITOR OUTPUT +============================================================ +--------- +SNAPSHOTS +--------- +LIST OF SNAPSHOTS FOR EACH SESSION: +---SNAPSHOT, ACTIVE NUM sec +MySQL thread id TID, OS thread handle PTR, query id QID localhost root ACTION +SHOW ENGINE rocksdb TRANSACTION STATUS +lock count 0, write count 0 +----------------------------------------- +END OF ROCKSDB TRANSACTION MONITOR OUTPUT +========================================= + +ROLLBACK; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/show_table_status.result b/storage/rocksdb/mysql-test/rocksdb/r/show_table_status.result new file mode 100644 index 0000000000000..407a8b103bd49 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/show_table_status.result @@ -0,0 +1,24 @@ +DROP TABLE IF EXISTS t1, t2, t3; +CREATE TABLE t1 (a INT, b CHAR(8) PRIMARY KEY) ENGINE=rocksdb; +INSERT INTO t1 (a,b) VALUES (100,'a'),(2,'foo'); +CREATE TABLE t2 (a INT PRIMARY KEY, b CHAR(8)) ENGINE=rocksdb; +INSERT INTO t2 (a,b) VALUES (1,'bar'); +set global rocksdb_force_flush_memtable_now = true; +CREATE TABLE t3 (a INT, b CHAR(8), pk INT PRIMARY KEY) ENGINE=rocksdb CHARACTER SET utf8; +SHOW TABLE STATUS WHERE name IN ( 't1', 't2', 't3' ); +Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length Index_length Data_free Auto_increment Create_time Update_time Check_time Collation Checksum Create_options Comment +t1 ROCKSDB 10 Fixed 2 # # 0 0 0 NULL NULL NULL NULL latin1_swedish_ci NULL +t2 ROCKSDB 10 Fixed 1 # # 0 0 0 NULL NULL NULL NULL latin1_swedish_ci NULL +t3 ROCKSDB 10 Fixed 1000 # # 0 0 0 NULL NULL NULL NULL utf8_general_ci NULL +SHOW TABLE STATUS WHERE name LIKE 't2'; +Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length Index_length Data_free Auto_increment Create_time Update_time Check_time Collation Checksum Create_options Comment +t2 ROCKSDB 10 Fixed 10000 # # 0 0 
0 NULL NULL NULL NULL latin1_swedish_ci NULL +DROP TABLE t1, t2, t3; +CREATE DATABASE `db_new..............................................end`; +USE `db_new..............................................end`; +CREATE TABLE `t1_new..............................................end`(a int) engine=rocksdb; +INSERT INTO `t1_new..............................................end` VALUES (1); +SELECT TABLE_SCHEMA, TABLE_NAME FROM information_schema.table_statistics WHERE TABLE_NAME = 't1_new..............................................end'; +TABLE_SCHEMA db_new..............................................end +TABLE_NAME t1_new..............................................end +DROP DATABASE `db_new..............................................end`; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/shutdown.result b/storage/rocksdb/mysql-test/rocksdb/r/shutdown.result new file mode 100644 index 0000000000000..f40aceffd798e --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/shutdown.result @@ -0,0 +1,9 @@ +DROP TABLE IF EXISTS t1; +SHOW GLOBAL VARIABLES LIKE "log_bin"; +Variable_name Value +log_bin ON +CREATE TABLE t1 (i INT, PRIMARY KEY (i) COMMENT 'cf_t1') ENGINE = ROCKSDB; +SELECT COUNT(*) FROM t1; +COUNT(*) +1000 +DROP TABLE t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/singledelete.result b/storage/rocksdb/mysql-test/rocksdb/r/singledelete.result new file mode 100644 index 0000000000000..ef9fafc852a04 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/singledelete.result @@ -0,0 +1,66 @@ +CREATE TABLE t1 (id INT, value int, PRIMARY KEY (id), INDEX (value)) ENGINE=RocksDB; +INSERT INTO t1 VALUES (1,1); +select variable_value into @s from information_schema.global_status where variable_name='rocksdb_number_sst_entry_singledelete'; +select variable_value into @d from information_schema.global_status where variable_name='rocksdb_number_sst_entry_delete'; +optimize table t1; +Table Op Msg_type Msg_text +test.t1 optimize status OK +select case when 
variable_value-@s > 5 and variable_value-@s < 100 then 'true' else 'false' end from information_schema.global_status where variable_name='rocksdb_number_sst_entry_singledelete'; +case when variable_value-@s > 5 and variable_value-@s < 100 then 'true' else 'false' end +true +select case when variable_value-@d < 10 then 'true' else 'false' end from information_schema.global_status where variable_name='rocksdb_number_sst_entry_delete'; +case when variable_value-@d < 10 then 'true' else 'false' end +true +CREATE TABLE t2 (id INT, value int, PRIMARY KEY (id), INDEX (value)) ENGINE=RocksDB; +INSERT INTO t2 VALUES (1,1); +select variable_value into @s from information_schema.global_status where variable_name='rocksdb_number_sst_entry_singledelete'; +select variable_value into @d from information_schema.global_status where variable_name='rocksdb_number_sst_entry_delete'; +optimize table t2; +Table Op Msg_type Msg_text +test.t2 optimize status OK +select case when variable_value-@s > 5 and variable_value-@s < 100 then 'true' else 'false' end from information_schema.global_status where variable_name='rocksdb_number_sst_entry_singledelete'; +case when variable_value-@s > 5 and variable_value-@s < 100 then 'true' else 'false' end +true +select case when variable_value-@d > 9000 then 'true' else 'false' end from information_schema.global_status where variable_name='rocksdb_number_sst_entry_delete'; +case when variable_value-@d > 9000 then 'true' else 'false' end +true +CREATE TABLE t3 (id INT, value int, PRIMARY KEY (id)) ENGINE=RocksDB; +INSERT INTO t3 VALUES (1,1); +select variable_value into @s from information_schema.global_status where variable_name='rocksdb_number_sst_entry_singledelete'; +select variable_value into @d from information_schema.global_status where variable_name='rocksdb_number_sst_entry_delete'; +optimize table t3; +Table Op Msg_type Msg_text +test.t3 optimize status OK +select case when variable_value-@s = 0 then 'true' else 'false' end from 
information_schema.global_status where variable_name='rocksdb_number_sst_entry_singledelete'; +case when variable_value-@s = 0 then 'true' else 'false' end +true +select case when variable_value-@d > 9000 then 'true' else 'false' end from information_schema.global_status where variable_name='rocksdb_number_sst_entry_delete'; +case when variable_value-@d > 9000 then 'true' else 'false' end +true +CREATE TABLE t4 (id INT, PRIMARY KEY (id)) ENGINE=RocksDB; +INSERT INTO t4 VALUES (1); +select variable_value into @s from information_schema.global_status where variable_name='rocksdb_number_sst_entry_singledelete'; +select variable_value into @d from information_schema.global_status where variable_name='rocksdb_number_sst_entry_delete'; +optimize table t4; +Table Op Msg_type Msg_text +test.t4 optimize status OK +select case when variable_value-@s > 5 and variable_value-@s < 100 then 'true' else 'false' end from information_schema.global_status where variable_name='rocksdb_number_sst_entry_singledelete'; +case when variable_value-@s > 5 and variable_value-@s < 100 then 'true' else 'false' end +true +select case when variable_value-@d < 10 then 'true' else 'false' end from information_schema.global_status where variable_name='rocksdb_number_sst_entry_delete'; +case when variable_value-@d < 10 then 'true' else 'false' end +true +CREATE TABLE t5 (id1 INT, id2 INT, PRIMARY KEY (id1, id2), INDEX(id2)) ENGINE=RocksDB; +INSERT INTO t5 VALUES (1, 1); +select variable_value into @s from information_schema.global_status where variable_name='rocksdb_number_sst_entry_singledelete'; +select variable_value into @d from information_schema.global_status where variable_name='rocksdb_number_sst_entry_delete'; +optimize table t5; +Table Op Msg_type Msg_text +test.t5 optimize status OK +select case when variable_value-@s > 5 and variable_value-@s < 100 then 'true' else 'false' end from information_schema.global_status where variable_name='rocksdb_number_sst_entry_singledelete'; +case when 
variable_value-@s > 5 and variable_value-@s < 100 then 'true' else 'false' end +true +select case when variable_value-@d < 10 then 'true' else 'false' end from information_schema.global_status where variable_name='rocksdb_number_sst_entry_delete'; +case when variable_value-@d < 10 then 'true' else 'false' end +true +DROP TABLE t1, t2, t3, t4, t5; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/slow_query_log.result b/storage/rocksdb/mysql-test/rocksdb/r/slow_query_log.result new file mode 100644 index 0000000000000..e8a11363dbad9 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/slow_query_log.result @@ -0,0 +1,10 @@ +SET @cur_long_query_time = @@long_query_time; +SET @@long_query_time = 600; +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 (id INT PRIMARY KEY, value INT) ENGINE=ROCKSDB; +SET @@long_query_time = 0; +SELECT COUNT(*) FROM t1; +COUNT(*) +7500 +SET @@long_query_time = @cur_long_query_time; +DROP TABLE t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/statistics.result b/storage/rocksdb/mysql-test/rocksdb/r/statistics.result new file mode 100644 index 0000000000000..783449913604a --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/statistics.result @@ -0,0 +1,69 @@ +DROP TABLE IF EXISTS t1; +DROP TABLE IF EXISTS t2; +DROP TABLE IF EXISTS t3; +create table t1( +id bigint not null primary key auto_increment, +a varchar(255) not null, +b bigint, +index t1_1(b) +) engine=rocksdb; +create table t2( +id bigint not null primary key auto_increment, +a varchar(255) not null, +b bigint, +index t2_1(b) comment 'cf_t3' +) engine=rocksdb; +create table t3( +id bigint not null primary key auto_increment, +a varchar(255) not null, +b bigint, +index t3_1(b) comment 'rev:cf_t4' +) engine=rocksdb; +SELECT table_name, table_rows FROM information_schema.tables WHERE table_schema = DATABASE() and table_name <> 't1'; +table_name table_rows +t2 1000 +t3 1000 +SELECT CASE WHEN table_rows < 100000 then 'true' else 'false' end from information_schema.tables where 
table_name = 't1'; +CASE WHEN table_rows < 100000 then 'true' else 'false' end +true +set global rocksdb_force_flush_memtable_now = true; +SELECT table_name, table_rows FROM information_schema.tables WHERE table_schema = DATABASE(); +table_name table_rows +t1 100000 +t2 4999 +t3 4999 +SELECT table_name, data_length>0, index_length>0 FROM information_schema.tables WHERE table_schema = DATABASE(); +table_name data_length>0 index_length>0 +t1 1 1 +t2 1 1 +t3 1 1 +SELECT table_name, table_rows FROM information_schema.tables WHERE table_schema = DATABASE(); +table_name table_rows +t1 100000 +t2 4999 +t3 4999 +SELECT table_name, data_length>0, index_length>0 FROM information_schema.tables WHERE table_schema = DATABASE(); +table_name data_length>0 index_length>0 +t1 1 1 +t2 1 1 +t3 1 1 +analyze table t1,t2,t3,t4,t5; +Table Op Msg_type Msg_text +test.t1 analyze status OK +test.t2 analyze status OK +test.t3 analyze status OK +test.t4 analyze Error Table 'test.t4' doesn't exist +test.t4 analyze status Operation failed +test.t5 analyze Error Table 'test.t5' doesn't exist +test.t5 analyze status Operation failed +SELECT table_name, table_rows FROM information_schema.tables WHERE table_schema = DATABASE(); +table_name table_rows +t1 100000 +t2 4999 +t3 4999 +SELECT table_name, data_length>0, index_length>0 FROM information_schema.tables WHERE table_schema = DATABASE(); +table_name data_length>0 index_length>0 +t1 1 1 +t2 1 1 +t3 1 1 +drop table t1, t2, t3; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/table_stats.result b/storage/rocksdb/mysql-test/rocksdb/r/table_stats.result new file mode 100644 index 0000000000000..31cb1b6477bf7 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/table_stats.result @@ -0,0 +1,12 @@ +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 (i INT, PRIMARY KEY (i) COMMENT 'cf_t1') ENGINE = ROCKSDB; +SELECT COUNT(*) FROM t1; +COUNT(*) +1000 +SELECT * FROM INFORMATION_SCHEMA.TABLE_STATISTICS WHERE TABLE_NAME = "t1"; +TABLE_SCHEMA test +TABLE_NAME t1 
+ROWS_READ 1000 +ROWS_CHANGED 1000 +ROWS_CHANGED_X_INDEXES 1000 +DROP TABLE t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/tbl_opt_ai.result b/storage/rocksdb/mysql-test/rocksdb/r/tbl_opt_ai.result new file mode 100644 index 0000000000000..7cc0cc7cd98c0 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/tbl_opt_ai.result @@ -0,0 +1,38 @@ +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 (a INT AUTO_INCREMENT PRIMARY KEY) ENGINE=rocksdb AUTO_INCREMENT=10; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` int(11) NOT NULL AUTO_INCREMENT, + PRIMARY KEY (`a`) +) ENGINE=ROCKSDB AUTO_INCREMENT=10 DEFAULT CHARSET=latin1 +INSERT INTO t1 VALUES (NULL); +SELECT * FROM t1; +a +10 +ALTER TABLE t1 AUTO_INCREMENT=100; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` int(11) NOT NULL AUTO_INCREMENT, + PRIMARY KEY (`a`) +) ENGINE=ROCKSDB AUTO_INCREMENT=100 DEFAULT CHARSET=latin1 +INSERT INTO t1 VALUES (NULL); +SELECT * FROM t1 ORDER BY a; +a +10 +100 +ALTER TABLE t1 AUTO_INCREMENT=50; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` int(11) NOT NULL AUTO_INCREMENT, + PRIMARY KEY (`a`) +) ENGINE=ROCKSDB AUTO_INCREMENT=101 DEFAULT CHARSET=latin1 +INSERT INTO t1 VALUES (NULL); +SELECT * FROM t1 ORDER BY a; +a +10 +100 +101 +DROP TABLE t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/tbl_opt_avg_row_length.result b/storage/rocksdb/mysql-test/rocksdb/r/tbl_opt_avg_row_length.result new file mode 100644 index 0000000000000..f904c04e0fb51 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/tbl_opt_avg_row_length.result @@ -0,0 +1,18 @@ +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 (a INT PRIMARY KEY, b CHAR(8)) ENGINE=rocksdb AVG_ROW_LENGTH=300; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` int(11) NOT NULL, + `b` char(8) DEFAULT NULL, + PRIMARY KEY (`a`) +) ENGINE=ROCKSDB DEFAULT CHARSET=latin1 AVG_ROW_LENGTH=300 +ALTER TABLE t1 AVG_ROW_LENGTH=30000000; +SHOW CREATE TABLE t1; 
+Table Create Table +t1 CREATE TABLE `t1` ( + `a` int(11) NOT NULL, + `b` char(8) DEFAULT NULL, + PRIMARY KEY (`a`) +) ENGINE=ROCKSDB DEFAULT CHARSET=latin1 AVG_ROW_LENGTH=30000000 +DROP TABLE t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/tbl_opt_checksum.result b/storage/rocksdb/mysql-test/rocksdb/r/tbl_opt_checksum.result new file mode 100644 index 0000000000000..d9cc69ee2a1d8 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/tbl_opt_checksum.result @@ -0,0 +1,18 @@ +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 (a INT PRIMARY KEY, b CHAR(8)) ENGINE=rocksdb CHECKSUM=1; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` int(11) NOT NULL, + `b` char(8) DEFAULT NULL, + PRIMARY KEY (`a`) +) ENGINE=ROCKSDB DEFAULT CHARSET=latin1 CHECKSUM=1 +ALTER TABLE t1 CHECKSUM=0; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` int(11) NOT NULL, + `b` char(8) DEFAULT NULL, + PRIMARY KEY (`a`) +) ENGINE=ROCKSDB DEFAULT CHARSET=latin1 +DROP TABLE t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/tbl_opt_connection.result b/storage/rocksdb/mysql-test/rocksdb/r/tbl_opt_connection.result new file mode 100644 index 0000000000000..0beddd9f6e3b0 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/tbl_opt_connection.result @@ -0,0 +1,26 @@ +DROP TABLE IF EXISTS t1; +CREATE DATABASE test_remote; +CREATE SERVER test_connection FOREIGN DATA WRAPPER mysql +OPTIONS (USER 'root', HOST 'localhost', DATABASE 'test_remote'); +CREATE SERVER test_connection2 FOREIGN DATA WRAPPER mysql +OPTIONS (USER 'root', HOST 'localhost', DATABASE 'test_remote'); +CREATE TABLE t1 (a INT PRIMARY KEY, b CHAR(8)) ENGINE=rocksdb CONNECTION='test_connection'; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` int(11) NOT NULL, + `b` char(8) DEFAULT NULL, + PRIMARY KEY (`a`) +) ENGINE=ROCKSDB DEFAULT CHARSET=latin1 CONNECTION='test_connection' +ALTER TABLE t1 CONNECTION='test_connection2'; +SHOW CREATE TABLE t1; +Table Create Table 
+t1 CREATE TABLE `t1` ( + `a` int(11) NOT NULL, + `b` char(8) DEFAULT NULL, + PRIMARY KEY (`a`) +) ENGINE=ROCKSDB DEFAULT CHARSET=latin1 CONNECTION='test_connection2' +DROP TABLE t1; +DROP SERVER test_connection; +DROP SERVER test_connection2; +DROP DATABASE test_remote; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/tbl_opt_data_index_dir.result b/storage/rocksdb/mysql-test/rocksdb/r/tbl_opt_data_index_dir.result new file mode 100644 index 0000000000000..d1e445f734c29 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/tbl_opt_data_index_dir.result @@ -0,0 +1,33 @@ +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 (a INT PRIMARY KEY, b CHAR(8)) ENGINE=rocksdb DATA DIRECTORY = '/foo/bar/data'; +ERROR HY000: Can't create table `test`.`t1` (errno: 140 "Wrong create options") +show warnings; +Level Code Message +Warning 1296 Got error 198 'Specifying DATA DIRECTORY for an individual table is not supported.' from ROCKSDB +Error 1005 Can't create table `test`.`t1` (errno: 140 "Wrong create options") +Warning 1030 Got error 140 "Wrong create options" from storage engine ROCKSDB +CREATE TABLE t1 (a INT PRIMARY KEY, b CHAR(8)) ENGINE=rocksdb INDEX DIRECTORY = '/foo/bar/index'; +ERROR HY000: Can't create table `test`.`t1` (errno: 140 "Wrong create options") +show warnings; +Level Code Message +Warning 1296 Got error 199 'Specifying INDEX DIRECTORY for an individual table is not supported.' 
from ROCKSDB +Error 1005 Can't create table `test`.`t1` (errno: 140 "Wrong create options") +Warning 1030 Got error 140 "Wrong create options" from storage engine ROCKSDB +CREATE TABLE t1 (id INT NOT NULL PRIMARY KEY) ENGINE=rocksdb PARTITION BY RANGE (id) +( +PARTITION P0 VALUES LESS THAN (1000) +DATA DIRECTORY = '/foo/bar/data/', +PARTITION P1 VALUES LESS THAN (2000) +DATA DIRECTORY = '/foo/bar/data/', +PARTITION P2 VALUES LESS THAN (MAXVALUE) +); +ERROR HY000: Can't create table `test`.`t1` (errno: 140 "Wrong create options") +CREATE TABLE t1 (id int not null primary key) ENGINE=rocksdb PARTITION BY RANGE (id) +( +PARTITION P0 VALUES LESS THAN (1000) +INDEX DIRECTORY = '/foo/bar/data/', +PARTITION P1 VALUES LESS THAN (2000) +INDEX DIRECTORY = '/foo/bar/data/', +PARTITION P2 VALUES LESS THAN (MAXVALUE) +); +ERROR HY000: Can't create table `test`.`t1` (errno: 140 "Wrong create options") diff --git a/storage/rocksdb/mysql-test/rocksdb/r/tbl_opt_delay_key_write.result b/storage/rocksdb/mysql-test/rocksdb/r/tbl_opt_delay_key_write.result new file mode 100644 index 0000000000000..c5d1ad8ace910 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/tbl_opt_delay_key_write.result @@ -0,0 +1,18 @@ +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 (a INT PRIMARY KEY, b CHAR(8)) ENGINE=rocksdb DELAY_KEY_WRITE=1; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` int(11) NOT NULL, + `b` char(8) DEFAULT NULL, + PRIMARY KEY (`a`) +) ENGINE=ROCKSDB DEFAULT CHARSET=latin1 DELAY_KEY_WRITE=1 +ALTER TABLE t1 DELAY_KEY_WRITE=0; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` int(11) NOT NULL, + `b` char(8) DEFAULT NULL, + PRIMARY KEY (`a`) +) ENGINE=ROCKSDB DEFAULT CHARSET=latin1 +DROP TABLE t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/tbl_opt_insert_method.result b/storage/rocksdb/mysql-test/rocksdb/r/tbl_opt_insert_method.result new file mode 100644 index 0000000000000..bd5e65f59c4f4 --- /dev/null +++ 
b/storage/rocksdb/mysql-test/rocksdb/r/tbl_opt_insert_method.result @@ -0,0 +1,18 @@ +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 (a INT PRIMARY KEY, b CHAR(8)) ENGINE=rocksdb INSERT_METHOD=FIRST; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` int(11) NOT NULL, + `b` char(8) DEFAULT NULL, + PRIMARY KEY (`a`) +) ENGINE=ROCKSDB DEFAULT CHARSET=latin1 +ALTER TABLE t1 INSERT_METHOD=NO; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` int(11) NOT NULL, + `b` char(8) DEFAULT NULL, + PRIMARY KEY (`a`) +) ENGINE=ROCKSDB DEFAULT CHARSET=latin1 +DROP TABLE t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/tbl_opt_key_block_size.result b/storage/rocksdb/mysql-test/rocksdb/r/tbl_opt_key_block_size.result new file mode 100644 index 0000000000000..6c34d08b7eb9c --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/tbl_opt_key_block_size.result @@ -0,0 +1,18 @@ +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 (a INT PRIMARY KEY, b CHAR(8)) ENGINE=rocksdb KEY_BLOCK_SIZE=8; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` int(11) NOT NULL, + `b` char(8) DEFAULT NULL, + PRIMARY KEY (`a`) +) ENGINE=ROCKSDB DEFAULT CHARSET=latin1 KEY_BLOCK_SIZE=8 +ALTER TABLE t1 KEY_BLOCK_SIZE=1; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` int(11) NOT NULL, + `b` char(8) DEFAULT NULL, + PRIMARY KEY (`a`) KEY_BLOCK_SIZE=8 +) ENGINE=ROCKSDB DEFAULT CHARSET=latin1 KEY_BLOCK_SIZE=1 +DROP TABLE t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/tbl_opt_max_rows.result b/storage/rocksdb/mysql-test/rocksdb/r/tbl_opt_max_rows.result new file mode 100644 index 0000000000000..679e00e0771ad --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/tbl_opt_max_rows.result @@ -0,0 +1,18 @@ +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 (a INT PRIMARY KEY, b CHAR(8)) ENGINE=rocksdb MAX_ROWS=10000000; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` int(11) NOT NULL, + `b` char(8) DEFAULT NULL, 
+ PRIMARY KEY (`a`) +) ENGINE=ROCKSDB DEFAULT CHARSET=latin1 MAX_ROWS=10000000 +ALTER TABLE t1 MAX_ROWS=30000000; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` int(11) NOT NULL, + `b` char(8) DEFAULT NULL, + PRIMARY KEY (`a`) +) ENGINE=ROCKSDB DEFAULT CHARSET=latin1 MAX_ROWS=30000000 +DROP TABLE t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/tbl_opt_min_rows.result b/storage/rocksdb/mysql-test/rocksdb/r/tbl_opt_min_rows.result new file mode 100644 index 0000000000000..bc650434b7a79 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/tbl_opt_min_rows.result @@ -0,0 +1,18 @@ +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 (a INT PRIMARY KEY, b CHAR(8)) ENGINE=rocksdb MIN_ROWS=1; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` int(11) NOT NULL, + `b` char(8) DEFAULT NULL, + PRIMARY KEY (`a`) +) ENGINE=ROCKSDB DEFAULT CHARSET=latin1 MIN_ROWS=1 +ALTER TABLE t1 MIN_ROWS=10000; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` int(11) NOT NULL, + `b` char(8) DEFAULT NULL, + PRIMARY KEY (`a`) +) ENGINE=ROCKSDB DEFAULT CHARSET=latin1 MIN_ROWS=10000 +DROP TABLE t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/tbl_opt_pack_keys.result b/storage/rocksdb/mysql-test/rocksdb/r/tbl_opt_pack_keys.result new file mode 100644 index 0000000000000..b42d3f4d4502f --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/tbl_opt_pack_keys.result @@ -0,0 +1,18 @@ +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 (a INT PRIMARY KEY, b CHAR(8)) ENGINE=rocksdb PACK_KEYS=1; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` int(11) NOT NULL, + `b` char(8) DEFAULT NULL, + PRIMARY KEY (`a`) +) ENGINE=ROCKSDB DEFAULT CHARSET=latin1 PACK_KEYS=1 +ALTER TABLE t1 PACK_KEYS=0; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` int(11) NOT NULL, + `b` char(8) DEFAULT NULL, + PRIMARY KEY (`a`) +) ENGINE=ROCKSDB DEFAULT CHARSET=latin1 PACK_KEYS=0 +DROP TABLE t1; diff --git 
a/storage/rocksdb/mysql-test/rocksdb/r/tbl_opt_password.result b/storage/rocksdb/mysql-test/rocksdb/r/tbl_opt_password.result new file mode 100644 index 0000000000000..80ec79497ce5b --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/tbl_opt_password.result @@ -0,0 +1,18 @@ +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 (a INT PRIMARY KEY, b CHAR(8)) ENGINE=rocksdb PASSWORD='password'; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` int(11) NOT NULL, + `b` char(8) DEFAULT NULL, + PRIMARY KEY (`a`) +) ENGINE=ROCKSDB DEFAULT CHARSET=latin1 +ALTER TABLE t1 PASSWORD='new_password'; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` int(11) NOT NULL, + `b` char(8) DEFAULT NULL, + PRIMARY KEY (`a`) +) ENGINE=ROCKSDB DEFAULT CHARSET=latin1 +DROP TABLE t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/tbl_opt_row_format.result b/storage/rocksdb/mysql-test/rocksdb/r/tbl_opt_row_format.result new file mode 100644 index 0000000000000..a0c480eaede85 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/tbl_opt_row_format.result @@ -0,0 +1,18 @@ +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 (a INT, b CHAR(8) PRIMARY KEY) ENGINE=rocksdb ROW_FORMAT=FIXED; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` int(11) DEFAULT NULL, + `b` char(8) NOT NULL, + PRIMARY KEY (`b`) +) ENGINE=ROCKSDB DEFAULT CHARSET=latin1 ROW_FORMAT=FIXED +ALTER TABLE t1 ROW_FORMAT=DYNAMIC; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` int(11) DEFAULT NULL, + `b` char(8) NOT NULL, + PRIMARY KEY (`b`) +) ENGINE=ROCKSDB DEFAULT CHARSET=latin1 ROW_FORMAT=DYNAMIC +DROP TABLE t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/tbl_opt_union.result b/storage/rocksdb/mysql-test/rocksdb/r/tbl_opt_union.result new file mode 100644 index 0000000000000..fb021dbda1800 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/tbl_opt_union.result @@ -0,0 +1,16 @@ +DROP TABLE IF EXISTS t1, child1, child2; +CREATE TABLE t1 
(a INT PRIMARY KEY) ENGINE=rocksdb UNION(child1); +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` int(11) NOT NULL, + PRIMARY KEY (`a`) +) ENGINE=ROCKSDB DEFAULT CHARSET=latin1 +ALTER TABLE t1 UNION = (child1,child2); +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` int(11) NOT NULL, + PRIMARY KEY (`a`) +) ENGINE=ROCKSDB DEFAULT CHARSET=latin1 +DROP TABLE t1, child1, child2; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/tbl_standard_opts.result b/storage/rocksdb/mysql-test/rocksdb/r/tbl_standard_opts.result new file mode 100644 index 0000000000000..ecfc72932ccb1 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/tbl_standard_opts.result @@ -0,0 +1,46 @@ +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 (a INT PRIMARY KEY, b CHAR(8)) ENGINE=rocksdb +DEFAULT CHARACTER SET = utf8 +COLLATE = utf8_general_ci +COMMENT = 'standard table options' +; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` int(11) NOT NULL, + `b` char(8) DEFAULT NULL, + PRIMARY KEY (`a`) +) ENGINE=ROCKSDB DEFAULT CHARSET=utf8 COMMENT='standard table options' +ALTER TABLE t1 COMMENT = 'table altered'; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` int(11) NOT NULL, + `b` char(8) DEFAULT NULL, + PRIMARY KEY (`a`) +) ENGINE=ROCKSDB DEFAULT CHARSET=utf8 COMMENT='table altered' +ALTER TABLE t1 ENGINE=MEMORY; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` int(11) NOT NULL, + `b` char(8) DEFAULT NULL, + PRIMARY KEY (`a`) +) ENGINE=MEMORY DEFAULT CHARSET=utf8 COMMENT='table altered' +ALTER TABLE t1 ENGINE=rocksdb; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` int(11) NOT NULL, + `b` char(8) DEFAULT NULL, + PRIMARY KEY (`a`) +) ENGINE=ROCKSDB DEFAULT CHARSET=utf8 COMMENT='table altered' +ALTER TABLE t1 CHARACTER SET = latin1 COLLATE = latin1_swedish_ci; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` int(11) NOT NULL, + `b` 
char(8) CHARACTER SET utf8 DEFAULT NULL, + PRIMARY KEY (`a`) +) ENGINE=ROCKSDB DEFAULT CHARSET=latin1 COMMENT='table altered' +DROP TABLE t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/tmpdir.result b/storage/rocksdb/mysql-test/rocksdb/r/tmpdir.result new file mode 100644 index 0000000000000..4c1e02211788d --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/tmpdir.result @@ -0,0 +1,32 @@ +# If rocksdb_tmpdir is NULL or "", temporary file will be created in +# server configuration variable location(--tmpdir) +CREATE TABLE t1 (a INT PRIMARY KEY, b INT) ENGINE=RocksDB; +show session variables like 'rocksdb_tmpdir'; +Variable_name Value +rocksdb_tmpdir +# Connection con1 +connect con1,localhost,root; +show session variables like 'rocksdb_tmpdir'; +Variable_name Value +rocksdb_tmpdir +ALTER TABLE t1 ADD INDEX kb(b), ALGORITHM=INPLACE; +connection default; +disconnect con1; +DROP TABLE t1; +# rocksdb_tmpdir with valid location. +CREATE TABLE t1 (a INT PRIMARY KEY, b INT) ENGINE=RocksDB; +set @tmpdir = @@global.tmpdir; +set global rocksdb_tmpdir = @tmpdir; +show session variables like 'rocksdb_tmpdir'; +Variable_name Value +rocksdb_tmpdir +# Connection con3 +connect con2,localhost,root; +show session variables like 'rocksdb_tmpdir'; +Variable_name Value +rocksdb_tmpdir MYSQL_TMP_DIR/mysqld.1 +ALTER TABLE t1 ADD INDEX kb(b), ALGORITHM=INPLACE; +disconnect con2; +connection default; +set global rocksdb_tmpdir=NULL; +DROP TABLE t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/transaction.result b/storage/rocksdb/mysql-test/rocksdb/r/transaction.result new file mode 100644 index 0000000000000..fe13c1633a8e9 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/transaction.result @@ -0,0 +1,936 @@ +create table t1 (id int primary key, value int, value2 varchar(100), index(value)) engine=rocksdb; +insert into t1 values (1,1,1),(2,2,2),(3,3,3),(4,4,4),(5,5,5),(6,6,6),(8,8,8),(9,9,9),(10,10,10); +begin; +insert into t1 values (11,11,11); +select * from t1 
where id=1; +id value value2 +1 1 1 +select * from t1 where value=1; +id value value2 +1 1 1 +select value from t1 where value=1; +value +1 +select * from t1 where value2=1; +id value value2 +1 1 1 +select * from t1 where id=5; +id value value2 +5 5 5 +select * from t1 where value=5; +id value value2 +5 5 5 +select value from t1 where value=5; +value +5 +select * from t1 where value2=5; +id value value2 +5 5 5 +select * from t1 where id < 3 order by id; +id value value2 +1 1 1 +2 2 2 +select * from t1 where value < 3 order by id; +id value value2 +1 1 1 +2 2 2 +select value from t1 where value < 3 order by id; +value +1 +2 +select * from t1 where value2 < 3 order by id; +id value value2 +1 1 1 +2 2 2 +select * from t1 order by id; +id value value2 +1 1 1 +2 2 2 +3 3 3 +4 4 4 +5 5 5 +6 6 6 +8 8 8 +9 9 9 +10 10 10 +11 11 11 +select value from t1 order by id; +value +1 +2 +3 +4 +5 +6 +8 +9 +10 +11 +rollback; +begin; +insert into t1 values (7,7,7); +select * from t1 where id=1; +id value value2 +1 1 1 +select * from t1 where value=1; +id value value2 +1 1 1 +select value from t1 where value=1; +value +1 +select * from t1 where value2=1; +id value value2 +1 1 1 +select * from t1 where id=5; +id value value2 +5 5 5 +select * from t1 where value=5; +id value value2 +5 5 5 +select value from t1 where value=5; +value +5 +select * from t1 where value2=5; +id value value2 +5 5 5 +select * from t1 where id < 3 order by id; +id value value2 +1 1 1 +2 2 2 +select * from t1 where value < 3 order by id; +id value value2 +1 1 1 +2 2 2 +select value from t1 where value < 3 order by id; +value +1 +2 +select * from t1 where value2 < 3 order by id; +id value value2 +1 1 1 +2 2 2 +select * from t1 order by id; +id value value2 +1 1 1 +2 2 2 +3 3 3 +4 4 4 +5 5 5 +6 6 6 +7 7 7 +8 8 8 +9 9 9 +10 10 10 +select value from t1 order by id; +value +1 +2 +3 +4 +5 +6 +7 +8 +9 +10 +rollback; +begin; +update t1 set value2=100 where id=1; +select * from t1 where id=1; +id value value2 +1 1 100 
+select * from t1 where value=1; +id value value2 +1 1 100 +select value from t1 where value=1; +value +1 +select * from t1 where value2=1; +id value value2 +select * from t1 where id=5; +id value value2 +5 5 5 +select * from t1 where value=5; +id value value2 +5 5 5 +select value from t1 where value=5; +value +5 +select * from t1 where value2=5; +id value value2 +5 5 5 +select * from t1 where id < 3 order by id; +id value value2 +1 1 100 +2 2 2 +select * from t1 where value < 3 order by id; +id value value2 +1 1 100 +2 2 2 +select value from t1 where value < 3 order by id; +value +1 +2 +select * from t1 where value2 < 3 order by id; +id value value2 +2 2 2 +select * from t1 order by id; +id value value2 +1 1 100 +2 2 2 +3 3 3 +4 4 4 +5 5 5 +6 6 6 +8 8 8 +9 9 9 +10 10 10 +select value from t1 order by id; +value +1 +2 +3 +4 +5 +6 +8 +9 +10 +rollback; +begin; +update t1 set value=100 where id=1; +select * from t1 where id=1; +id value value2 +1 100 1 +select * from t1 where value=1; +id value value2 +select value from t1 where value=1; +value +select * from t1 where value2=1; +id value value2 +1 100 1 +select * from t1 where id=5; +id value value2 +5 5 5 +select * from t1 where value=5; +id value value2 +5 5 5 +select value from t1 where value=5; +value +5 +select * from t1 where value2=5; +id value value2 +5 5 5 +select * from t1 where id < 3 order by id; +id value value2 +1 100 1 +2 2 2 +select * from t1 where value < 3 order by id; +id value value2 +2 2 2 +select value from t1 where value < 3 order by id; +value +2 +select * from t1 where value2 < 3 order by id; +id value value2 +1 100 1 +2 2 2 +select * from t1 order by id; +id value value2 +1 100 1 +2 2 2 +3 3 3 +4 4 4 +5 5 5 +6 6 6 +8 8 8 +9 9 9 +10 10 10 +select value from t1 order by id; +value +100 +2 +3 +4 +5 +6 +8 +9 +10 +rollback; +begin; +update t1 set id=100 where id=1; +select * from t1 where id=1; +id value value2 +select * from t1 where value=1; +id value value2 +100 1 1 +select value from t1 where 
value=1; +value +1 +select * from t1 where value2=1; +id value value2 +100 1 1 +select * from t1 where id=5; +id value value2 +5 5 5 +select * from t1 where value=5; +id value value2 +5 5 5 +select value from t1 where value=5; +value +5 +select * from t1 where value2=5; +id value value2 +5 5 5 +select * from t1 where id < 3 order by id; +id value value2 +2 2 2 +select * from t1 where value < 3 order by id; +id value value2 +2 2 2 +100 1 1 +select value from t1 where value < 3 order by id; +value +2 +1 +select * from t1 where value2 < 3 order by id; +id value value2 +2 2 2 +100 1 1 +select * from t1 order by id; +id value value2 +2 2 2 +3 3 3 +4 4 4 +5 5 5 +6 6 6 +8 8 8 +9 9 9 +10 10 10 +100 1 1 +select value from t1 order by id; +value +2 +3 +4 +5 +6 +8 +9 +10 +1 +rollback; +begin; +update t1 set value2=100 where value=1; +select * from t1 where id=1; +id value value2 +1 1 100 +select * from t1 where value=1; +id value value2 +1 1 100 +select value from t1 where value=1; +value +1 +select * from t1 where value2=1; +id value value2 +select * from t1 where id=5; +id value value2 +5 5 5 +select * from t1 where value=5; +id value value2 +5 5 5 +select value from t1 where value=5; +value +5 +select * from t1 where value2=5; +id value value2 +5 5 5 +select * from t1 where id < 3 order by id; +id value value2 +1 1 100 +2 2 2 +select * from t1 where value < 3 order by id; +id value value2 +1 1 100 +2 2 2 +select value from t1 where value < 3 order by id; +value +1 +2 +select * from t1 where value2 < 3 order by id; +id value value2 +2 2 2 +select * from t1 order by id; +id value value2 +1 1 100 +2 2 2 +3 3 3 +4 4 4 +5 5 5 +6 6 6 +8 8 8 +9 9 9 +10 10 10 +select value from t1 order by id; +value +1 +2 +3 +4 +5 +6 +8 +9 +10 +rollback; +begin; +update t1 set value=100 where value=1; +select * from t1 where id=1; +id value value2 +1 100 1 +select * from t1 where value=1; +id value value2 +select value from t1 where value=1; +value +select * from t1 where value2=1; +id value 
value2 +1 100 1 +select * from t1 where id=5; +id value value2 +5 5 5 +select * from t1 where value=5; +id value value2 +5 5 5 +select value from t1 where value=5; +value +5 +select * from t1 where value2=5; +id value value2 +5 5 5 +select * from t1 where id < 3 order by id; +id value value2 +1 100 1 +2 2 2 +select * from t1 where value < 3 order by id; +id value value2 +2 2 2 +select value from t1 where value < 3 order by id; +value +2 +select * from t1 where value2 < 3 order by id; +id value value2 +1 100 1 +2 2 2 +select * from t1 order by id; +id value value2 +1 100 1 +2 2 2 +3 3 3 +4 4 4 +5 5 5 +6 6 6 +8 8 8 +9 9 9 +10 10 10 +select value from t1 order by id; +value +100 +2 +3 +4 +5 +6 +8 +9 +10 +rollback; +begin; +update t1 set id=100 where value=1; +select * from t1 where id=1; +id value value2 +select * from t1 where value=1; +id value value2 +100 1 1 +select value from t1 where value=1; +value +1 +select * from t1 where value2=1; +id value value2 +100 1 1 +select * from t1 where id=5; +id value value2 +5 5 5 +select * from t1 where value=5; +id value value2 +5 5 5 +select value from t1 where value=5; +value +5 +select * from t1 where value2=5; +id value value2 +5 5 5 +select * from t1 where id < 3 order by id; +id value value2 +2 2 2 +select * from t1 where value < 3 order by id; +id value value2 +2 2 2 +100 1 1 +select value from t1 where value < 3 order by id; +value +2 +1 +select * from t1 where value2 < 3 order by id; +id value value2 +2 2 2 +100 1 1 +select * from t1 order by id; +id value value2 +2 2 2 +3 3 3 +4 4 4 +5 5 5 +6 6 6 +8 8 8 +9 9 9 +10 10 10 +100 1 1 +select value from t1 order by id; +value +2 +3 +4 +5 +6 +8 +9 +10 +1 +rollback; +begin; +update t1 set value2=100 where value2=1; +select * from t1 where id=1; +id value value2 +1 1 100 +select * from t1 where value=1; +id value value2 +1 1 100 +select value from t1 where value=1; +value +1 +select * from t1 where value2=1; +id value value2 +select * from t1 where id=5; +id value value2 +5 5 
5 +select * from t1 where value=5; +id value value2 +5 5 5 +select value from t1 where value=5; +value +5 +select * from t1 where value2=5; +id value value2 +5 5 5 +select * from t1 where id < 3 order by id; +id value value2 +1 1 100 +2 2 2 +select * from t1 where value < 3 order by id; +id value value2 +1 1 100 +2 2 2 +select value from t1 where value < 3 order by id; +value +1 +2 +select * from t1 where value2 < 3 order by id; +id value value2 +2 2 2 +select * from t1 order by id; +id value value2 +1 1 100 +2 2 2 +3 3 3 +4 4 4 +5 5 5 +6 6 6 +8 8 8 +9 9 9 +10 10 10 +select value from t1 order by id; +value +1 +2 +3 +4 +5 +6 +8 +9 +10 +rollback; +begin; +update t1 set value=100 where value2=1; +select * from t1 where id=1; +id value value2 +1 100 1 +select * from t1 where value=1; +id value value2 +select value from t1 where value=1; +value +select * from t1 where value2=1; +id value value2 +1 100 1 +select * from t1 where id=5; +id value value2 +5 5 5 +select * from t1 where value=5; +id value value2 +5 5 5 +select value from t1 where value=5; +value +5 +select * from t1 where value2=5; +id value value2 +5 5 5 +select * from t1 where id < 3 order by id; +id value value2 +1 100 1 +2 2 2 +select * from t1 where value < 3 order by id; +id value value2 +2 2 2 +select value from t1 where value < 3 order by id; +value +2 +select * from t1 where value2 < 3 order by id; +id value value2 +1 100 1 +2 2 2 +select * from t1 order by id; +id value value2 +1 100 1 +2 2 2 +3 3 3 +4 4 4 +5 5 5 +6 6 6 +8 8 8 +9 9 9 +10 10 10 +select value from t1 order by id; +value +100 +2 +3 +4 +5 +6 +8 +9 +10 +rollback; +begin; +update t1 set id=100 where value2=1; +select * from t1 where id=1; +id value value2 +select * from t1 where value=1; +id value value2 +100 1 1 +select value from t1 where value=1; +value +1 +select * from t1 where value2=1; +id value value2 +100 1 1 +select * from t1 where id=5; +id value value2 +5 5 5 +select * from t1 where value=5; +id value value2 +5 5 5 +select 
value from t1 where value=5; +value +5 +select * from t1 where value2=5; +id value value2 +5 5 5 +select * from t1 where id < 3 order by id; +id value value2 +2 2 2 +select * from t1 where value < 3 order by id; +id value value2 +2 2 2 +100 1 1 +select value from t1 where value < 3 order by id; +value +2 +1 +select * from t1 where value2 < 3 order by id; +id value value2 +2 2 2 +100 1 1 +select * from t1 order by id; +id value value2 +2 2 2 +3 3 3 +4 4 4 +5 5 5 +6 6 6 +8 8 8 +9 9 9 +10 10 10 +100 1 1 +select value from t1 order by id; +value +2 +3 +4 +5 +6 +8 +9 +10 +1 +rollback; +begin; +delete from t1 where id=1; +select * from t1 where id=1; +id value value2 +select * from t1 where value=1; +id value value2 +select value from t1 where value=1; +value +select * from t1 where value2=1; +id value value2 +select * from t1 where id=5; +id value value2 +5 5 5 +select * from t1 where value=5; +id value value2 +5 5 5 +select value from t1 where value=5; +value +5 +select * from t1 where value2=5; +id value value2 +5 5 5 +select * from t1 where id < 3 order by id; +id value value2 +2 2 2 +select * from t1 where value < 3 order by id; +id value value2 +2 2 2 +select value from t1 where value < 3 order by id; +value +2 +select * from t1 where value2 < 3 order by id; +id value value2 +2 2 2 +select * from t1 order by id; +id value value2 +2 2 2 +3 3 3 +4 4 4 +5 5 5 +6 6 6 +8 8 8 +9 9 9 +10 10 10 +select value from t1 order by id; +value +2 +3 +4 +5 +6 +8 +9 +10 +rollback; +begin; +delete from t1 where value=1; +select * from t1 where id=1; +id value value2 +select * from t1 where value=1; +id value value2 +select value from t1 where value=1; +value +select * from t1 where value2=1; +id value value2 +select * from t1 where id=5; +id value value2 +5 5 5 +select * from t1 where value=5; +id value value2 +5 5 5 +select value from t1 where value=5; +value +5 +select * from t1 where value2=5; +id value value2 +5 5 5 +select * from t1 where id < 3 order by id; +id value value2 +2 
2 2 +select * from t1 where value < 3 order by id; +id value value2 +2 2 2 +select value from t1 where value < 3 order by id; +value +2 +select * from t1 where value2 < 3 order by id; +id value value2 +2 2 2 +select * from t1 order by id; +id value value2 +2 2 2 +3 3 3 +4 4 4 +5 5 5 +6 6 6 +8 8 8 +9 9 9 +10 10 10 +select value from t1 order by id; +value +2 +3 +4 +5 +6 +8 +9 +10 +rollback; +begin; +delete from t1 where value2=1; +select * from t1 where id=1; +id value value2 +select * from t1 where value=1; +id value value2 +select value from t1 where value=1; +value +select * from t1 where value2=1; +id value value2 +select * from t1 where id=5; +id value value2 +5 5 5 +select * from t1 where value=5; +id value value2 +5 5 5 +select value from t1 where value=5; +value +5 +select * from t1 where value2=5; +id value value2 +5 5 5 +select * from t1 where id < 3 order by id; +id value value2 +2 2 2 +select * from t1 where value < 3 order by id; +id value value2 +2 2 2 +select value from t1 where value < 3 order by id; +value +2 +select * from t1 where value2 < 3 order by id; +id value value2 +2 2 2 +select * from t1 order by id; +id value value2 +2 2 2 +3 3 3 +4 4 4 +5 5 5 +6 6 6 +8 8 8 +9 9 9 +10 10 10 +select value from t1 order by id; +value +2 +3 +4 +5 +6 +8 +9 +10 +rollback; +begin; +insert into t1 values (11,11,11); +insert into t1 values (12,12,12); +insert into t1 values (13,13,13); +delete from t1 where id=9; +delete from t1 where value=8; +update t1 set id=100 where value2=5; +update t1 set value=103 where value=4; +update t1 set id=115 where id=3; +select * from t1 where id=1; +id value value2 +1 1 1 +select * from t1 where value=1; +id value value2 +1 1 1 +select value from t1 where value=1; +value +1 +select * from t1 where value2=1; +id value value2 +1 1 1 +select * from t1 where id=5; +id value value2 +select * from t1 where value=5; +id value value2 +100 5 5 +select value from t1 where value=5; +value +5 +select * from t1 where value2=5; +id value 
value2 +100 5 5 +select * from t1 where id < 3 order by id; +id value value2 +1 1 1 +2 2 2 +select * from t1 where value < 3 order by id; +id value value2 +1 1 1 +2 2 2 +select value from t1 where value < 3 order by id; +value +1 +2 +select * from t1 where value2 < 3 order by id; +id value value2 +1 1 1 +2 2 2 +select * from t1 order by id; +id value value2 +1 1 1 +2 2 2 +4 103 4 +6 6 6 +10 10 10 +11 11 11 +12 12 12 +13 13 13 +100 5 5 +115 3 3 +select value from t1 order by id; +value +1 +2 +103 +6 +10 +11 +12 +13 +5 +3 +rollback; +drop table t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/truncate_table.result b/storage/rocksdb/mysql-test/rocksdb/r/truncate_table.result new file mode 100644 index 0000000000000..e6ff6e1ca3220 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/truncate_table.result @@ -0,0 +1,33 @@ +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 (a INT PRIMARY KEY, b CHAR(8)) ENGINE=rocksdb; +TRUNCATE TABLE t1; +INSERT INTO t1 (a,b) VALUES (1,'a'), (2,'b'), (3,'c'); +TRUNCATE TABLE t1; +SELECT a,b FROM t1; +a b +DROP TABLE t1; +CREATE TABLE t1 (a INT KEY AUTO_INCREMENT, c CHAR(8)) ENGINE=rocksdb; +SHOW TABLE STATUS LIKE 't1'; +Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length Index_length Data_free Auto_increment Create_time Update_time Check_time Collation Checksum Create_options Comment +t1 ROCKSDB 10 Fixed # # # 0 0 0 1 NULL NULL NULL latin1_swedish_ci NULL +INSERT INTO t1 (c) VALUES ('a'),('b'),('c'); +SHOW TABLE STATUS LIKE 't1'; +Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length Index_length Data_free Auto_increment Create_time Update_time Check_time Collation Checksum Create_options Comment +t1 ROCKSDB 10 Fixed # # # 0 0 0 4 NULL NULL NULL latin1_swedish_ci NULL +TRUNCATE TABLE t1; +SHOW TABLE STATUS LIKE 't1'; +Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length Index_length Data_free Auto_increment Create_time Update_time Check_time Collation 
Checksum Create_options Comment +t1 ROCKSDB 10 Fixed # # # 0 0 0 1 NULL NULL NULL latin1_swedish_ci NULL +INSERT INTO t1 (c) VALUES ('d'); +SHOW TABLE STATUS LIKE 't1'; +Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length Index_length Data_free Auto_increment Create_time Update_time Check_time Collation Checksum Create_options Comment +t1 ROCKSDB 10 Fixed # # # 0 0 0 2 NULL NULL NULL latin1_swedish_ci NULL +SELECT a,c FROM t1; +a c +1 d +DROP TABLE t1; +CREATE TABLE t1 (a INT PRIMARY KEY, b CHAR(8)) ENGINE=rocksdb; +INSERT INTO t1 (a,b) VALUES (1,'a'),(2,'b'),(3,'c'); +HANDLER t1 OPEN AS h1; +ERROR HY000: Storage engine ROCKSDB of the table `test`.`t1` doesn't have this option +DROP TABLE t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/truncate_table3.result b/storage/rocksdb/mysql-test/rocksdb/r/truncate_table3.result new file mode 100644 index 0000000000000..813f651be62fd --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/truncate_table3.result @@ -0,0 +1,20 @@ +call mtr.add_suppression("Column family 'cf1' not found"); +call mtr.add_suppression("Column family 'rev:cf2' not found"); +DROP TABLE IF EXISTS t1; +set global rocksdb_compact_cf = 'cf1'; +set global rocksdb_compact_cf = 'rev:cf2'; +set global rocksdb_signal_drop_index_thread = 1; +CREATE TABLE t1 ( +a int not null, +b int not null, +c varchar(500) not null, +primary key (a,b) comment 'cf1', +key (b) comment 'rev:cf2' +) ENGINE=RocksDB; +DELETE FROM t1; +select variable_value into @a from information_schema.global_status where variable_name='rocksdb_compact_read_bytes'; +truncate table t1; +select case when variable_value-@a < 500000 then 'true' else 'false' end from information_schema.global_status where variable_name='rocksdb_compact_read_bytes'; +case when variable_value-@a < 500000 then 'true' else 'false' end +true +DROP TABLE IF EXISTS t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/trx_info.result b/storage/rocksdb/mysql-test/rocksdb/r/trx_info.result 
new file mode 100644 index 0000000000000..ada2e127021e4 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/trx_info.result @@ -0,0 +1,13 @@ +DROP TABLE IF EXISTS t1; +create table t1 (a int) engine=rocksdb; +insert into t1 values (1); +insert into t1 values (2); +set autocommit=0; +select * from t1 for update; +a +1 +2 +select * from information_schema.rocksdb_trx; +TRANSACTION_ID STATE NAME WRITE_COUNT LOCK_COUNT TIMEOUT_SEC WAITING_KEY WAITING_COLUMN_FAMILY_ID IS_REPLICATION SKIP_TRX_API READ_ONLY HAS_DEADLOCK_DETECTION NUM_ONGOING_BULKLOAD THREAD_ID QUERY +_TRX_ID_ STARTED _NAME_ 0 2 1 _KEY_ 0 0 0 0 0 0 _THREAD_ID_ select * from information_schema.rocksdb_trx +DROP TABLE t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/trx_info_rpl.result b/storage/rocksdb/mysql-test/rocksdb/r/trx_info_rpl.result new file mode 100644 index 0000000000000..24466d982b912 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/trx_info_rpl.result @@ -0,0 +1,16 @@ +include/master-slave.inc +[connection master] +DROP TABLE IF EXISTS t1; +connection slave; +include/stop_slave.inc +connection master; +create table t1 (a int) engine=rocksdb; +connection slave; +show variables like 'rpl_skip_tx_api'; +Variable_name Value +rpl_skip_tx_api ON +include/start_slave.inc +found +connection master; +DROP TABLE t1; +include/rpl_end.inc diff --git a/storage/rocksdb/mysql-test/rocksdb/r/type_binary.result b/storage/rocksdb/mysql-test/rocksdb/r/type_binary.result new file mode 100644 index 0000000000000..bbdd6d210fbdb --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/type_binary.result @@ -0,0 +1,48 @@ +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 ( +b BINARY , +b0 BINARY(0) , +b1 BINARY(1) , +b20 BINARY(20) , +b255 BINARY(255) , +pk BINARY PRIMARY KEY +) ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +b binary(1) YES NULL +b0 binary(0) YES NULL +b1 binary(1) YES NULL +b20 binary(20) YES NULL +b255 binary(255) YES NULL +pk binary(1) NO PRI NULL +INSERT INTO 
t1 VALUES ('','','','','',''); +INSERT INTO t1 VALUES ('a','','b','abcdefghi klmnopqrst', 'Creating an article for the Knowledgebase is similar to asking questions. First, navigate to the category where you feel the article should be. Once there, double check that an article doesn\'t already exist which would work.','a'); +SELECT HEX(b), HEX(b0), HEX(b1), HEX(b20), HEX(b255), HEX(pk) FROM t1 ORDER BY pk; +HEX(b) HEX(b0) HEX(b1) HEX(b20) HEX(b255) HEX(pk) +00 00 0000000000000000000000000000000000000000 000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000 00 +61 62 616263646566676869206B6C6D6E6F7071727374 4372656174696E6720616E2061727469636C6520666F7220746865204B6E6F776C65646765626173652069732073696D696C617220746F2061736B696E67207175657374696F6E732E2046697273742C206E6176696761746520746F207468652063617465676F727920776865726520796F75206665656C207468652061727469636C652073686F756C642062652E204F6E63652074686572652C20646F75626C6520636865636B207468617420616E2061727469636C6520646F65736E277420616C726561647920657869737420776869636820776F756C6420776F726B2E00000000000000000000000000000000000000000000000000000000000000 61 +INSERT INTO t1 VALUES ('abc', 'a', 'abc', REPEAT('a',21), REPEAT('x',256),'b'); +Warnings: +Warning 1265 Data truncated for column 'b' at row 1 +Warning 1265 Data truncated for column 'b0' at row 1 +Warning 1265 Data truncated for column 'b1' at row 1 +Warning 1265 Data truncated for column 'b20' at row 1 +Warning 1265 Data truncated for column 'b255' at row 1 +INSERT INTO t1 SELECT b255, b255, b255, b255, 
CONCAT('a',b255,b255), 'c' FROM t1; +ERROR 23000: Duplicate entry 'c' for key 'PRIMARY' +SELECT HEX(b), HEX(b0), HEX(b1), HEX(b20), HEX(b255), HEX(pk) FROM t1 ORDER BY pk; +HEX(b) HEX(b0) HEX(b1) HEX(b20) HEX(b255) HEX(pk) +00 00 0000000000000000000000000000000000000000 000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000 00 +61 61 6161616161616161616161616161616161616161 787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878 62 +61 62 616263646566676869206B6C6D6E6F7071727374 4372656174696E6720616E2061727469636C6520666F7220746865204B6E6F776C65646765626173652069732073696D696C617220746F2061736B696E67207175657374696F6E732E2046697273742C206E6176696761746520746F207468652063617465676F727920776865726520796F75206665656C207468652061727469636C652073686F756C642062652E204F6E63652074686572652C20646F75626C6520636865636B207468617420616E2061727469636C6520646F65736E277420616C726561647920657869737420776869636820776F756C6420776F726B2E00000000000000000000000000000000000000000000000000000000000000 61 +ALTER TABLE t1 ADD COLUMN b257 BINARY(257) ; +ERROR 42000: Column length too big for 
column 'b257' (max = 255); use BLOB or TEXT instead +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +b binary(1) YES NULL +b0 binary(0) YES NULL +b1 binary(1) YES NULL +b20 binary(20) YES NULL +b255 binary(255) YES NULL +pk binary(1) NO PRI NULL +DROP TABLE t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/type_binary_indexes.result b/storage/rocksdb/mysql-test/rocksdb/r/type_binary_indexes.result new file mode 100644 index 0000000000000..c5cffdc1a0df2 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/type_binary_indexes.result @@ -0,0 +1,80 @@ +SET @ORIG_PAUSE_BACKGROUND_WORK = @@ROCKSDB_PAUSE_BACKGROUND_WORK; +SET GLOBAL ROCKSDB_PAUSE_BACKGROUND_WORK = 1; +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 (b BINARY, +b20 BINARY(20) PRIMARY KEY, +v16 VARBINARY(16), +v128 VARBINARY(128) +) ENGINE=rocksdb; +SHOW INDEX IN t1; +Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment +t1 0 PRIMARY 1 b20 A 1000 NULL NULL LSMTREE +INSERT INTO t1 (b,b20,v16,v128) VALUES ('a','char1','varchar1a','varchar1b'),('a','char2','varchar2a','varchar2b'),('b','char3','varchar1a','varchar1b'),('c','char4','varchar3a','varchar3b'); +EXPLAIN SELECT HEX(b20) FROM t1 ORDER BY b20; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 index NULL PRIMARY 20 NULL # Using index +SELECT HEX(b20) FROM t1 ORDER BY b20; +HEX(b20) +6368617231000000000000000000000000000000 +6368617232000000000000000000000000000000 +6368617233000000000000000000000000000000 +6368617234000000000000000000000000000000 +EXPLAIN SELECT HEX(b20) FROM t1 IGNORE INDEX (PRIMARY) ORDER BY b20 DESC; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 ALL NULL NULL NULL NULL # Using filesort +SELECT HEX(b20) FROM t1 ORDER BY b20 DESC; +HEX(b20) +6368617234000000000000000000000000000000 +6368617233000000000000000000000000000000 +6368617232000000000000000000000000000000 
+6368617231000000000000000000000000000000 +DROP TABLE t1; +CREATE TABLE t1 (b BINARY, +b20 BINARY(20), +v16 VARBINARY(16), +v128 VARBINARY(128), +pk VARBINARY(10) PRIMARY KEY, +INDEX (v16(10)) +) ENGINE=rocksdb; +SHOW INDEX IN t1; +Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment +t1 0 PRIMARY 1 pk A 1000 NULL NULL LSMTREE +t1 1 v16 1 v16 A 500 10 NULL YES LSMTREE +INSERT INTO t1 (b,b20,v16,v128,pk) VALUES ('a','char1','varchar1a','varchar1b',1),('a','char2','varchar2a','varchar2b',2),('b','char3','varchar1a','varchar1b',3),('c','char4','varchar3a','varchar3b',4),('d','char5','varchar4a','varchar3b',5),('e','char6','varchar2a','varchar3b',6); +INSERT INTO t1 (b,b20,v16,v128,pk) SELECT b,b20,v16,v128,pk+100 FROM t1; +EXPLAIN SELECT HEX(SUBSTRING(v16,0,3)) FROM t1 WHERE v16 LIKE 'varchar%'; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 range v16 v16 13 NULL # Using where +SELECT HEX(SUBSTRING(v16,7,3)) FROM t1 WHERE v16 LIKE 'varchar%'; +HEX(SUBSTRING(v16,7,3)) +723161 +723161 +723161 +723161 +723261 +723261 +723261 +723261 +723361 +723361 +723461 +723461 +EXPLAIN SELECT HEX(SUBSTRING(v16,0,3)) FROM t1 FORCE INDEX (v16) WHERE v16 LIKE 'varchar%'; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 range v16 v16 13 NULL # Using where +SELECT HEX(SUBSTRING(v16,7,3)) FROM t1 FORCE INDEX (v16) WHERE v16 LIKE 'varchar%'; +HEX(SUBSTRING(v16,7,3)) +723161 +723161 +723161 +723161 +723261 +723261 +723261 +723261 +723361 +723361 +723461 +723461 +DROP TABLE t1; +SET GLOBAL ROCKSDB_PAUSE_BACKGROUND_WORK = @ORIG_PAUSE_BACKGROUND_WORK; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/type_bit.result b/storage/rocksdb/mysql-test/rocksdb/r/type_bit.result new file mode 100644 index 0000000000000..fa84cbde8c8d1 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/type_bit.result @@ -0,0 +1,53 @@ +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 ( 
+a BIT , +b BIT(20) , +c BIT(64) , +d BIT(1) , +PRIMARY KEY (c) +) ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +a bit(1) YES NULL +b bit(20) YES NULL +c bit(64) NO PRI NULL +d bit(1) YES NULL +ALTER TABLE t1 DROP COLUMN d; +ALTER TABLE t1 ADD COLUMN d BIT(0) ; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +a bit(1) YES NULL +b bit(20) YES NULL +c bit(64) NO PRI NULL +d bit(1) YES NULL +INSERT INTO t1 (a,b,c,d) VALUES (0,POW(2,20)-1,b'1111111111111111111111111111111111111111111111111111111111111111',1); +SELECT BIN(a), HEX(b), c+0 FROM t1 WHERE d>0; +BIN(a) HEX(b) c+0 +0 FFFFF 18446744073709551615 +INSERT INTO t1 (a,b,c,d) VALUES (1,0,-2,0); +SELECT a+0, b+0, c+0 FROM t1 WHERE d<100; +a+0 b+0 c+0 +0 1048575 18446744073709551615 +1 0 18446744073709551614 +INSERT INTO t1 (a,b,c,d) VALUES (b'1', 'f', 0xFF, 0x0); +SELECT a+0, b+0, c+0 FROM t1 WHERE d IN (0, 2); +a+0 b+0 c+0 +1 0 18446744073709551614 +1 102 255 +DELETE FROM t1; +INSERT INTO t1 (a,b,c,d) VALUES (0x10,0,0,1); +Warnings: +Warning 1264 Out of range value for column 'a' at row 1 +SELECT a+0,b+0,c+0,d+0 FROM t1; +a+0 b+0 c+0 d+0 +1 0 0 1 +INSERT INTO t1 (a,b,c,d) VALUES (0x01,0,0x10000000000000000,0); +Warnings: +Warning 1264 Out of range value for column 'c' at row 1 +SELECT a+0,b+0,c+0,d+0 FROM t1; +a+0 b+0 c+0 d+0 +1 0 0 1 +1 0 18446744073709551615 0 +DROP TABLE t1; +CREATE TABLE t1 (pk INT PRIMARY KEY, a BIT(65) ) ENGINE=rocksdb; +ERROR 42000: Display width out of range for 'a' (max = 64) diff --git a/storage/rocksdb/mysql-test/rocksdb/r/type_bit_indexes.result b/storage/rocksdb/mysql-test/rocksdb/r/type_bit_indexes.result new file mode 100644 index 0000000000000..8da878eb0f22a --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/type_bit_indexes.result @@ -0,0 +1,58 @@ +SET @ORIG_PAUSE_BACKGROUND_WORK = @@ROCKSDB_PAUSE_BACKGROUND_WORK; +SET GLOBAL ROCKSDB_PAUSE_BACKGROUND_WORK = 1; +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 ( +a BIT, +b BIT(20) PRIMARY KEY, +c 
BIT(32), +d BIT(64) +) ENGINE=rocksdb; +SHOW INDEX IN t1; +Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment +t1 0 PRIMARY 1 b A 1000 NULL NULL LSMTREE +INSERT INTO t1 (a,b,c,d) VALUES +(0,0xFFFFF,0,1),(0,256,0xAAA,0x12345),(1,16,0,0xFFFFFFF),(0,11,12,13), +(1,100,101,102),(0,12,13,14),(1,13,14,15),(0,101,201,202),(1,1000,1001,1002), +(1,0xFFFF,0xFFFFFFFF,0xFFFFFFFFFFFFFFFF); +EXPLAIN SELECT b+0 FROM t1 ORDER BY b; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 index NULL PRIMARY 3 NULL # Using index +SELECT b+0 FROM t1 ORDER BY b; +b+0 +11 +12 +13 +16 +100 +101 +256 +1000 +65535 +1048575 +DROP TABLE t1; +# TODO: Unique indexes are not enforced +CREATE TABLE t1 ( +a BIT, +b BIT(20), +c BIT(32), +d BIT(64), +pk BIT(10) PRIMARY KEY, +INDEX(a) +) ENGINE=rocksdb; +SHOW INDEX IN t1; +Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment +t1 0 PRIMARY 1 pk A 1000 NULL NULL LSMTREE +t1 1 a 1 a A 500 NULL NULL YES LSMTREE +INSERT INTO t1 (a,b,c,d,pk) VALUES +(0,0xFFFFF,0,1,1),(0,256,0xAAA,0x12345,2),(1,16,0,0xFFFFFFF,3),(0,11,12,13,4), +(1,100,101,102,5),(0,12,13,14,6),(1,13,14,15,7),(0,101,201,202,8),(1,1000,1001,1002,9), +(1,0xFFFF,0xFFFFFFFF,0xFFFFFFFFFFFFFFFF,10); +EXPLAIN SELECT DISTINCT a+0 FROM t1 ORDER BY a; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 ALL NULL NULL NULL NULL # Using temporary; Using filesort +SELECT DISTINCT a+0 FROM t1 ORDER BY a; +a+0 +0 +1 +DROP TABLE t1; +SET GLOBAL ROCKSDB_PAUSE_BACKGROUND_WORK = @ORIG_PAUSE_BACKGROUND_WORK; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/type_blob.result b/storage/rocksdb/mysql-test/rocksdb/r/type_blob.result new file mode 100644 index 0000000000000..3b2bee74b6ac9 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/type_blob.result @@ -0,0 +1,57 @@ +DROP TABLE IF EXISTS t1; +CREATE 
TABLE t1 ( +pk INT AUTO_INCREMENT PRIMARY KEY, +b BLOB , +b0 BLOB(0) , +b1 BLOB(1) , +b300 BLOB(300) , +bm BLOB(65535) , +b70k BLOB(70000) , +b17m BLOB(17000000) , +t TINYBLOB , +m MEDIUMBLOB , +l LONGBLOB +) ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +pk int(11) NO PRI NULL auto_increment +b blob YES NULL +b0 blob YES NULL +b1 tinyblob YES NULL +b300 blob YES NULL +bm blob YES NULL +b70k mediumblob YES NULL +b17m longblob YES NULL +t tinyblob YES NULL +m mediumblob YES NULL +l longblob YES NULL +INSERT INTO t1 (b,b0,b1,b300,bm,b70k,b17m,t,m,l) VALUES +('','','','','','','','','',''), +('a','b','c','d','e','f','g','h','i','j'), +('test1','test2','test3','test4','test5','test6','test7','test8','test9','test10'), +( REPEAT('a',65535), REPEAT('b',65535), REPEAT('c',255), REPEAT('d',65535), REPEAT('e',65535), REPEAT('f',1048576), HEX(REPEAT('g',1048576)), REPEAT('h',255), REPEAT('i',1048576), HEX(REPEAT('j',1048576)) ); +SELECT LENGTH(b), LENGTH(b0), LENGTH(b1), LENGTH(b300), LENGTH(bm), LENGTH(b70k), LENGTH(b17m), LENGTH(t), LENGTH(m), LENGTH(l) FROM t1; +LENGTH(b) LENGTH(b0) LENGTH(b1) LENGTH(b300) LENGTH(bm) LENGTH(b70k) LENGTH(b17m) LENGTH(t) LENGTH(m) LENGTH(l) +0 0 0 0 0 0 0 0 0 0 +1 1 1 1 1 1 1 1 1 1 +5 5 5 5 5 5 5 5 5 6 +65535 65535 255 65535 65535 1048576 2097152 255 1048576 2097152 +INSERT INTO t1 (b,b0,b1,b300,bm,b70k,b17m,t,m,l) VALUES +( REPEAT('a',65536), REPEAT('b',65536), REPEAT('c',256), REPEAT('d',65536), REPEAT('e',65536), REPEAT('f',1048576), REPEAT('g',1048576), REPEAT('h',256), REPEAT('i',1048576), REPEAT('j',1048576) ); +Warnings: +Warning 1265 Data truncated for column 'b' at row 1 +Warning 1265 Data truncated for column 'b0' at row 1 +Warning 1265 Data truncated for column 'b1' at row 1 +Warning 1265 Data truncated for column 'b300' at row 1 +Warning 1265 Data truncated for column 'bm' at row 1 +Warning 1265 Data truncated for column 't' at row 1 +SELECT LENGTH(b), LENGTH(b0), LENGTH(b1), LENGTH(b300), LENGTH(bm), 
LENGTH(b70k), LENGTH(b17m), LENGTH(t), LENGTH(m), LENGTH(l) FROM t1; +LENGTH(b) LENGTH(b0) LENGTH(b1) LENGTH(b300) LENGTH(bm) LENGTH(b70k) LENGTH(b17m) LENGTH(t) LENGTH(m) LENGTH(l) +0 0 0 0 0 0 0 0 0 0 +1 1 1 1 1 1 1 1 1 1 +5 5 5 5 5 5 5 5 5 6 +65535 65535 255 65535 65535 1048576 1048576 255 1048576 1048576 +65535 65535 255 65535 65535 1048576 2097152 255 1048576 2097152 +ALTER TABLE t1 ADD COLUMN bbb BLOB(4294967296); +ERROR 42000: Display width out of range for 'bbb' (max = 4294967295) +DROP TABLE t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/type_blob_indexes.result b/storage/rocksdb/mysql-test/rocksdb/r/type_blob_indexes.result new file mode 100644 index 0000000000000..26726e0f6d101 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/type_blob_indexes.result @@ -0,0 +1,188 @@ +SET @ORIG_PAUSE_BACKGROUND_WORK = @@ROCKSDB_PAUSE_BACKGROUND_WORK; +SET GLOBAL ROCKSDB_PAUSE_BACKGROUND_WORK = 1; +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 ( +b BLOB, +t TINYBLOB, +m MEDIUMBLOB, +l LONGBLOB, +PRIMARY KEY b (b(32)) +) ENGINE=rocksdb; +SHOW INDEX IN t1; +Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment +t1 0 PRIMARY 1 b A 1000 32 NULL LSMTREE +INSERT INTO t1 (b,t,m,l) VALUES +('','','',''), +('a','b','c','d'), +('b','d','c','b'), +('test1','test2','test3','test4'), +(REPEAT('a',128),REPEAT('b',128),REPEAT('c',128),REPEAT('d',128)), +(HEX('abcd'),HEX('def'),HEX('a'),HEX('abc')), +('abc','def','ghi','jkl'), +('test2','test3','test4','test5'), +('test3','test4','test5','test6'), +(REPEAT('b',128),REPEAT('f',128),REPEAT('e',128),REPEAT('d',128)), +(REPEAT('c',128),REPEAT('b',128),REPEAT('c',128),REPEAT('e',128)); +EXPLAIN SELECT SUBSTRING(b,16) AS f FROM t1 WHERE b IN ('test1','test2') ORDER BY f; +id select_type table type possible_keys key key_len ref rows Extra +# # # # # PRIMARY # # # # +SELECT SUBSTRING(b,16) AS f FROM t1 WHERE b IN ('test1','test2') ORDER BY f; +f + + +EXPLAIN 
SELECT SUBSTRING(b,16) AS f FROM t1 USE INDEX () WHERE b IN ('test1','test2') ORDER BY f; +id select_type table type possible_keys key key_len ref rows Extra +# # # # # NULL # # # # +SELECT SUBSTRING(b,16) AS f FROM t1 USE INDEX () WHERE b IN ('test1','test2') ORDER BY f; +f + + +DROP TABLE t1; +CREATE TABLE t1 ( +b BLOB, +t TINYBLOB, +m MEDIUMBLOB, +l LONGBLOB, +pk INT AUTO_INCREMENT PRIMARY KEY, +UNIQUE INDEX l_t (l(256),t(64)) +) ENGINE=rocksdb; +SHOW INDEX IN t1; +Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment +t1 0 PRIMARY 1 pk # # NULL NULL # # +t1 0 l_t 1 l # # 256 NULL # # +t1 0 l_t 2 t # # 64 NULL # # +INSERT INTO t1 (b,t,m,l) VALUES +('','','',''), +('a','b','c','d'), +('b','d','c','b'), +('test1','test2','test3','test4'), +(REPEAT('a',128),REPEAT('b',128),REPEAT('c',128),REPEAT('d',128)), +(HEX('abcd'),HEX('def'),HEX('a'),HEX('abc')), +('abc','def','ghi','jkl'), +('test2','test3','test4','test5'), +('test3','test4','test5','test6'), +(REPEAT('b',128),REPEAT('f',128),REPEAT('e',128),REPEAT('d',128)), +(REPEAT('c',128),REPEAT('b',128),REPEAT('c',128),REPEAT('e',128)); +EXPLAIN SELECT SUBSTRING(t,64), SUBSTRING(l,256) FROM t1 WHERE t!=l AND l NOT IN ('test1') ORDER BY t, l DESC; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 range l_t l_t 259 NULL # Using where; Using filesort +SELECT SUBSTRING(t,64), SUBSTRING(l,256) FROM t1 WHERE t!=l AND l NOT IN ('test1') ORDER BY t, l DESC; +SUBSTRING(t,64) SUBSTRING(l,256) + + +bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb +bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb + + +fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff + + + +EXPLAIN SELECT SUBSTRING(t,64), SUBSTRING(l,256) FROM t1 FORCE INDEX (l_t) WHERE t!=l AND l NOT IN ('test1') ORDER BY t, l DESC; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 range l_t l_t 
259 NULL # Using where; Using filesort +SELECT SUBSTRING(t,64), SUBSTRING(l,256) FROM t1 FORCE INDEX (l_t) WHERE t!=l AND l NOT IN ('test1') ORDER BY t, l DESC; +SUBSTRING(t,64) SUBSTRING(l,256) + + +bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb +bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb + + +fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff + + + +DROP TABLE t1; +CREATE TABLE t1 ( +b BLOB, +t TINYBLOB, +m MEDIUMBLOB, +l LONGBLOB, +pk INT AUTO_INCREMENT PRIMARY KEY, +INDEX (m(128)) +) ENGINE=rocksdb; +SHOW INDEX IN t1; +Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment +t1 0 PRIMARY 1 pk A 1000 NULL NULL LSMTREE +t1 1 m 1 m A 500 128 NULL YES LSMTREE +INSERT INTO t1 (b,t,m,l) VALUES +('','','',''), +('a','b','c','d'), +('b','d','c','b'), +('test1','test2','test3','test4'), +(REPEAT('a',128),REPEAT('b',128),REPEAT('c',128),REPEAT('d',128)), +(HEX('abcd'),HEX('def'),HEX('a'),HEX('abc')), +('abc','def','ghi','jkl'), +('test2','test3','test4','test5'), +('test3','test4','test5','test6'), +(REPEAT('b',128),REPEAT('f',128),REPEAT('e',128),REPEAT('d',128)), +(REPEAT('c',128),REPEAT('b',128),REPEAT('c',128),REPEAT('e',128)); +EXPLAIN SELECT SUBSTRING(m,128) AS f FROM t1 WHERE m = 'test1' ORDER BY f DESC; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 ref m m 131 const # Using where; Using filesort +SELECT SUBSTRING(m,128) AS f FROM t1 WHERE m = 'test1' ORDER BY f DESC; +f +EXPLAIN SELECT SUBSTRING(m,128) AS f FROM t1 IGNORE INDEX FOR ORDER BY (m) WHERE m = 'test1' ORDER BY f DESC; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 ref m m 131 const # Using where; Using filesort +SELECT SUBSTRING(m,128) AS f FROM t1 IGNORE INDEX FOR ORDER BY (m) WHERE m = 'test1' ORDER BY f DESC; +f +DROP TABLE t1; +CREATE TABLE t1 ( +b BLOB, +PRIMARY KEY b (b(32)) +) ENGINE=rocksdb; +INSERT 
INTO t1 (b) VALUES +('00000000000000000000000000000000'), +('00000000000000000000000000000001'), +('00000000000000000000000000000002'); +SELECT b FROM t1; +b +00000000000000000000000000000000 +00000000000000000000000000000001 +00000000000000000000000000000002 +DROP TABLE t1; +CREATE TABLE t1 ( +b TINYBLOB, +PRIMARY KEY b (b(32)) +) ENGINE=rocksdb; +INSERT INTO t1 (b) VALUES +('00000000000000000000000000000000'), +('00000000000000000000000000000001'), +('00000000000000000000000000000002'); +SELECT b FROM t1; +b +00000000000000000000000000000000 +00000000000000000000000000000001 +00000000000000000000000000000002 +DROP TABLE t1; +CREATE TABLE t1 ( +b MEDIUMBLOB, +PRIMARY KEY b (b(32)) +) ENGINE=rocksdb; +INSERT INTO t1 (b) VALUES +('00000000000000000000000000000000'), +('00000000000000000000000000000001'), +('00000000000000000000000000000002'); +SELECT b FROM t1; +b +00000000000000000000000000000000 +00000000000000000000000000000001 +00000000000000000000000000000002 +DROP TABLE t1; +CREATE TABLE t1 ( +b LONGBLOB, +PRIMARY KEY b (b(32)) +) ENGINE=rocksdb; +INSERT INTO t1 (b) VALUES +('00000000000000000000000000000000'), +('00000000000000000000000000000001'), +('00000000000000000000000000000002'); +SELECT b FROM t1; +b +00000000000000000000000000000000 +00000000000000000000000000000001 +00000000000000000000000000000002 +DROP TABLE t1; +SET GLOBAL ROCKSDB_PAUSE_BACKGROUND_WORK = @ORIG_PAUSE_BACKGROUND_WORK; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/type_bool.result b/storage/rocksdb/mysql-test/rocksdb/r/type_bool.result new file mode 100644 index 0000000000000..4abfdb49f37ea --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/type_bool.result @@ -0,0 +1,73 @@ +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 ( +pk INT AUTO_INCREMENT PRIMARY KEY, +b1 BOOL , +b2 BOOLEAN +) ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +pk int(11) NO PRI NULL auto_increment +b1 tinyint(1) YES NULL +b2 tinyint(1) YES NULL +INSERT INTO t1 (b1,b2) VALUES 
(1,TRUE); +SELECT b1,b2 FROM t1; +b1 b2 +1 1 +INSERT INTO t1 (b1,b2) VALUES (FALSE,0); +SELECT b1,b2 FROM t1; +b1 b2 +0 0 +1 1 +INSERT INTO t1 (b1,b2) VALUES (2,3); +SELECT b1,b2 FROM t1; +b1 b2 +0 0 +1 1 +2 3 +INSERT INTO t1 (b1,b2) VALUES (-1,-2); +SELECT b1,b2 FROM t1; +b1 b2 +-1 -2 +0 0 +1 1 +2 3 +SELECT IF(b1,'true','false') AS a, IF(b2,'true','false') AS b FROM t1; +a b +false false +true true +true true +true true +SELECT b1,b2 FROM t1 WHERE b1 = TRUE; +b1 b2 +1 1 +SELECT b1,b2 FROM t1 WHERE b2 = FALSE; +b1 b2 +0 0 +INSERT INTO t1 (b1,b2) VALUES ('a','b'); +Warnings: +Warning 1366 Incorrect integer value: 'a' for column 'b1' at row 1 +Warning 1366 Incorrect integer value: 'b' for column 'b2' at row 1 +SELECT b1,b2 FROM t1; +b1 b2 +-1 -2 +0 0 +0 0 +1 1 +2 3 +INSERT INTO t1 (b1,b2) VALUES (128,-129); +Warnings: +Warning 1264 Out of range value for column 'b1' at row 1 +Warning 1264 Out of range value for column 'b2' at row 1 +SELECT b1,b2 FROM t1; +b1 b2 +-1 -2 +0 0 +0 0 +1 1 +127 -128 +2 3 +ALTER TABLE t1 ADD COLUMN b3 BOOLEAN UNSIGNED ; +ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'UNSIGNED' at line 1 +ALTER TABLE ADD COLUMN b3 BOOL ZEROFILL ; +ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'ADD COLUMN b3 BOOL ZEROFILL' at line 1 +DROP TABLE t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/type_char.result b/storage/rocksdb/mysql-test/rocksdb/r/type_char.result new file mode 100644 index 0000000000000..1786dfae1e754 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/type_char.result @@ -0,0 +1,76 @@ +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 ( +c CHAR , +c0 CHAR(0) , +c1 CHAR(1) , +c20 CHAR(20) , +c255 CHAR(255) , +PRIMARY KEY (c255) +) ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +c char(1) YES NULL +c0 char(0) YES 
NULL +c1 char(1) YES NULL +c20 char(20) YES NULL +c255 char(255) NO PRI NULL +INSERT INTO t1 (c,c0,c1,c20,c255) VALUES ('','','','',''); +INSERT INTO t1 (c,c0,c1,c20,c255) VALUES ('a','','b','abcdefghi klmnopqrst', 'Creating an article for the Knowledgebase is similar to asking questions. First, navigate to the category where you feel the article should be. Once there, double check that an article doesn\'t already exist which would work.'); +SELECT c,c0,c1,c20,c255 FROM t1; +c c0 c1 c20 c255 + +a b abcdefghi klmnopqrst Creating an article for the Knowledgebase is similar to asking questions. First, navigate to the category where you feel the article should be. Once there, double check that an article doesn't already exist which would work. +INSERT INTO t1 (c,c0,c1,c20,c255) VALUES ('abc', 'a', 'abc', REPEAT('a',21), REPEAT('x',256)); +Warnings: +Warning 1265 Data truncated for column 'c' at row 1 +Warning 1265 Data truncated for column 'c0' at row 1 +Warning 1265 Data truncated for column 'c1' at row 1 +Warning 1265 Data truncated for column 'c20' at row 1 +Warning 1265 Data truncated for column 'c255' at row 1 +INSERT INTO t1 (c,c0,c1,c20,c255) SELECT c255, c255, c255, c255, CONCAT('a',c255,c1) FROM t1; +Warnings: +Warning 1265 Data truncated for column 'c' at row 5 +Warning 1265 Data truncated for column 'c0' at row 5 +Warning 1265 Data truncated for column 'c1' at row 5 +Warning 1265 Data truncated for column 'c20' at row 5 +Warning 1265 Data truncated for column 'c' at row 6 +Warning 1265 Data truncated for column 'c0' at row 6 +Warning 1265 Data truncated for column 'c1' at row 6 +Warning 1265 Data truncated for column 'c20' at row 6 +Warning 1265 Data truncated for column 'c255' at row 6 +SELECT c,c0,c1,c20,c255 FROM t1; +c c0 c1 c20 c255 + + a +C C Creating an article aCreating an article for the Knowledgebase is similar to asking questions. First, navigate to the category where you feel the article should be. 
Once there, double check that an article doesn't already exist which would work.b +a a aaaaaaaaaaaaaaaaaaaa xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx +a b abcdefghi klmnopqrst Creating an article for the Knowledgebase is similar to asking questions. First, navigate to the category where you feel the article should be. Once there, double check that an article doesn't already exist which would work. +x x xxxxxxxxxxxxxxxxxxxx axxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx +SELECT DISTINCT c20, REPEAT('a',LENGTH(c20)), COUNT(*) FROM t1 GROUP BY c1, c20; +c20 REPEAT('a',LENGTH(c20)) COUNT(*) + 2 +Creating an article aaaaaaaaaaaaaaaaaaa 1 +aaaaaaaaaaaaaaaaaaaa aaaaaaaaaaaaaaaaaaaa 1 +abcdefghi klmnopqrst aaaaaaaaaaaaaaaaaaaa 1 +xxxxxxxxxxxxxxxxxxxx aaaaaaaaaaaaaaaaaaaa 1 +ALTER TABLE t1 ADD COLUMN c257 CHAR(257) ; +ERROR 42000: Column length too big for column 'c257' (max = 255); use BLOB or TEXT instead +DROP TABLE t1; +CREATE TABLE t1(c1 CHAR(0) NOT NULL); +INSERT INTO t1 VALUES('a'); +Warnings: +Warning 1265 Data truncated for column 'c1' at row 1 +SELECT * FROM t1; +c1 + +DROP TABLE t1; +CREATE TABLE t1(a char(10) character set utf8 collate utf8_bin primary key); +INSERT INTO t1 VALUES ('one'),('two'),('three'),('four'),('five'); +(SELECT * FROM t1 LIMIT 1) UNION (SELECT * FROM t1); +a +five +four +one +three +two +DROP TABLE t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/type_char_indexes.result b/storage/rocksdb/mysql-test/rocksdb/r/type_char_indexes.result new file mode 100644 index 0000000000000..413c3f69f2389 --- /dev/null +++ 
b/storage/rocksdb/mysql-test/rocksdb/r/type_char_indexes.result @@ -0,0 +1,73 @@ +SET @ORIG_PAUSE_BACKGROUND_WORK = @@ROCKSDB_PAUSE_BACKGROUND_WORK; +SET GLOBAL ROCKSDB_PAUSE_BACKGROUND_WORK = 1; +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 ( +c CHAR, +c20 CHAR(20) PRIMARY KEY, +v16 VARCHAR(16), +v128 VARCHAR(128) +) ENGINE=rocksdb; +SHOW INDEX IN t1; +Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment +t1 0 PRIMARY 1 c20 A 1000 NULL NULL LSMTREE +INSERT INTO t1 (c,c20,v16,v128) VALUES ('a','char1','varchar1a','varchar1b'),('a','char2','varchar2a','varchar2b'),('b','char3','varchar1a','varchar1b'),('c','char4','varchar3a','varchar3b'); +EXPLAIN SELECT c20 FROM t1 ORDER BY c20; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 index NULL PRIMARY 20 NULL # Using index +SELECT c20 FROM t1 ORDER BY c20; +c20 +char1 +char2 +char3 +char4 +EXPLAIN SELECT c20 FROM t1 FORCE INDEX FOR ORDER BY (PRIMARY) ORDER BY c20; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 index NULL PRIMARY 20 NULL # Using index +SELECT c20 FROM t1 FORCE INDEX FOR ORDER BY (PRIMARY) ORDER BY c20; +c20 +char1 +char2 +char3 +char4 +DROP TABLE t1; +CREATE TABLE t1 ( +c CHAR, +c20 CHAR(20), +v16 VARCHAR(16), +v128 VARCHAR(128), +pk VARCHAR(64) PRIMARY KEY, +INDEX (v16) +) ENGINE=rocksdb; +SHOW INDEX IN t1; +Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment +t1 0 PRIMARY 1 pk A 1000 NULL NULL LSMTREE +t1 1 v16 1 v16 A 500 NULL NULL YES LSMTREE +INSERT INTO t1 (c,c20,v16,v128,pk) VALUES ('a','char1','varchar1a','varchar1b','1'),('a','char2','varchar2a','varchar2b','2'),('b','char3','varchar1a','varchar1b','3'),('c','char4','varchar3a','varchar3b','4'); +EXPLAIN SELECT SUBSTRING(v16,0,3) FROM t1 WHERE v16 LIKE 'varchar%'; +id select_type table type possible_keys key key_len ref rows Extra +1 
SIMPLE t1 index v16 v16 21 NULL # Using where; Using index +SELECT SUBSTRING(v16,7,3) FROM t1 WHERE v16 LIKE 'varchar%'; +SUBSTRING(v16,7,3) +r1a +r1a +r2a +r3a +EXPLAIN SELECT SUBSTRING(v16,0,3) FROM t1 IGNORE INDEX (v16) WHERE v16 LIKE 'varchar%'; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 ALL NULL NULL NULL NULL # Using where +SELECT SUBSTRING(v16,7,3) FROM t1 IGNORE INDEX (v16) WHERE v16 LIKE 'varchar%'; +SUBSTRING(v16,7,3) +r1a +r1a +r2a +r3a +EXPLAIN SELECT c,c20,v16,v128 FROM t1 WHERE v16 = 'varchar1a' OR v16 = 'varchar3a' ORDER BY v16; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 ALL v16 NULL NULL NULL # Using where; Using filesort +SELECT c,c20,v16,v128 FROM t1 WHERE v16 = 'varchar1a' OR v16 = 'varchar3a' ORDER BY v16; +c c20 v16 v128 +a char1 varchar1a varchar1b +b char3 varchar1a varchar1b +c char4 varchar3a varchar3b +DROP TABLE t1; +SET GLOBAL ROCKSDB_PAUSE_BACKGROUND_WORK = @ORIG_PAUSE_BACKGROUND_WORK; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/type_char_indexes_collation.result b/storage/rocksdb/mysql-test/rocksdb/r/type_char_indexes_collation.result new file mode 100644 index 0000000000000..cb56089595bdc --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/type_char_indexes_collation.result @@ -0,0 +1,91 @@ +set session debug_dbug= "+d,myrocks_enable_unknown_collation_index_only_scans"; +create table t (id int not null auto_increment primary key, +c varchar(8) CHARACTER SET utf8 COLLATE utf8_general_ci, +key sk (c)); +insert into t (c) values ('☀'), ('ß'); +explain select c from t; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t index NULL sk 27 NULL # Using index +select c from t; +c +ß +☀ +drop table t; +set session debug_dbug= "-d,myrocks_enable_unknown_collation_index_only_scans"; +create table t (id int not null auto_increment, +c1 varchar(1) CHARACTER SET latin1 COLLATE latin1_swedish_ci, +c2 char(1) CHARACTER SET latin1 COLLATE 
latin1_general_ci, +primary key (id), +key sk1 (c1), +key sk2 (c2)); +explain select hex(c1) from t order by c1; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t index NULL sk1 4 NULL # Using index +explain select hex(c1) from t IGNORE INDEX (sk1) order by c1; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t ALL NULL NULL NULL NULL # Using filesort +explain select hex(c2) from t order by c2; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t index NULL sk2 2 NULL # Using index +explain select hex(c2) from t IGNORE INDEX (sk1) order by c2; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t index NULL sk2 2 NULL # Using index +truncate t; +insert into t (c1, c2) values ('Asdf ', 'Asdf '); +Warnings: +Warning 1265 Data truncated for column 'c1' at row 1 +Warning 1265 Data truncated for column 'c2' at row 1 +select char_length(c1), char_length(c2), c1, c2 from t; +char_length(c1) char_length(c2) c1 c2 +1 1 A A +drop table t; +create table t (id int not null auto_increment, +c2 char(255) CHARACTER SET latin1 COLLATE latin1_general_ci, +primary key (id), +unique key sk2 (c2)); +insert into t (c2) values ('Asdf'); +insert into t (c2) values ('asdf '); +ERROR 23000: Duplicate entry 'asdf' for key 'sk2' +drop table t; +create table t (id int not null auto_increment, +c1 varchar(256) CHARACTER SET latin1 COLLATE latin1_swedish_ci, +primary key (id), +unique key sk1 (c1)); +insert into t (c1) values ('Asdf'); +insert into t (c1) values ('asdf '); +ERROR 23000: Duplicate entry 'asdf ' for key 'sk1' +insert into t (c1) values ('asdf'); +ERROR 23000: Duplicate entry 'asdf' for key 'sk1' +drop table t; +create table t (id int not null auto_increment, +c1 varchar(256) CHARACTER SET latin1 COLLATE latin1_swedish_ci, +primary key (id), +unique key sk1 (c1(1))); +insert into t (c1) values ('Asdf'); +insert into t (c1) values ('bbbb '); +insert into t (c1) values ('a 
'); +ERROR 23000: Duplicate entry 'a' for key 'sk1' +explain select c1 from t; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t ALL NULL NULL NULL NULL # +select c1 from t; +c1 +Asdf +bbbb +drop table t; +set session rocksdb_verify_row_debug_checksums = on; +create table t (id int primary key, email varchar(100), KEY email_i (email(30))) engine=rocksdb default charset=latin1; +insert into t values (1, ' a'); +explain select 'email_i' as index_name, count(*) AS count from t force index(email_i); +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t ALL NULL NULL NULL NULL # +select 'email_i' as index_name, count(*) AS count from t force index(email_i); +index_name count +email_i 1 +drop table t; +create table t (id int primary key, email varchar(767), KEY email_i (email)) engine=rocksdb default charset=latin1; +insert into t values (1, REPEAT('a', 700)); +select 'email_i' as index_name, count(*) AS count from t force index(email_i); +index_name count +email_i 1 +drop table t; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/type_date_time.result b/storage/rocksdb/mysql-test/rocksdb/r/type_date_time.result new file mode 100644 index 0000000000000..91e7ac257c3fb --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/type_date_time.result @@ -0,0 +1,56 @@ +DROP TABLE IF EXISTS t1; +set @save_time_zone=@@time_zone; +set time_zone='UTC'; +CREATE TABLE t1 ( +d DATE , +dt DATETIME , +ts TIMESTAMP , +t TIME , +y YEAR , +y4 YEAR(4) , +y2 YEAR(2) , +pk DATETIME PRIMARY KEY +) ENGINE=rocksdb; +Warnings: +Note 1287 'YEAR(2)' is deprecated and will be removed in a future release. 
Please use YEAR(4) instead +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +d date YES NULL +dt datetime YES NULL +ts timestamp YES NULL +t time YES NULL +y year(4) YES NULL +y4 year(4) YES NULL +y2 year(2) YES NULL +pk datetime NO PRI NULL +SET @tm = '2012-04-09 05:27:00'; +INSERT INTO t1 (d,dt,ts,t,y,y4,y2,pk) VALUES +('1000-01-01', '1000-01-01 00:00:00', FROM_UNIXTIME(1), '-838:59:59', '1901', '1901', '00','2012-12-12 12:12:12'), +('9999-12-31', '9999-12-31 23:59:59', FROM_UNIXTIME(2147483647), '838:59:59', '2155', '2155', '99','2012-12-12 12:12:13'), +('0000-00-00', '0000-00-00 00:00:00', '0000-00-00 00:00:00', '00:00:00', '0', '0', '0','2012-12-12 12:12:14'), +(DATE(@tm),@tm,TIMESTAMP(@tm),TIME(@tm),YEAR(@tm),YEAR(@tm),YEAR(@tm),'2012-12-12 12:12:15'); +SELECT d,dt,ts,t,y,y4,y2 FROM t1; +d dt ts t y y4 y2 +0000-00-00 0000-00-00 00:00:00 0000-00-00 00:00:00 00:00:00 2000 2000 00 +1000-01-01 1000-01-01 00:00:00 1970-01-01 00:00:01 -838:59:59 1901 1901 00 +2012-04-09 2012-04-09 05:27:00 2012-04-09 05:27:00 05:27:00 2012 2012 12 +9999-12-31 9999-12-31 23:59:59 2038-01-19 03:14:07 838:59:59 2155 2155 99 +INSERT INTO t1 (d,dt,ts,t,y,y4,y2,pk) VALUES +('999-13-32', '999-11-31 00:00:00', '0', '-839:00:00', '1900', '1900', '-1','2012-12-12 12:12:16'); +Warnings: +Warning 1265 Data truncated for column 'd' at row 1 +Warning 1265 Data truncated for column 'dt' at row 1 +Warning 1265 Data truncated for column 'ts' at row 1 +Warning 1264 Out of range value for column 't' at row 1 +Warning 1264 Out of range value for column 'y' at row 1 +Warning 1264 Out of range value for column 'y4' at row 1 +Warning 1264 Out of range value for column 'y2' at row 1 +SELECT d,dt,ts,t,y,y4,y2 FROM t1; +d dt ts t y y4 y2 +1000-01-01 1000-01-01 00:00:00 1970-01-01 00:00:01 -838:59:59 1901 1901 00 +9999-12-31 9999-12-31 23:59:59 2038-01-19 03:14:07 838:59:59 2155 2155 99 +0000-00-00 0000-00-00 00:00:00 0000-00-00 00:00:00 00:00:00 2000 2000 00 +2012-04-09 2012-04-09 05:27:00 2012-04-09 
05:27:00 05:27:00 2012 2012 12 +0000-00-00 0000-00-00 00:00:00 0000-00-00 00:00:00 -838:59:59 0000 0000 00 +set time_zone=@save_time_zone; +DROP TABLE t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/type_date_time_indexes.result b/storage/rocksdb/mysql-test/rocksdb/r/type_date_time_indexes.result new file mode 100644 index 0000000000000..120d0d81b55e7 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/type_date_time_indexes.result @@ -0,0 +1,119 @@ +SET @ORIG_PAUSE_BACKGROUND_WORK = @@ROCKSDB_PAUSE_BACKGROUND_WORK; +SET GLOBAL ROCKSDB_PAUSE_BACKGROUND_WORK = 1; +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 ( +d DATE, +dt DATETIME PRIMARY KEY, +ts TIMESTAMP, +t TIME, +y YEAR +) ENGINE=rocksdb; +SHOW INDEX IN t1; +Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment +t1 0 PRIMARY 1 dt A 1000 NULL NULL LSMTREE +SET @tm = '2012-04-09 05:27:00'; +INSERT INTO t1 (d,dt,ts,t,y) VALUES +('2012-01-12', '2010-11-22 12:33:54', '2011-11-14 21:45:55', '00:12:33', '2000'), +('2012-01-12', '2010-11-22 11:43:14', '2011-11-14 21:45:55', '00:12:32', '2001'), +('2012-03-31', '2011-08-28 21:33:56', '1999-04-30 19:11:08', '12:00:00', '1999'), +('2012-03-13', '2011-08-27 21:33:56', '1999-03-30 19:11:08', '12:10:00', '1998'), +('2011-03-31', '2011-08-28 20:33:56', '1997-01-31 11:54:01', '22:04:10', '1994'), +(DATE(@tm),@tm,TIMESTAMP(@tm),TIME(@tm),YEAR(@tm)); +EXPLAIN SELECT dt FROM t1 ORDER BY dt LIMIT 3; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 index NULL PRIMARY 5 NULL # Using index +SELECT dt FROM t1 ORDER BY dt LIMIT 3; +dt +2010-11-22 11:43:14 +2010-11-22 12:33:54 +2011-08-27 21:33:56 +EXPLAIN SELECT dt FROM t1 FORCE INDEX FOR ORDER BY (PRIMARY) ORDER BY dt LIMIT 3; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 index NULL PRIMARY 5 NULL # Using index +SELECT dt FROM t1 FORCE INDEX FOR ORDER BY (PRIMARY) ORDER BY dt LIMIT 3; +dt 
+2010-11-22 11:43:14 +2010-11-22 12:33:54 +2011-08-27 21:33:56 +INSERT INTO t1 (d,dt,ts,t,y) VALUES +('2012-01-11', '2010-11-22 12:33:54', '2011-11-14 21:45:55', '00:12:33', '2000'); +ERROR 23000: Duplicate entry '2010-11-22 12:33:54' for key 'PRIMARY' +DROP TABLE t1; +CREATE TABLE t1 ( +d DATE, +dt DATETIME, +ts TIMESTAMP, +t TIME, +y YEAR, +pk TIME PRIMARY KEY, +INDEX (ts) +) ENGINE=rocksdb; +SHOW INDEX IN t1; +Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment +t1 0 PRIMARY 1 pk A 1000 NULL NULL LSMTREE +t1 1 ts 1 ts A 500 NULL NULL YES LSMTREE +SET @tm = '2012-04-09 05:27:00'; +INSERT INTO t1 (d,dt,ts,t,y,pk) VALUES +('2012-01-12', '2010-11-22 12:33:54', '2011-11-14 21:45:55', '00:12:33', '2000','12:00:00'), +('2012-01-12', '2010-11-22 11:43:14', '2011-11-14 21:45:55', '00:12:32', '2001','12:01:00'), +('2012-03-31', '2011-08-28 21:33:56', '1999-04-30 19:11:08', '12:00:00', '1999','12:02:00'), +('2012-03-13', '2011-08-27 21:33:56', '1999-03-30 19:11:08', '12:10:00', '1998','12:03:00'), +('2011-03-31', '2011-08-28 20:33:56', '1997-01-31 11:54:01', '22:04:10', '1994','12:04:00'), +(DATE(@tm),@tm,TIMESTAMP(@tm),TIME(@tm),YEAR(@tm),'12:05:00'); +EXPLAIN SELECT ts FROM t1 WHERE ts > NOW(); +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 index ts ts 5 NULL # Using where; Using index +SELECT ts FROM t1 WHERE ts > NOW(); +ts +EXPLAIN SELECT ts FROM t1 USE INDEX () WHERE ts > NOW(); +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 ALL NULL NULL NULL NULL # Using where +SELECT ts FROM t1 USE INDEX () WHERE ts > NOW(); +ts +DROP TABLE t1; +CREATE TABLE t1 ( +d DATE, +dt DATETIME, +ts TIMESTAMP, +t TIME, +y YEAR, +pk TIME PRIMARY KEY, +INDEX (y,t) +) ENGINE=rocksdb; +SHOW INDEX IN t1; +Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment +t1 0 PRIMARY 1 pk A 1000 
NULL NULL LSMTREE +t1 1 y 1 y A 250 NULL NULL YES LSMTREE +t1 1 y 2 t A 500 NULL NULL YES LSMTREE +SET @tm = '2012-04-09 05:27:00'; +INSERT INTO t1 (d,dt,ts,t,y,pk) VALUES +('2012-01-12', '2010-11-22 12:33:54', '2011-11-14 21:45:55', '00:12:33', '2000','18:18:18'), +('2012-01-12', '2010-11-22 11:43:14', '2011-11-14 21:45:55', '00:12:32', '2001','19:18:18'), +('2012-03-31', '2011-08-28 21:33:56', '1999-04-30 19:11:08', '12:00:00', '1999','20:18:18'), +('2012-03-13', '2011-08-27 21:33:56', '1999-03-30 19:11:08', '12:10:00', '1998','21:18:18'), +('2011-03-31', '2011-08-28 20:33:56', '1997-01-31 11:54:01', '22:04:10', '1994','22:18:18'), +(DATE(@tm),@tm,TIMESTAMP(@tm),TIME(@tm),YEAR(@tm),'23:18:18'); +EXPLAIN SELECT y, COUNT(*) FROM t1 GROUP BY y; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 index NULL y 6 NULL # Using index +SELECT y, COUNT(*) FROM t1 GROUP BY y; +y COUNT(*) +1994 1 +1998 1 +1999 1 +2000 1 +2001 1 +2012 1 +EXPLAIN SELECT y, COUNT(*) FROM t1 USE INDEX FOR GROUP BY () GROUP BY y; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 index NULL y 6 NULL # Using index; Using temporary; Using filesort +SELECT y, COUNT(*) FROM t1 USE INDEX FOR GROUP BY () GROUP BY y; +y COUNT(*) +1994 1 +1998 1 +1999 1 +2000 1 +2001 1 +2012 1 +DROP TABLE t1; +SET GLOBAL ROCKSDB_PAUSE_BACKGROUND_WORK = @ORIG_PAUSE_BACKGROUND_WORK; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/type_decimal.result b/storage/rocksdb/mysql-test/rocksdb/r/type_decimal.result new file mode 100644 index 0000000000000..7397ff64ab185 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/type_decimal.result @@ -0,0 +1,103 @@ +drop table if exists t1, t2; +# +# Check that DECIMAL datatype supports 'index-only' scans and is decoded correctly. 
+# (Decoding happens from the mem-comparable image in the index, regardless +# of whether the value part has original value or not) +# +create table t1 ( +pk int not null primary key, +col1 decimal (2,1) signed, +col2 decimal (2,1) unsigned, +filler varchar(100), +key key1(col1, col2) +)engine=rocksdb; +insert into t1 values +(1,-9.1, 0.7, 'filler'), +(2,-8.2, 1.6, 'filler'), +(3, 0.3, 2.5, 'filler'), +(4, 1.4, 3.4, 'filler'), +(5, 2.5, 4.3, 'filler'), +(6, 3.3, 5.3, 'filler'); +insert into t1 select pk+100, 9.0, 9.0, 'extra-data' from t1; +insert into t1 select pk+200, 9.0, 9.0, 'extra-data' from t1; +insert into t1 select pk+1000, 9.0, 9.0, 'extra-data' from t1; +insert into t1 select pk+10000, 9.0, 9.0, 'extra-data' from t1; +insert into t1 select pk+100000, 9.0, 9.0, 'extra-data' from t1; +analyze table t1; +Table Op Msg_type Msg_text +test.t1 analyze status OK +# The following can't use index-only: +explain select * from t1 where col1 between -8 and 8; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 range key1 key1 3 NULL # Using index condition +# This will use index-only: +explain +select col1, col2 from t1 where col1 between -8 and 8; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 range key1 key1 3 NULL # Using where; Using index +select col1, col2 from t1 where col1 between -8 and 8; +col1 col2 +0.3 2.5 +1.4 3.4 +2.5 4.3 +3.3 5.3 +insert into t1 values (11, NULL, 0.9, 'row1-with-null'); +insert into t1 values (10, -8.4, NULL, 'row2-with-null'); +explain +select col1, col2 from t1 force index(key1) where col1 is null or col1 < -7; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 range key1 key1 3 NULL # Using where; Using index +select col1, col2 from t1 force index(key1) where col1 is null or col1 < -7; +col1 col2 +NULL 0.9 +-9.1 0.7 +-8.4 NULL +-8.2 1.6 +# Try an UPDATE +select * from t1 where pk in (3,4); +pk col1 col2 filler +3 0.3 2.5 filler +4 1.4 3.4 
filler +update t1 set col2= col2+0.2 where pk in (3,4); +select * from t1 where pk in (3,4); +pk col1 col2 filler +3 0.3 2.7 filler +4 1.4 3.6 filler +drop table t1; +# +# Try another DECIMAL-based type that takes more space +# +create table t1 ( +pk int not null primary key, +col1 decimal (12,6) signed, +col2 decimal (12,6) unsigned, +filler varchar(100), +key key1(col1, col2) +)engine=rocksdb; +insert into t1 values +(1,-900.001, 000.007, 'filler'), +(2,-700.002, 100.006, 'filler'), +(3, 000.003, 200.005, 'filler'), +(4, 100.004, 300.004, 'filler'), +(5, 200.005, 400.003, 'filler'), +(6, 300.003, 500.003, 'filler'); +insert into t1 select pk+100, col1+20000, col2+20000, 'extra-data' from t1; +insert into t1 select pk+200, col1+20000, col2+20000, 'extra-data' from t1; +insert into t1 select pk+1000, col1+20000, col2+20000, 'extra-data' from t1; +insert into t1 select pk+10000, col1+20000, col2+20000, 'extra-data' from t1; +insert into t1 select pk+100000, col1+20000, col2+20000, 'extra-data' from t1; +analyze table t1; +Table Op Msg_type Msg_text +test.t1 analyze status OK +explain +select col1, col2 from t1 force index(key1) where col1 between -800 and 800; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 range key1 key1 7 NULL # Using where; Using index +select col1, col2 from t1 force index(key1) where col1 between -800 and 800; +col1 col2 +-700.002000 100.006000 +0.003000 200.005000 +100.004000 300.004000 +200.005000 400.003000 +300.003000 500.003000 +drop table t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/type_enum.result b/storage/rocksdb/mysql-test/rocksdb/r/type_enum.result new file mode 100644 index 0000000000000..ed66303cc0f80 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/type_enum.result @@ -0,0 +1,47 @@ +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 ( +a ENUM('') , +b ENUM('test1','test2','test3','test4','test5') , +c 
ENUM('1','2','3','4','5','6','7','8','9','a','b','c','d','e','f','g','h','i','j','k','l','m','n','o','p','q','r','s','t','u','v','w','x','y','z',' ','11','12','13','14','15','16','17','18','19','1a','1b','1c','1d','1e','1f','1g','1h','1i','1j','1k','1l','1m','1n','1o','1p','1q','1r','1s','1t','1u','1v','1w','1x','1y','1z','20','21','22','23','24','25','26','27','28','29','2a','2b','2c','2d','2e','2f','2g','2h','2i','2j','2k','2l','2m','2n','2o','2p','2q','2r','2s','2t','2u','2v','2w','2x','2y','2z','30','31','32','33','34','35','36','37','38','39','3a','3b','3c','3d','3e','3f','3g','3h','3i','3j','3k','3l','3m','3n','3o','3p','3q','3r','3s','3t','3u','3v','3w','3x','3y','3z','40','41','42','43','44','45','46','47','48','49','4a','4b','4c','4d','4e','4f','4g','4h','4i','4j','4k','4l','4m','4n','4o','4p','4q','4r','4s','4t','4u','4v','4w','4x','4y','4z','50','51','52','53','54','55','56','57','58','59','5a','5b','5c','5d','5e','5f','5g','5h','5i','5j','5k','5l','5m','5n','5o','5p','5q','5r','5s','5t','5u','5v','5w','5x','5y','5z','60','61','62','63','64','65','66','67','68','69','6a','6b','6c','6d','6e','6f','6g','6h','6i','6j','6k','6l','6m','6n','6o','6p','6q','6r','6s','6t','6u','6v','6w','6x','6y','6z','70','71','72','73','74','75') , +PRIMARY KEY (b) +) ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +a enum('') YES NULL +b enum('test1','test2','test3','test4','test5') NO PRI NULL +c 
enum('1','2','3','4','5','6','7','8','9','a','b','c','d','e','f','g','h','i','j','k','l','m','n','o','p','q','r','s','t','u','v','w','x','y','z','','11','12','13','14','15','16','17','18','19','1a','1b','1c','1d','1e','1f','1g','1h','1i','1j','1k','1l','1m','1n','1o','1p','1q','1r','1s','1t','1u','1v','1w','1x','1y','1z','20','21','22','23','24','25','26','27','28','29','2a','2b','2c','2d','2e','2f','2g','2h','2i','2j','2k','2l','2m','2n','2o','2p','2q','2r','2s','2t','2u','2v','2w','2x','2y','2z','30','31','32','33','34','35','36','37','38','39','3a','3b','3c','3d','3e','3f','3g','3h','3i','3j','3k','3l','3m','3n','3o','3p','3q','3r','3s','3t','3u','3v','3w','3x','3y','3z','40','41','42','43','44','45','46','47','48','49','4a','4b','4c','4d','4e','4f','4g','4h','4i','4j','4k','4l','4m','4n','4o','4p','4q','4r','4s','4t','4u','4v','4w','4x','4y','4z','50','51','52','53','54','55','56','57','58','59','5a','5b','5c','5d','5e','5f','5g','5h','5i','5j','5k','5l','5m','5n','5o','5p','5q','5r','5s','5t','5u','5v','5w','5x','5y','5z','60','61','62','63','64','65','66','67','68','69','6a','6b','6c','6d','6e','6f','6g','6h','6i','6j','6k','6l','6m','6n','6o','6p','6q','6r','6s','6t','6u','6v','6w','6x','6y','6z','70','71','72','73','74','75') YES NULL +INSERT INTO t1 (a,b,c) VALUES ('','test2','4'),('',5,2); +SELECT a,b,c FROM t1; +a b c + test2 4 + test5 2 +INSERT INTO t1 (a,b,c) VALUES (0,'test6',-1); +Warnings: +Warning 1265 Data truncated for column 'a' at row 1 +Warning 1265 Data truncated for column 'b' at row 1 +Warning 1265 Data truncated for column 'c' at row 1 +SELECT a,b,c FROM t1; +a b c + + test2 4 + test5 2 +ALTER TABLE t1 ADD COLUMN e ENUM('a','A') ; +Warnings: +Note 1291 Column 'e' has duplicated value 'a' in ENUM +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +a enum('') YES NULL +b enum('test1','test2','test3','test4','test5') NO PRI NULL +c 
enum('1','2','3','4','5','6','7','8','9','a','b','c','d','e','f','g','h','i','j','k','l','m','n','o','p','q','r','s','t','u','v','w','x','y','z','','11','12','13','14','15','16','17','18','19','1a','1b','1c','1d','1e','1f','1g','1h','1i','1j','1k','1l','1m','1n','1o','1p','1q','1r','1s','1t','1u','1v','1w','1x','1y','1z','20','21','22','23','24','25','26','27','28','29','2a','2b','2c','2d','2e','2f','2g','2h','2i','2j','2k','2l','2m','2n','2o','2p','2q','2r','2s','2t','2u','2v','2w','2x','2y','2z','30','31','32','33','34','35','36','37','38','39','3a','3b','3c','3d','3e','3f','3g','3h','3i','3j','3k','3l','3m','3n','3o','3p','3q','3r','3s','3t','3u','3v','3w','3x','3y','3z','40','41','42','43','44','45','46','47','48','49','4a','4b','4c','4d','4e','4f','4g','4h','4i','4j','4k','4l','4m','4n','4o','4p','4q','4r','4s','4t','4u','4v','4w','4x','4y','4z','50','51','52','53','54','55','56','57','58','59','5a','5b','5c','5d','5e','5f','5g','5h','5i','5j','5k','5l','5m','5n','5o','5p','5q','5r','5s','5t','5u','5v','5w','5x','5y','5z','60','61','62','63','64','65','66','67','68','69','6a','6b','6c','6d','6e','6f','6g','6h','6i','6j','6k','6l','6m','6n','6o','6p','6q','6r','6s','6t','6u','6v','6w','6x','6y','6z','70','71','72','73','74','75') YES NULL +e enum('a','A') YES NULL +INSERT INTO t1 (a,b,c,e) VALUES ('','test3','75','A'); +SELECT a,b,c,e FROM t1; +a b c e + NULL + test2 4 NULL + test3 75 a + test5 2 NULL +SELECT a,b,c,e FROM t1 WHERE b='test2' OR a != ''; +a b c e + test2 4 NULL +DROP TABLE t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/type_enum_indexes.result b/storage/rocksdb/mysql-test/rocksdb/r/type_enum_indexes.result new file mode 100644 index 0000000000000..70bbc840454aa --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/type_enum_indexes.result @@ -0,0 +1,69 @@ +SET @ORIG_PAUSE_BACKGROUND_WORK = @@ROCKSDB_PAUSE_BACKGROUND_WORK; +SET GLOBAL ROCKSDB_PAUSE_BACKGROUND_WORK = 1; +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 ( +a 
ENUM('N.America','S.America','Africa','Europe','Australia','Asia','Antarctica'), +b ENUM('test1','test2','test3','test4','test5'), +c ENUM('1a','1b','1d','1j','4a','4z','5a','5b','6v','6z') PRIMARY KEY +) ENGINE=rocksdb; +INSERT INTO t1 (a,b,c) VALUES +('N.America','test1','5a'),('Europe','test1','5b'),('Europe','test2','6v'), +('Africa','test3','4z'),('Africa','test4','1j'),('Antarctica','test4','1d'); +SHOW INDEX IN t1; +Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment +t1 0 PRIMARY 1 c A 1000 NULL NULL LSMTREE +EXPLAIN SELECT c FROM t1 WHERE c BETWEEN '1d' AND '6u'; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 index PRIMARY PRIMARY 1 NULL # Using where; Using index +SELECT c FROM t1 WHERE c BETWEEN '1d' AND '6u'; +c +1d +1j +4z +5a +5b +EXPLAIN SELECT c FROM t1 USE INDEX () WHERE c BETWEEN '1d' AND '6u'; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 ALL NULL NULL NULL NULL # Using where +SELECT c FROM t1 USE INDEX () WHERE c BETWEEN '1d' AND '6u'; +c +1d +1j +4z +5a +5b +DROP TABLE t1; +CREATE TABLE t1 ( +a ENUM('N.America','S.America','Africa','Europe','Australia','Asia','Antarctica'), +b ENUM('test1','test2','test3','test4','test5'), +c ENUM('1a','1b','1d','1j','4a','4z','5a','5b','6v','6z'), +pk ENUM('1','2','3','4','5','6','7','8','9') PRIMARY KEY, +INDEX(b) +) ENGINE=rocksdb; +INSERT INTO t1 (a,b,c,pk) VALUES +('N.America','test1','5a',1),('Europe','test1','5b',2),('Europe','test2','6v',3), +('Africa','test3','4z',4),('Africa','test4','1j',5),('Antarctica','test4','1d',6); +SHOW INDEX IN t1; +Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment +t1 0 PRIMARY 1 pk A 1000 NULL NULL LSMTREE +t1 1 b 1 b A 500 NULL NULL YES LSMTREE +EXPLAIN SELECT DISTINCT b FROM t1; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE 
t1 index NULL b 2 NULL # +SELECT DISTINCT b FROM t1; +b +test1 +test2 +test3 +test4 +EXPLAIN SELECT DISTINCT b FROM t1 IGNORE INDEX (b); +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 ALL NULL NULL NULL NULL # Using temporary +SELECT DISTINCT b FROM t1 IGNORE INDEX (b); +b +test1 +test2 +test3 +test4 +DROP TABLE t1; +SET GLOBAL ROCKSDB_PAUSE_BACKGROUND_WORK = @ORIG_PAUSE_BACKGROUND_WORK; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/type_fixed.result b/storage/rocksdb/mysql-test/rocksdb/r/type_fixed.result new file mode 100644 index 0000000000000..055952ea55f88 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/type_fixed.result @@ -0,0 +1,131 @@ +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 ( +d DECIMAL , +d0 DECIMAL(0) , +d1_1 DECIMAL(1,1) , +d10_2 DECIMAL(10,2) , +d60_10 DECIMAL(60,10) , +n NUMERIC , +n0_0 NUMERIC(0,0) , +n1 NUMERIC(1) , +n20_4 NUMERIC(20,4) , +n65_4 NUMERIC(65,4) , +pk NUMERIC PRIMARY KEY +) ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +d decimal(10,0) YES NULL +d0 decimal(10,0) YES NULL +d1_1 decimal(1,1) YES NULL +d10_2 decimal(10,2) YES NULL +d60_10 decimal(60,10) YES NULL +n decimal(10,0) YES NULL +n0_0 decimal(10,0) YES NULL +n1 decimal(1,0) YES NULL +n20_4 decimal(20,4) YES NULL +n65_4 decimal(65,4) YES NULL +pk decimal(10,0) NO PRI NULL +INSERT INTO t1 (d,d0,d1_1,d10_2,d60_10,n,n0_0,n1,n20_4,n65_4,pk) VALUES (100,123456,0.3,40000.25,123456789123456789.10001,1024,7000.0,8.0,999999.9,9223372036854775807,1); +INSERT INTO t1 (d,d0,d1_1,d10_2,d60_10,n,n0_0,n1,n20_4,n65_4,pk) VALUES (0,0,0,0,0,0,0,0,0,0,2); +INSERT INTO t1 (d,d0,d1_1,d10_2,d60_10,n,n0_0,n1,n20_4,n65_4,pk) VALUES (9999999999.0,9999999999.0,0.9,99999999.99,99999999999999999999999999999999999999999999999999.9999999999,9999999999.0,9999999999.0,9.0,9999999999999999.9999,9999999999999999999999999999999999999999999999999999999999999.9999,3); +SELECT d,d0,d1_1,d10_2,d60_10,n,n0_0,n1,n20_4,n65_4 FROM t1; +d d0 d1_1 
d10_2 d60_10 n n0_0 n1 n20_4 n65_4 +0 0 0.0 0.00 0.0000000000 0 0 0 0.0000 0.0000 +100 123456 0.3 40000.25 123456789123456789.1000100000 1024 7000 8 999999.9000 9223372036854775807.0000 +9999999999 9999999999 0.9 99999999.99 99999999999999999999999999999999999999999999999999.9999999999 9999999999 9999999999 9 9999999999999999.9999 9999999999999999999999999999999999999999999999999999999999999.9999 +INSERT INTO t1 (d,d0,d1_1,d10_2,d60_10,n,n0_0,n1,n20_4,n65_4,pk) VALUES (-100,-123456,-0.3,-40000.25,-123456789123456789.10001,-1024,-7000.0,-8.0,-999999.9,-9223372036854775807,4); +INSERT INTO t1 (d,d0,d1_1,d10_2,d60_10,n,n0_0,n1,n20_4,n65_4,pk) VALUES (-9999999999.0,-9999999999.0,-0.9,-99999999.99,-99999999999999999999999999999999999999999999999999.9999999999,-9999999999.0,-9999999999.0,-9.0,-9999999999999999.9999,-9999999999999999999999999999999999999999999999999999999999999.9999,5); +SELECT d,d0,d1_1,d10_2,d60_10,n,n0_0,n1,n20_4,n65_4 FROM t1; +d d0 d1_1 d10_2 d60_10 n n0_0 n1 n20_4 n65_4 +-100 -123456 -0.3 -40000.25 -123456789123456789.1000100000 -1024 -7000 -8 -999999.9000 -9223372036854775807.0000 +-9999999999 -9999999999 -0.9 -99999999.99 -99999999999999999999999999999999999999999999999999.9999999999 -9999999999 -9999999999 -9 -9999999999999999.9999 -9999999999999999999999999999999999999999999999999999999999999.9999 +0 0 0.0 0.00 0.0000000000 0 0 0 0.0000 0.0000 +100 123456 0.3 40000.25 123456789123456789.1000100000 1024 7000 8 999999.9000 9223372036854775807.0000 +9999999999 9999999999 0.9 99999999.99 99999999999999999999999999999999999999999999999999.9999999999 9999999999 9999999999 9 9999999999999999.9999 9999999999999999999999999999999999999999999999999999999999999.9999 +SELECT d,d0,d1_1,d10_2,d60_10,n,n0_0,n1,n20_4,n65_4 FROM t1 WHERE n20_4 = 9999999999999999.9999 OR d < 100; +d d0 d1_1 d10_2 d60_10 n n0_0 n1 n20_4 n65_4 +-100 -123456 -0.3 -40000.25 -123456789123456789.1000100000 -1024 -7000 -8 -999999.9000 -9223372036854775807.0000 +-9999999999 -9999999999 
-0.9 -99999999.99 -99999999999999999999999999999999999999999999999999.9999999999 -9999999999 -9999999999 -9 -9999999999999999.9999 -9999999999999999999999999999999999999999999999999999999999999.9999 +0 0 0.0 0.00 0.0000000000 0 0 0 0.0000 0.0000 +9999999999 9999999999 0.9 99999999.99 99999999999999999999999999999999999999999999999999.9999999999 9999999999 9999999999 9 9999999999999999.9999 9999999999999999999999999999999999999999999999999999999999999.9999 +INSERT INTO t1 (d,d0,d1_1,d10_2,d60_10,n,n0_0,n1,n20_4,n65_4,pk) VALUES ( +9999999999999999999999999999999999999999999999999999999999999.9999, +9999999999999999999999999999999999999999999999999999999999999.9999, +9999999999999999999999999999999999999999999999999999999999999.9999, +9999999999999999999999999999999999999999999999999999999999999.9999, +9999999999999999999999999999999999999999999999999999999999999.9999, +9999999999999999999999999999999999999999999999999999999999999.9999, +9999999999999999999999999999999999999999999999999999999999999.9999, +9999999999999999999999999999999999999999999999999999999999999.9999, +9999999999999999999999999999999999999999999999999999999999999.9999, +9999999999999999999999999999999999999999999999999999999999999.9999, +6 +); +Warnings: +Warning 1264 Out of range value for column 'd' at row 1 +Warning 1264 Out of range value for column 'd0' at row 1 +Warning 1264 Out of range value for column 'd1_1' at row 1 +Warning 1264 Out of range value for column 'd10_2' at row 1 +Warning 1264 Out of range value for column 'd60_10' at row 1 +Warning 1264 Out of range value for column 'n' at row 1 +Warning 1264 Out of range value for column 'n0_0' at row 1 +Warning 1264 Out of range value for column 'n1' at row 1 +Warning 1264 Out of range value for column 'n20_4' at row 1 +SELECT d,d0,d1_1,d10_2,d60_10,n,n0_0,n1,n20_4,n65_4 FROM t1; +d d0 d1_1 d10_2 d60_10 n n0_0 n1 n20_4 n65_4 +-100 -123456 -0.3 -40000.25 -123456789123456789.1000100000 -1024 -7000 -8 -999999.9000 -9223372036854775807.0000 
+-9999999999 -9999999999 -0.9 -99999999.99 -99999999999999999999999999999999999999999999999999.9999999999 -9999999999 -9999999999 -9 -9999999999999999.9999 -9999999999999999999999999999999999999999999999999999999999999.9999 +0 0 0.0 0.00 0.0000000000 0 0 0 0.0000 0.0000 +100 123456 0.3 40000.25 123456789123456789.1000100000 1024 7000 8 999999.9000 9223372036854775807.0000 +9999999999 9999999999 0.9 99999999.99 99999999999999999999999999999999999999999999999999.9999999999 9999999999 9999999999 9 9999999999999999.9999 9999999999999999999999999999999999999999999999999999999999999.9999 +9999999999 9999999999 0.9 99999999.99 99999999999999999999999999999999999999999999999999.9999999999 9999999999 9999999999 9 9999999999999999.9999 9999999999999999999999999999999999999999999999999999999999999.9999 +INSERT INTO t1 (d,d0,d1_1,d10_2,d60_10,n,n0_0,n1,n20_4,n65_4,pk) VALUES (10000000000.0,10000000000.0,1.1,100000000.99,100000000000000000000000000000000000000000000000000.0,10000000000.0,10000000000.0,10.0,10000000000000000.9999,10000000000000000000000000000000000000000000000000000000000000.9999,7); +Warnings: +Warning 1264 Out of range value for column 'd' at row 1 +Warning 1264 Out of range value for column 'd0' at row 1 +Warning 1264 Out of range value for column 'd1_1' at row 1 +Warning 1264 Out of range value for column 'd10_2' at row 1 +Warning 1264 Out of range value for column 'd60_10' at row 1 +Warning 1264 Out of range value for column 'n' at row 1 +Warning 1264 Out of range value for column 'n0_0' at row 1 +Warning 1264 Out of range value for column 'n1' at row 1 +Warning 1264 Out of range value for column 'n20_4' at row 1 +Warning 1264 Out of range value for column 'n65_4' at row 1 +SELECT d,d0,d1_1,d10_2,d60_10,n,n0_0,n1,n20_4,n65_4 FROM t1; +d d0 d1_1 d10_2 d60_10 n n0_0 n1 n20_4 n65_4 +-100 -123456 -0.3 -40000.25 -123456789123456789.1000100000 -1024 -7000 -8 -999999.9000 -9223372036854775807.0000 +-9999999999 -9999999999 -0.9 -99999999.99 
-99999999999999999999999999999999999999999999999999.9999999999 -9999999999 -9999999999 -9 -9999999999999999.9999 -9999999999999999999999999999999999999999999999999999999999999.9999 +0 0 0.0 0.00 0.0000000000 0 0 0 0.0000 0.0000 +100 123456 0.3 40000.25 123456789123456789.1000100000 1024 7000 8 999999.9000 9223372036854775807.0000 +9999999999 9999999999 0.9 99999999.99 99999999999999999999999999999999999999999999999999.9999999999 9999999999 9999999999 9 9999999999999999.9999 9999999999999999999999999999999999999999999999999999999999999.9999 +9999999999 9999999999 0.9 99999999.99 99999999999999999999999999999999999999999999999999.9999999999 9999999999 9999999999 9 9999999999999999.9999 9999999999999999999999999999999999999999999999999999999999999.9999 +9999999999 9999999999 0.9 99999999.99 99999999999999999999999999999999999999999999999999.9999999999 9999999999 9999999999 9 9999999999999999.9999 9999999999999999999999999999999999999999999999999999999999999.9999 +INSERT INTO t1 (d,d0,d1_1,d10_2,d60_10,n,n0_0,n1,n20_4,n65_4,pk) VALUES (9999999999.1,9999999999.1,1.9,99999999.001,99999999999999999999999999999999999999999999999999.99999999991,9999999999.1,9999999999.1,9.1,9999999999999999.00001,9999999999999999999999999999999999999999999999999999999999999.11111,8); +Warnings: +Note 1265 Data truncated for column 'd' at row 1 +Note 1265 Data truncated for column 'd0' at row 1 +Warning 1264 Out of range value for column 'd1_1' at row 1 +Note 1265 Data truncated for column 'd10_2' at row 1 +Note 1265 Data truncated for column 'd60_10' at row 1 +Note 1265 Data truncated for column 'n' at row 1 +Note 1265 Data truncated for column 'n0_0' at row 1 +Note 1265 Data truncated for column 'n1' at row 1 +Note 1265 Data truncated for column 'n20_4' at row 1 +Note 1265 Data truncated for column 'n65_4' at row 1 +SELECT d,d0,d1_1,d10_2,d60_10,n,n0_0,n1,n20_4,n65_4 FROM t1; +d d0 d1_1 d10_2 d60_10 n n0_0 n1 n20_4 n65_4 +-100 -123456 -0.3 -40000.25 -123456789123456789.1000100000 -1024 
-7000 -8 -999999.9000 -9223372036854775807.0000 +-9999999999 -9999999999 -0.9 -99999999.99 -99999999999999999999999999999999999999999999999999.9999999999 -9999999999 -9999999999 -9 -9999999999999999.9999 -9999999999999999999999999999999999999999999999999999999999999.9999 +0 0 0.0 0.00 0.0000000000 0 0 0 0.0000 0.0000 +100 123456 0.3 40000.25 123456789123456789.1000100000 1024 7000 8 999999.9000 9223372036854775807.0000 +9999999999 9999999999 0.9 99999999.00 99999999999999999999999999999999999999999999999999.9999999999 9999999999 9999999999 9 9999999999999999.0000 9999999999999999999999999999999999999999999999999999999999999.1111 +9999999999 9999999999 0.9 99999999.99 99999999999999999999999999999999999999999999999999.9999999999 9999999999 9999999999 9 9999999999999999.9999 9999999999999999999999999999999999999999999999999999999999999.9999 +9999999999 9999999999 0.9 99999999.99 99999999999999999999999999999999999999999999999999.9999999999 9999999999 9999999999 9 9999999999999999.9999 9999999999999999999999999999999999999999999999999999999999999.9999 +9999999999 9999999999 0.9 99999999.99 99999999999999999999999999999999999999999999999999.9999999999 9999999999 9999999999 9 9999999999999999.9999 9999999999999999999999999999999999999999999999999999999999999.9999 +ALTER TABLE t1 ADD COLUMN n66 NUMERIC(66) ; +ERROR 42000: Too big precision 66 specified for 'n66'. Maximum is 65 +ALTER TABLE t1 ADD COLUMN n66_6 DECIMAL(66,6) ; +ERROR 42000: Too big precision 66 specified for 'n66_6'. Maximum is 65 +ALTER TABLE t1 ADD COLUMN n66_66 DECIMAL(66,66) ; +ERROR 42000: Too big scale 66 specified for 'n66_66'. 
Maximum is 38 +DROP TABLE t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/type_fixed_indexes.result b/storage/rocksdb/mysql-test/rocksdb/r/type_fixed_indexes.result new file mode 100644 index 0000000000000..3f6a0f5d2b9af --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/type_fixed_indexes.result @@ -0,0 +1,129 @@ +SET @ORIG_PAUSE_BACKGROUND_WORK = @@ROCKSDB_PAUSE_BACKGROUND_WORK; +SET GLOBAL ROCKSDB_PAUSE_BACKGROUND_WORK = 1; +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 ( +d1 DECIMAL(10,2) PRIMARY KEY, +d2 DECIMAL(60,10), +n1 NUMERIC, +n2 NUMERIC(65,4) +) ENGINE=rocksdb; +SHOW INDEX IN t1; +Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment +t1 0 PRIMARY 1 d1 A 1000 NULL NULL LSMTREE +INSERT INTO t1 (d1,d2,n1,n2) VALUES +(10.22,60.12345,123456,14.3456), +(10.0,60.12345,123456,14), +(11.14,15,123456,13), +(100,100,1,2), +(0,0,0,0), +(4540424564.23,3343303441.0,12,13), +(15,17,23,100000); +Warnings: +Warning 1264 Out of range value for column 'd1' at row 6 +EXPLAIN SELECT d1 FROM t1 ORDER BY d1 DESC; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 index NULL PRIMARY 5 NULL # Using index +SELECT d1 FROM t1 ORDER BY d1 DESC; +d1 +99999999.99 +100.00 +15.00 +11.14 +10.22 +10.00 +0.00 +EXPLAIN SELECT d1 FROM t1 IGNORE INDEX FOR ORDER BY (PRIMARY) ORDER BY d1 DESC; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 index NULL PRIMARY 5 NULL # Using index; Using filesort +SELECT d1 FROM t1 IGNORE INDEX FOR ORDER BY (PRIMARY) ORDER BY d1 DESC; +d1 +99999999.99 +100.00 +15.00 +11.14 +10.22 +10.00 +0.00 +DROP TABLE t1; +CREATE TABLE t1 ( +d1 DECIMAL(10,2), +d2 DECIMAL(60,10), +n1 NUMERIC, +n2 NUMERIC(65,4), +pk NUMERIC PRIMARY KEY, +UNIQUE INDEX n1_n2 (n1,n2) +) ENGINE=rocksdb; +SHOW INDEX IN t1; +Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment +t1 0 
PRIMARY 1 pk A 1000 NULL NULL LSMTREE +t1 0 n1_n2 1 n1 A 500 NULL NULL YES LSMTREE +t1 0 n1_n2 2 n2 A 1000 NULL NULL YES LSMTREE +INSERT INTO t1 (d1,d2,n1,n2,pk) VALUES +(10.22,60.12345,123456,14.3456,1), +(10.0,60.12345,123456,14,2), +(11.14,15,123456,13,3), +(100,100,1,2,4), +(0,0,0,0,5), +(4540424564.23,3343303441.0,12,13,6), +(15,17,23,100000,7); +Warnings: +Warning 1264 Out of range value for column 'd1' at row 6 +EXPLAIN SELECT DISTINCT n1+n2 FROM t1; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 index NULL n1_n2 37 NULL # Using index; Using temporary +SELECT DISTINCT n1+n2 FROM t1; +n1+n2 +0.0000 +100023.0000 +123469.0000 +123470.0000 +123470.3456 +25.0000 +3.0000 +DROP TABLE t1; +CREATE TABLE t1 ( +d1 DECIMAL(10,2), +d2 DECIMAL(60,10), +n1 NUMERIC, +n2 NUMERIC(65,4), +pk DECIMAL(20,10) PRIMARY KEY, +INDEX (d2) +) ENGINE=rocksdb; +SHOW INDEX IN t1; +Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment +t1 0 PRIMARY 1 pk A 1000 NULL NULL LSMTREE +t1 1 d2 1 d2 A 500 NULL NULL YES LSMTREE +INSERT INTO t1 (d1,d2,n1,n2,pk) VALUES +(10.22,60.12345,123456,14.3456,1), +(10.0,60.12345,123456,14,2), +(11.14,15,123456,13,3), +(100,100,1,2,4), +(0,0,0,0,5), +(4540424564.23,3343303441.0,12,13,6), +(15,17,23,100000,7); +Warnings: +Warning 1264 Out of range value for column 'd1' at row 6 +EXPLAIN SELECT d2, COUNT(*) FROM t1 GROUP BY d2; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 index NULL d2 29 NULL # Using index +SELECT d2, COUNT(*) FROM t1 GROUP BY d2; +d2 COUNT(*) +0.0000000000 1 +100.0000000000 1 +15.0000000000 1 +17.0000000000 1 +3343303441.0000000000 1 +60.1234500000 2 +EXPLAIN SELECT d2, COUNT(*) FROM t1 IGNORE INDEX FOR GROUP BY (d2) GROUP BY d2; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 index NULL d2 29 NULL # Using index; Using temporary; Using filesort +SELECT d2, COUNT(*) FROM 
t1 IGNORE INDEX FOR GROUP BY (d2) GROUP BY d2; +d2 COUNT(*) +0.0000000000 1 +100.0000000000 1 +15.0000000000 1 +17.0000000000 1 +3343303441.0000000000 1 +60.1234500000 2 +DROP TABLE t1; +SET GLOBAL ROCKSDB_PAUSE_BACKGROUND_WORK = @ORIG_PAUSE_BACKGROUND_WORK; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/type_float.result b/storage/rocksdb/mysql-test/rocksdb/r/type_float.result new file mode 100644 index 0000000000000..fbb44d1552c81 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/type_float.result @@ -0,0 +1,304 @@ +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 ( +f FLOAT , +f0 FLOAT(0) , +r1_1 REAL(1,1) , +f23_0 FLOAT(23) , +f20_3 FLOAT(20,3) , +d DOUBLE , +d1_0 DOUBLE(1,0) , +d10_10 DOUBLE PRECISION (10,10) , +d53 DOUBLE(53,0) , +d53_10 DOUBLE(53,10) , +pk DOUBLE PRIMARY KEY +) ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +f float YES NULL +f0 float YES NULL +r1_1 double(1,1) YES NULL +f23_0 float YES NULL +f20_3 float(20,3) YES NULL +d double YES NULL +d1_0 double(1,0) YES NULL +d10_10 double(10,10) YES NULL +d53 double(53,0) YES NULL +d53_10 double(53,10) YES NULL +pk double NO PRI NULL +INSERT INTO t1 (f,f0,r1_1,f23_0,f20_3,d,d1_0,d10_10,d53,d53_10,pk) VALUES (12345.12345,12345.12345,0.9,123456789.123,56789.987,11111111.111,8.0,0.0123456789,1234566789123456789,99999999999999999.99999999,1); +SELECT f,f0,r1_1,f23_0,f20_3,d,d1_0,d10_10,d53,d53_10 FROM t1; +f 12345.1 +d 11111111.111 +d10_10 0.0123456789 +d1_0 8 +d53 1234566789123456800 +d53_10 100000000000000000.0000000000 +f0 12345.1 +f20_3 56789.988 +f23_0 123457000 +r1_1 0.9 +INSERT INTO t1 (f,f0,r1_1,f23_0,f20_3,d,d1_0,d10_10,d53,d53_10,pk) VALUES (0,0,0,0,0,0,0,0,0,0,2); +INSERT INTO t1 (f,f0,r1_1,f23_0,f20_3,d,d1_0,d10_10,d53,d53_10,pk) VALUES ( +99999999999999999999999999999999999999, +99999999999999999999999999999999999999.9999999999999999, +0.9, +99999999999999999999999999999999999999.9, +99999999999999999.999, 
+999999999999999999999999999999999999999999999999999999999999999999999999999999999, +9, +0.9999999999, +1999999999999999999999999999999999999999999999999999999, +19999999999999999999999999999999999999999999.9999999999, +3 +); +Warnings: +Warning 1264 Out of range value for column 'd53' at row 1 +Warning 1264 Out of range value for column 'd53_10' at row 1 +SELECT f,f0,r1_1,f23_0,f20_3,d,d1_0,d10_10,d53,d53_10 FROM t1; +f 12345.1 +d 0 +d 11111111.111 +d 1e81 +d10_10 0.0000000000 +d10_10 0.0123456789 +d10_10 0.9999999999 +d1_0 0 +d1_0 8 +d1_0 9 +d53 0 +d53 100000000000000000000000000000000000000000000000000000 +d53 1234566789123456800 +d53_10 0.0000000000 +d53_10 100000000000000000.0000000000 +d53_10 10000000000000000000000000000000000000000000.0000000000 +f 0 +f 1e38 +f0 0 +f0 12345.1 +f0 1e38 +f20_3 0.000 +f20_3 56789.988 +f20_3 99999998430674940.000 +f23_0 0 +f23_0 123457000 +f23_0 1e38 +r1_1 0.0 +r1_1 0.9 +r1_1 0.9 +INSERT INTO t1 (f,f0,r1_1,f23_0,f20_3,d,d1_0,d10_10,d53,d53_10,pk) VALUES (-999999999999999999999999,-99999999999.999999999999,-0.9,-999.99999999999999999999,-99999999999999999.999,-999999999999999999999999999999999999999999999999999999999999-0.999,-9,-.9999999999,-999999999999999999999999999999.99999999999999999999999,-9999999999999999999999999999999999999999999.9999999999,4); +SELECT f,f0,r1_1,f23_0,f20_3,d,d1_0,d10_10,d53,d53_10 FROM t1; +f 12345.1 +d -1e60 +d 0 +d 11111111.111 +d 1e81 +d10_10 -0.9999999999 +d10_10 0.0000000000 +d10_10 0.0123456789 +d10_10 0.9999999999 +d1_0 -9 +d1_0 0 +d1_0 8 +d1_0 9 +d53 -1000000000000000000000000000000 +d53 0 +d53 100000000000000000000000000000000000000000000000000000 +d53 1234566789123456800 +d53_10 -10000000000000000000000000000000000000000000.0000000000 +d53_10 0.0000000000 +d53_10 100000000000000000.0000000000 +d53_10 10000000000000000000000000000000000000000000.0000000000 +f -1e24 +f 0 +f 1e38 +f0 -100000000000 +f0 0 +f0 12345.1 +f0 1e38 +f20_3 -99999998430674940.000 +f20_3 0.000 +f20_3 56789.988 +f20_3 
99999998430674940.000 +f23_0 -1000 +f23_0 0 +f23_0 123457000 +f23_0 1e38 +r1_1 -0.9 +r1_1 0.0 +r1_1 0.9 +r1_1 0.9 +SELECT MAX(f), MAX(f0), MAX(r1_1), MAX(f23_0), MAX(f20_3), MAX(d), MAX(d1_0), MAX(d10_10), MAX(d53), MAX(d53_10) FROM t1; +MAX(f) 9.999999680285692e37 +MAX(d) 1e81 +MAX(d10_10) 0.9999999999 +MAX(d1_0) 9 +MAX(d53) 100000000000000000000000000000000000000000000000000000 +MAX(d53_10) 10000000000000000000000000000000000000000000.0000000000 +MAX(f0) 9.999999680285692e37 +MAX(f20_3) 99999998430674940.000 +MAX(f23_0) 9.999999680285692e37 +MAX(r1_1) 0.9 +INSERT INTO t1 (f,f0,r1_1,f23_0,f20_3,d,d1_0,d10_10,d53,d53_10,pk) VALUES ( +9999999999999999999999999999999999999999999999999999999999999.9999, +9999999999999999999999999999999999999999999999999999999999999.9999, +9999999999999999999999999999999999999999999999999999999999999.9999, +9999999999999999999999999999999999999999999999999999999999999.9999, +9999999999999999999999999999999999999999999999999999999999999.9999, +9999999999999999999999999999999999999999999999999999999999999.9999, +9999999999999999999999999999999999999999999999999999999999999.9999, +9999999999999999999999999999999999999999999999999999999999999.9999, +9999999999999999999999999999999999999999999999999999999999999.9999, +9999999999999999999999999999999999999999999999999999999999999.9999, +5 +); +Warnings: +Warning 1264 Out of range value for column 'f' at row 1 +Warning 1264 Out of range value for column 'f0' at row 1 +Warning 1264 Out of range value for column 'r1_1' at row 1 +Warning 1264 Out of range value for column 'f23_0' at row 1 +Warning 1264 Out of range value for column 'f20_3' at row 1 +Warning 1264 Out of range value for column 'd1_0' at row 1 +Warning 1264 Out of range value for column 'd10_10' at row 1 +Warning 1264 Out of range value for column 'd53' at row 1 +Warning 1264 Out of range value for column 'd53_10' at row 1 +SELECT f,f0,r1_1,f23_0,f20_3,d,d1_0,d10_10,d53,d53_10 FROM t1; +f 12345.1 +d -1e60 +d 0 +d 11111111.111 +d 
1e61 +d 1e81 +d10_10 -0.9999999999 +d10_10 0.0000000000 +d10_10 0.0123456789 +d10_10 0.9999999999 +d10_10 0.9999999999 +d1_0 -9 +d1_0 0 +d1_0 8 +d1_0 9 +d1_0 9 +d53 -1000000000000000000000000000000 +d53 0 +d53 100000000000000000000000000000000000000000000000000000 +d53 100000000000000000000000000000000000000000000000000000 +d53 1234566789123456800 +d53_10 -10000000000000000000000000000000000000000000.0000000000 +d53_10 0.0000000000 +d53_10 100000000000000000.0000000000 +d53_10 10000000000000000000000000000000000000000000.0000000000 +d53_10 10000000000000000000000000000000000000000000.0000000000 +f -1e24 +f 0 +f 1e38 +f 3.40282e38 +f0 -100000000000 +f0 0 +f0 12345.1 +f0 1e38 +f0 3.40282e38 +f20_3 -99999998430674940.000 +f20_3 0.000 +f20_3 56789.988 +f20_3 99999998430674940.000 +f20_3 99999998430674940.000 +f23_0 -1000 +f23_0 0 +f23_0 123457000 +f23_0 1e38 +f23_0 3.40282e38 +r1_1 -0.9 +r1_1 0.0 +r1_1 0.9 +r1_1 0.9 +r1_1 0.9 +INSERT INTO t1 (f,f0,r1_1,f23_0,f20_3,d,d1_0,d10_10,d53,d53_10,pk) VALUES ( +999999999999999999999999999999999999999, +999999999999999999999999999999999999999.9999999999999999, +1.9, +999999999999999999999999999999999999999.9, +999999999999999999.999, +9999999999999999999999999999999999999999999999999999999999999999999999999999999999, +99, +1.9999999999, +1999999999999999999999999999999999999999999999999999999, +19999999999999999999999999999999999999999999.9999999999, +6 +); +Warnings: +Warning 1916 Got overflow when converting '' to DECIMAL. 
Value truncated +Warning 1264 Out of range value for column 'f' at row 1 +Warning 1264 Out of range value for column 'f0' at row 1 +Warning 1264 Out of range value for column 'r1_1' at row 1 +Warning 1264 Out of range value for column 'f23_0' at row 1 +Warning 1264 Out of range value for column 'f20_3' at row 1 +Warning 1264 Out of range value for column 'd1_0' at row 1 +Warning 1264 Out of range value for column 'd10_10' at row 1 +Warning 1264 Out of range value for column 'd53' at row 1 +Warning 1264 Out of range value for column 'd53_10' at row 1 +SELECT f,f0,r1_1,f23_0,f20_3,d,d1_0,d10_10,d53,d53_10 FROM t1; +f 12345.1 +d -1e60 +d 0 +d 11111111.111 +d 1e61 +d 1e65 +d 1e81 +d10_10 -0.9999999999 +d10_10 0.0000000000 +d10_10 0.0123456789 +d10_10 0.9999999999 +d10_10 0.9999999999 +d10_10 0.9999999999 +d1_0 -9 +d1_0 0 +d1_0 8 +d1_0 9 +d1_0 9 +d1_0 9 +d53 -1000000000000000000000000000000 +d53 0 +d53 100000000000000000000000000000000000000000000000000000 +d53 100000000000000000000000000000000000000000000000000000 +d53 100000000000000000000000000000000000000000000000000000 +d53 1234566789123456800 +d53_10 -10000000000000000000000000000000000000000000.0000000000 +d53_10 0.0000000000 +d53_10 100000000000000000.0000000000 +d53_10 10000000000000000000000000000000000000000000.0000000000 +d53_10 10000000000000000000000000000000000000000000.0000000000 +d53_10 10000000000000000000000000000000000000000000.0000000000 +f -1e24 +f 0 +f 1e38 +f 3.40282e38 +f 3.40282e38 +f0 -100000000000 +f0 0 +f0 12345.1 +f0 1e38 +f0 3.40282e38 +f0 3.40282e38 +f20_3 -99999998430674940.000 +f20_3 0.000 +f20_3 56789.988 +f20_3 99999998430674940.000 +f20_3 99999998430674940.000 +f20_3 99999998430674940.000 +f23_0 -1000 +f23_0 0 +f23_0 123457000 +f23_0 1e38 +f23_0 3.40282e38 +f23_0 3.40282e38 +r1_1 -0.9 +r1_1 0.0 +r1_1 0.9 +r1_1 0.9 +r1_1 0.9 +r1_1 0.9 +ALTER TABLE t1 ADD COLUMN d0_0 DOUBLE(0,0) ; +ALTER TABLE t1 ADD COLUMN n66_6 DECIMAL(256,1) ; +ERROR 42000: Too big precision 256 specified for 
'n66_6'. Maximum is 65 +ALTER TABLE t1 ADD COLUMN n66_66 DECIMAL(40,35) ; +DROP TABLE t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/type_float_indexes.result b/storage/rocksdb/mysql-test/rocksdb/r/type_float_indexes.result new file mode 100644 index 0000000000000..99d6bbe45b905 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/type_float_indexes.result @@ -0,0 +1,189 @@ +SET @ORIG_PAUSE_BACKGROUND_WORK = @@ROCKSDB_PAUSE_BACKGROUND_WORK; +SET GLOBAL ROCKSDB_PAUSE_BACKGROUND_WORK = 1; +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 ( +f FLOAT PRIMARY KEY, +r REAL(20,3), +d DOUBLE, +dp DOUBLE PRECISION (64,10) +) ENGINE=rocksdb; +SHOW INDEX IN t1; +Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment +t1 0 PRIMARY 1 f A 1000 NULL NULL LSMTREE +INSERT INTO t1 (f,r,d,dp) VALUES +(1.2345,1422.22,1.2345,1234567.89), +(0,0,0,0), +(-1,-1,-1,-1), +(17.5843,4953453454.44,29229114.0,1111111.23), +(4644,1422.22,466664.999,0.5); +EXPLAIN SELECT f FROM t1 ORDER BY f; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 index NULL PRIMARY 4 NULL # Using index +SELECT f FROM t1 ORDER BY f; +f +-1 +0 +1.2345 +17.5843 +4644 +EXPLAIN SELECT f FROM t1 IGNORE INDEX (PRIMARY) ORDER BY f; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 ALL NULL NULL NULL NULL # Using filesort +SELECT f FROM t1 IGNORE INDEX (PRIMARY) ORDER BY f; +f +-1 +0 +1.2345 +17.5843 +4644 +DROP TABLE t1; +CREATE TABLE t1 ( +f FLOAT, +r REAL(20,3), +d DOUBLE, +dp DOUBLE PRECISION (64,10), +pk DOUBLE PRIMARY KEY, +UNIQUE KEY r_dp (r,dp) +) ENGINE=rocksdb; +SHOW INDEX IN t1; +Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment +t1 0 PRIMARY 1 pk A 1000 NULL NULL LSMTREE +t1 0 r_dp 1 r A 500 NULL NULL YES LSMTREE +t1 0 r_dp 2 dp A 1000 NULL NULL YES LSMTREE +INSERT INTO t1 (f,r,d,dp,pk) VALUES 
+(1.2345,1422.22,1.2345,1234567.89,1), +(0,0,0,0,2), +(-1,-1,-1,-1,3), +(17.5843,4953453454.44,29229114.0,1111111.23,4), +(4644,1422.22,466664.999,0.5,5); +EXPLAIN SELECT r, dp FROM t1 WHERE r > 0 or dp > 0; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 index r_dp r_dp 18 NULL # Using where; Using index +SELECT r, dp FROM t1 WHERE r > 0 or dp > 0; +r dp +1422.220 0.5000000000 +1422.220 1234567.8900000000 +4953453454.440 1111111.2300000000 +DROP TABLE t1; +CREATE TABLE t1 ( +f FLOAT, +r REAL(20,3), +d DOUBLE, +dp DOUBLE PRECISION (64,10), +pk FLOAT PRIMARY KEY, +UNIQUE KEY(d) +) ENGINE=rocksdb; +SHOW INDEX IN t1; +Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment +t1 0 PRIMARY 1 pk A 1000 NULL NULL LSMTREE +t1 0 d 1 d A 1000 NULL NULL YES LSMTREE +INSERT INTO t1 (f,r,d,dp,pk) VALUES +(1.2345,1422.22,1.2345,1234567.89,1), +(0,0,0,0,2), +(-1,-1,-1,-1,3), +(17.5843,4953453454.44,29229114.0,1111111.23,4), +(4644,1422.22,466664.999,0.5,5); +EXPLAIN SELECT DISTINCT d FROM t1 ORDER BY d; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 index NULL d 9 NULL # Using index +SELECT DISTINCT d FROM t1 ORDER BY d; +d +-1 +0 +1.2345 +466664.999 +29229114 +DROP TABLE t1; +CREATE TABLE t1 ( +f FLOAT, +r REAL(20,3), +d DOUBLE, +dp DOUBLE PRECISION (64,10), +pk FLOAT PRIMARY KEY, +KEY(d) +) ENGINE=rocksdb; +SHOW INDEX IN t1; +Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment +t1 0 PRIMARY 1 pk A 1000 NULL NULL LSMTREE +t1 1 d 1 d A 500 NULL NULL YES LSMTREE +INSERT INTO t1 (f,r,d,dp,pk) VALUES +(1.2345,1422.22,1.2345,1234567.89,1), +(0,0,0,0,2), +(-1,-1,-1,-1,3), +(17.5843,4953453454.44,29229114.0,1111111.23,4), +(4644,1422.22,466664.999,0.5,5); +EXPLAIN SELECT DISTINCT d FROM t1 ORDER BY d; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE 
t1 index NULL d 9 NULL # Using index +SELECT DISTINCT d FROM t1 ORDER BY d; +d +-1 +0 +1.2345 +466664.999 +29229114 +DROP TABLE t1; +CREATE TABLE t1 ( +f FLOAT, +r REAL(20,3), +d DOUBLE, +dp DOUBLE PRECISION (64,10), +pk FLOAT PRIMARY KEY, +UNIQUE KEY(f) +) ENGINE=rocksdb; +SHOW INDEX IN t1; +Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment +t1 0 PRIMARY 1 pk A 1000 NULL NULL LSMTREE +t1 0 f 1 f A 1000 NULL NULL YES LSMTREE +INSERT INTO t1 (f,r,d,dp,pk) VALUES +(1.2345,1422.22,1.2345,1234567.89,1), +(0,0,0,0,2), +(-1,-1,-1,-1,3), +(17.5843,4953453454.44,29229114.0,1111111.23,4), +(4644,1422.22,466664.999,0.5,5); +INSERT INTO t1 (f,r,d,dp,pk) VALUES +(1.2345,0,0,0,6); +ERROR 23000: Duplicate entry '1.2345' for key 'f' +EXPLAIN SELECT DISTINCT f FROM t1 ORDER BY f; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 index NULL f 5 NULL # Using index +SELECT DISTINCT f FROM t1 ORDER BY f; +f +-1 +0 +1.2345 +17.5843 +4644 +DROP TABLE t1; +CREATE TABLE t1 ( +f FLOAT, +r REAL(20,3), +d DOUBLE, +dp DOUBLE PRECISION (64,10), +pk FLOAT PRIMARY KEY, +KEY(f) +) ENGINE=rocksdb; +SHOW INDEX IN t1; +Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment +t1 0 PRIMARY 1 pk A 1000 NULL NULL LSMTREE +t1 1 f 1 f A 500 NULL NULL YES LSMTREE +INSERT INTO t1 (f,r,d,dp,pk) VALUES +(1.2345,1422.22,1.2345,1234567.89,1), +(0,0,0,0,2), +(-1,-1,-1,-1,3), +(17.5843,4953453454.44,29229114.0,1111111.23,4), +(4644,1422.22,466664.999,0.5,5); +INSERT INTO t1 (f,r,d,dp,pk) VALUES +(1.2345,0,0,0,6); +EXPLAIN SELECT DISTINCT f FROM t1 ORDER BY f; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 index NULL f 5 NULL # Using index +SELECT DISTINCT f FROM t1 ORDER BY f; +f +-1 +0 +1.2345 +17.5843 +4644 +DROP TABLE t1; +SET GLOBAL ROCKSDB_PAUSE_BACKGROUND_WORK = @ORIG_PAUSE_BACKGROUND_WORK; diff 
--git a/storage/rocksdb/mysql-test/rocksdb/r/type_int.result b/storage/rocksdb/mysql-test/rocksdb/r/type_int.result new file mode 100644 index 0000000000000..306042912d0ed --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/type_int.result @@ -0,0 +1,212 @@ +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 ( +i INT , +i0 INT(0) , +i1 INT(1) , +i20 INT(20) , +t TINYINT , +t0 TINYINT(0) , +t1 TINYINT(1) , +t20 TINYINT(20) , +s SMALLINT , +s0 SMALLINT(0) , +s1 SMALLINT(1) , +s20 SMALLINT(20) , +m MEDIUMINT , +m0 MEDIUMINT(0) , +m1 MEDIUMINT(1) , +m20 MEDIUMINT(20) , +b BIGINT , +b0 BIGINT(0) , +b1 BIGINT(1) , +b20 BIGINT(20) , +pk INT AUTO_INCREMENT PRIMARY KEY +) ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +i int(11) YES NULL +i0 int(11) YES NULL +i1 int(1) YES NULL +i20 int(20) YES NULL +t tinyint(4) YES NULL +t0 tinyint(4) YES NULL +t1 tinyint(1) YES NULL +t20 tinyint(20) YES NULL +s smallint(6) YES NULL +s0 smallint(6) YES NULL +s1 smallint(1) YES NULL +s20 smallint(20) YES NULL +m mediumint(9) YES NULL +m0 mediumint(9) YES NULL +m1 mediumint(1) YES NULL +m20 mediumint(20) YES NULL +b bigint(20) YES NULL +b0 bigint(20) YES NULL +b1 bigint(1) YES NULL +b20 bigint(20) YES NULL +pk int(11) NO PRI NULL auto_increment +INSERT INTO t1 (i,i0,i1,i20,t,t0,t1,t20,s,s0,s1,s20,m,m0,m1,m20,b,b0,b1,b20) VALUES (1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20); +INSERT INTO t1 (i,i0,i1,i20,t,t0,t1,t20,s,s0,s1,s20,m,m0,m1,m20,b,b0,b1,b20) VALUES (0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0); +INSERT INTO t1 (i,i0,i1,i20,t,t0,t1,t20,s,s0,s1,s20,m,m0,m1,m20,b,b0,b1,b20) VALUES (2147483647,2147483647,2147483647,2147483647,127,127,127,127,32767,32767,32767,32767,8388607,8388607,8388607,8388607,9223372036854775807,9223372036854775807,9223372036854775807,9223372036854775807); +SELECT i,i0,i1,i20,t,t0,t1,t20,s,s0,s1,s20,m,m0,m1,m20,b,b0,b1,b20 FROM t1; +i i0 i1 i20 t t0 t1 t20 s s0 s1 s20 m m0 m1 m20 b b0 b1 b20 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +1 2 3 4 
5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 +2147483647 2147483647 2147483647 2147483647 127 127 127 127 32767 32767 32767 32767 8388607 8388607 8388607 8388607 9223372036854775807 9223372036854775807 9223372036854775807 9223372036854775807 +INSERT INTO t1 (i,i0,i1,i20,t,t0,t1,t20,s,s0,s1,s20,m,m0,m1,m20,b,b0,b1,b20) VALUES (-2147483648,-2147483648,-2147483648,-2147483648,-128,-128,-128,-128,-32768,-32768,-32768,-32768,-8388608,-8388608,-8388608,-8388608,-9223372036854775808,-9223372036854775808,-9223372036854775808,-9223372036854775808); +INSERT INTO t1 (i,i0,i1,i20,t,t0,t1,t20,s,s0,s1,s20,m,m0,m1,m20,b,b0,b1,b20) VALUES (4294967295,4294967295,4294967295,4294967295,255,255,255,255,65535,65535,65535,65535,16777215,16777215,16777215,16777215,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615); +Warnings: +Warning 1264 Out of range value for column 'i' at row 1 +Warning 1264 Out of range value for column 'i0' at row 1 +Warning 1264 Out of range value for column 'i1' at row 1 +Warning 1264 Out of range value for column 'i20' at row 1 +Warning 1264 Out of range value for column 't' at row 1 +Warning 1264 Out of range value for column 't0' at row 1 +Warning 1264 Out of range value for column 't1' at row 1 +Warning 1264 Out of range value for column 't20' at row 1 +Warning 1264 Out of range value for column 's' at row 1 +Warning 1264 Out of range value for column 's0' at row 1 +Warning 1264 Out of range value for column 's1' at row 1 +Warning 1264 Out of range value for column 's20' at row 1 +Warning 1264 Out of range value for column 'm' at row 1 +Warning 1264 Out of range value for column 'm0' at row 1 +Warning 1264 Out of range value for column 'm1' at row 1 +Warning 1264 Out of range value for column 'm20' at row 1 +Warning 1264 Out of range value for column 'b' at row 1 +Warning 1264 Out of range value for column 'b0' at row 1 +Warning 1264 Out of range value for column 'b1' at row 1 +Warning 1264 Out of range value for column 'b20' 
at row 1 +SELECT i,i0,i1,i20,t,t0,t1,t20,s,s0,s1,s20,m,m0,m1,m20,b,b0,b1,b20 FROM t1; +i i0 i1 i20 t t0 t1 t20 s s0 s1 s20 m m0 m1 m20 b b0 b1 b20 +-2147483648 -2147483648 -2147483648 -2147483648 -128 -128 -128 -128 -32768 -32768 -32768 -32768 -8388608 -8388608 -8388608 -8388608 -9223372036854775808 -9223372036854775808 -9223372036854775808 -9223372036854775808 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 +2147483647 2147483647 2147483647 2147483647 127 127 127 127 32767 32767 32767 32767 8388607 8388607 8388607 8388607 9223372036854775807 9223372036854775807 9223372036854775807 9223372036854775807 +2147483647 2147483647 2147483647 2147483647 127 127 127 127 32767 32767 32767 32767 8388607 8388607 8388607 8388607 9223372036854775807 9223372036854775807 9223372036854775807 9223372036854775807 +INSERT INTO t1 (i,i0,i1,i20,t,t0,t1,t20,s,s0,s1,s20,m,m0,m1,m20,b,b0,b1,b20) VALUES (-2147483649,-2147483649,-2147483649,-2147483649,-129,-129,-129,-129,-32769,-32769,-32769,-32769,-8388609,-8388609,-8388609,-8388609,-9223372036854775809,-9223372036854775809,-9223372036854775809,-9223372036854775809); +Warnings: +Warning 1264 Out of range value for column 'i' at row 1 +Warning 1264 Out of range value for column 'i0' at row 1 +Warning 1264 Out of range value for column 'i1' at row 1 +Warning 1264 Out of range value for column 'i20' at row 1 +Warning 1264 Out of range value for column 't' at row 1 +Warning 1264 Out of range value for column 't0' at row 1 +Warning 1264 Out of range value for column 't1' at row 1 +Warning 1264 Out of range value for column 't20' at row 1 +Warning 1264 Out of range value for column 's' at row 1 +Warning 1264 Out of range value for column 's0' at row 1 +Warning 1264 Out of range value for column 's1' at row 1 +Warning 1264 Out of range value for column 's20' at row 1 +Warning 1264 Out of range value for column 'm' at row 1 +Warning 1264 Out of range value for column 'm0' at row 1 +Warning 1264 Out of 
range value for column 'm1' at row 1 +Warning 1264 Out of range value for column 'm20' at row 1 +Warning 1264 Out of range value for column 'b' at row 1 +Warning 1264 Out of range value for column 'b0' at row 1 +Warning 1264 Out of range value for column 'b1' at row 1 +Warning 1264 Out of range value for column 'b20' at row 1 +INSERT INTO t1 (i,i0,i1,i20,t,t0,t1,t20,s,s0,s1,s20,m,m0,m1,m20,b,b0,b1,b20) VALUES (4294967296,4294967296,4294967296,4294967296,256,256,256,256,65536,65536,65536,65536,16777216,16777216,16777216,16777216,18446744073709551616,18446744073709551616,18446744073709551616,18446744073709551616); +Warnings: +Warning 1264 Out of range value for column 'i' at row 1 +Warning 1264 Out of range value for column 'i0' at row 1 +Warning 1264 Out of range value for column 'i1' at row 1 +Warning 1264 Out of range value for column 'i20' at row 1 +Warning 1264 Out of range value for column 't' at row 1 +Warning 1264 Out of range value for column 't0' at row 1 +Warning 1264 Out of range value for column 't1' at row 1 +Warning 1264 Out of range value for column 't20' at row 1 +Warning 1264 Out of range value for column 's' at row 1 +Warning 1264 Out of range value for column 's0' at row 1 +Warning 1264 Out of range value for column 's1' at row 1 +Warning 1264 Out of range value for column 's20' at row 1 +Warning 1264 Out of range value for column 'm' at row 1 +Warning 1264 Out of range value for column 'm0' at row 1 +Warning 1264 Out of range value for column 'm1' at row 1 +Warning 1264 Out of range value for column 'm20' at row 1 +Warning 1264 Out of range value for column 'b' at row 1 +Warning 1264 Out of range value for column 'b0' at row 1 +Warning 1264 Out of range value for column 'b1' at row 1 +Warning 1264 Out of range value for column 'b20' at row 1 +INSERT INTO t1 (i,i0,i1,i20,t,t0,t1,t20,s,s0,s1,s20,m,m0,m1,m20,b,b0,b1,b20) SELECT b,b,b,b,b,b,b,b,b,b,b,b,b,b,b,b,b,b,b,b FROM t1 WHERE b IN (-9223372036854775808,9223372036854775807,18446744073709551615); 
+Warnings: +Warning 1264 Out of range value for column 'i' at row 8 +Warning 1264 Out of range value for column 'i0' at row 8 +Warning 1264 Out of range value for column 'i1' at row 8 +Warning 1264 Out of range value for column 'i20' at row 8 +Warning 1264 Out of range value for column 't' at row 8 +Warning 1264 Out of range value for column 't0' at row 8 +Warning 1264 Out of range value for column 't1' at row 8 +Warning 1264 Out of range value for column 't20' at row 8 +Warning 1264 Out of range value for column 's' at row 8 +Warning 1264 Out of range value for column 's0' at row 8 +Warning 1264 Out of range value for column 's1' at row 8 +Warning 1264 Out of range value for column 's20' at row 8 +Warning 1264 Out of range value for column 'm' at row 8 +Warning 1264 Out of range value for column 'm0' at row 8 +Warning 1264 Out of range value for column 'm1' at row 8 +Warning 1264 Out of range value for column 'm20' at row 8 +Warning 1264 Out of range value for column 'i' at row 9 +Warning 1264 Out of range value for column 'i0' at row 9 +Warning 1264 Out of range value for column 'i1' at row 9 +Warning 1264 Out of range value for column 'i20' at row 9 +Warning 1264 Out of range value for column 't' at row 9 +Warning 1264 Out of range value for column 't0' at row 9 +Warning 1264 Out of range value for column 't1' at row 9 +Warning 1264 Out of range value for column 't20' at row 9 +Warning 1264 Out of range value for column 's' at row 9 +Warning 1264 Out of range value for column 's0' at row 9 +Warning 1264 Out of range value for column 's1' at row 9 +Warning 1264 Out of range value for column 's20' at row 9 +Warning 1264 Out of range value for column 'm' at row 9 +Warning 1264 Out of range value for column 'm0' at row 9 +Warning 1264 Out of range value for column 'm1' at row 9 +Warning 1264 Out of range value for column 'm20' at row 9 +Warning 1264 Out of range value for column 'i' at row 10 +Warning 1264 Out of range value for column 'i0' at row 10 +Warning 1264 
Out of range value for column 'i1' at row 10 +Warning 1264 Out of range value for column 'i20' at row 10 +Warning 1264 Out of range value for column 't' at row 10 +Warning 1264 Out of range value for column 't0' at row 10 +Warning 1264 Out of range value for column 't1' at row 10 +Warning 1264 Out of range value for column 't20' at row 10 +Warning 1264 Out of range value for column 's' at row 10 +Warning 1264 Out of range value for column 's0' at row 10 +Warning 1264 Out of range value for column 's1' at row 10 +Warning 1264 Out of range value for column 's20' at row 10 +Warning 1264 Out of range value for column 'm' at row 10 +Warning 1264 Out of range value for column 'm0' at row 10 +Warning 1264 Out of range value for column 'm1' at row 10 +Warning 1264 Out of range value for column 'm20' at row 10 +Warning 1264 Out of range value for column 'i' at row 11 +Warning 1264 Out of range value for column 'i0' at row 11 +Warning 1264 Out of range value for column 'i1' at row 11 +Warning 1264 Out of range value for column 'i20' at row 11 +Warning 1264 Out of range value for column 't' at row 11 +Warning 1264 Out of range value for column 't0' at row 11 +Warning 1264 Out of range value for column 't1' at row 11 +Warning 1264 Out of range value for column 't20' at row 11 +Warning 1264 Out of range value for column 's' at row 11 +Warning 1264 Out of range value for column 's0' at row 11 +Warning 1264 Out of range value for column 's1' at row 11 +Warning 1264 Out of range value for column 's20' at row 11 +Warning 1264 Out of range value for column 'm' at row 11 +Warning 1264 Out of range value for column 'm0' at row 11 +Warning 1264 Out of range value for column 'm1' at row 11 +Warning 1264 Out of range value for column 'm20' at row 11 +SELECT i,i0,i1,i20,t,t0,t1,t20,s,s0,s1,s20,m,m0,m1,m20,b,b0,b1,b20 FROM t1; +i i0 i1 i20 t t0 t1 t20 s s0 s1 s20 m m0 m1 m20 b b0 b1 b20 +-2147483648 -2147483648 -2147483648 -2147483648 -128 -128 -128 -128 -32768 -32768 -32768 -32768 
-8388608 -8388608 -8388608 -8388608 -9223372036854775808 -9223372036854775808 -9223372036854775808 -9223372036854775808 +-2147483648 -2147483648 -2147483648 -2147483648 -128 -128 -128 -128 -32768 -32768 -32768 -32768 -8388608 -8388608 -8388608 -8388608 -9223372036854775808 -9223372036854775808 -9223372036854775808 -9223372036854775808 +-2147483648 -2147483648 -2147483648 -2147483648 -128 -128 -128 -128 -32768 -32768 -32768 -32768 -8388608 -8388608 -8388608 -8388608 -9223372036854775808 -9223372036854775808 -9223372036854775808 -9223372036854775808 +-2147483648 -2147483648 -2147483648 -2147483648 -128 -128 -128 -128 -32768 -32768 -32768 -32768 -8388608 -8388608 -8388608 -8388608 -9223372036854775808 -9223372036854775808 -9223372036854775808 -9223372036854775808 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 +2147483647 2147483647 2147483647 2147483647 127 127 127 127 32767 32767 32767 32767 8388607 8388607 8388607 8388607 9223372036854775807 9223372036854775807 9223372036854775807 9223372036854775807 +2147483647 2147483647 2147483647 2147483647 127 127 127 127 32767 32767 32767 32767 8388607 8388607 8388607 8388607 9223372036854775807 9223372036854775807 9223372036854775807 9223372036854775807 +2147483647 2147483647 2147483647 2147483647 127 127 127 127 32767 32767 32767 32767 8388607 8388607 8388607 8388607 9223372036854775807 9223372036854775807 9223372036854775807 9223372036854775807 +2147483647 2147483647 2147483647 2147483647 127 127 127 127 32767 32767 32767 32767 8388607 8388607 8388607 8388607 9223372036854775807 9223372036854775807 9223372036854775807 9223372036854775807 +2147483647 2147483647 2147483647 2147483647 127 127 127 127 32767 32767 32767 32767 8388607 8388607 8388607 8388607 9223372036854775807 9223372036854775807 9223372036854775807 9223372036854775807 +2147483647 2147483647 2147483647 2147483647 127 127 127 127 32767 32767 32767 32767 8388607 8388607 8388607 8388607 9223372036854775807 
9223372036854775807 9223372036854775807 9223372036854775807 +ALTER TABLE t1 ADD COLUMN i257 INT(257) ; +ERROR 42000: Display width out of range for 'i257' (max = 255) +DROP TABLE t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/type_int_indexes.result b/storage/rocksdb/mysql-test/rocksdb/r/type_int_indexes.result new file mode 100644 index 0000000000000..e4c47f04c24cf --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/type_int_indexes.result @@ -0,0 +1,99 @@ +SET @ORIG_PAUSE_BACKGROUND_WORK = @@ROCKSDB_PAUSE_BACKGROUND_WORK; +SET GLOBAL ROCKSDB_PAUSE_BACKGROUND_WORK = 1; +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 ( +i INT PRIMARY KEY, +t TINYINT, +s SMALLINT, +m MEDIUMINT, +b BIGINT +) ENGINE=rocksdb; +INSERT INTO t1 (i,t,s,m,b) VALUES (1,2,3,4,5),(1000,100,10000,1000000,1000000000000000000),(5,100,10000,1000000,100000000000000000),(2,3,4,5,6),(3,4,5,6,7),(101,102,103,104,105),(10001,103,10002,10003,10004),(10,11,12,13,14),(11,12,13,14,15),(12,13,14,15,16); +EXPLAIN SELECT i FROM t1 ORDER BY i; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 index NULL PRIMARY 4 NULL # Using index +SELECT i FROM t1 ORDER BY i; +i +1 +2 +3 +5 +10 +11 +12 +101 +1000 +10001 +DROP TABLE t1; +CREATE TABLE t1 ( +i INT, +t TINYINT, +s SMALLINT, +m MEDIUMINT, +b BIGINT, +pk SMALLINT AUTO_INCREMENT PRIMARY KEY, +INDEX s_m (s,m) +) ENGINE=rocksdb; +INSERT INTO t1 (i,t,s,m,b) VALUES (1,2,3,4,5),(1000,100,10000,1000000,1000000000000000000),(5,100,10000,1000000,100000000000000000),(2,3,4,5,6),(3,4,5,6,7),(101,102,103,104,105),(10001,103,10002,10003,10004),(10,11,12,13,14),(11,12,13,14,15),(12,13,14,15,16); +EXPLAIN SELECT s, m FROM t1 WHERE s != 10 AND m != 1; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 index s_m s_m 7 NULL # Using where; Using index +SELECT s, m FROM t1 WHERE s != 10 AND m != 1; +s m +10000 1000000 +10000 1000000 +10002 10003 +103 104 +12 13 +13 14 +14 15 +3 4 +4 5 +5 6 +DROP TABLE t1; +# RocksDB: 
unique indexes allowed +CREATE TABLE t1 ( +i INT, +t TINYINT, +s SMALLINT, +m MEDIUMINT, +b BIGINT, +pk MEDIUMINT AUTO_INCREMENT PRIMARY KEY, +UNIQUE KEY b_t (b,t) +) ENGINE=rocksdb; +INSERT INTO t1 (i,t,s,m,b) VALUES (1,2,3,4,5),(1000,100,10000,1000000,1000000000000000000),(5,100,10000,1000000,100000000000000000),(2,3,4,5,6),(3,4,5,6,7),(101,102,103,104,105),(10001,103,10002,10003,10004),(10,11,12,13,14),(11,12,13,14,15),(12,13,14,15,16); +SELECT b+t FROM t1 WHERE (b,t) IN ( SELECT b, t FROM t1 WHERE i>1 ) ORDER BY b+t; +b+t +9 +11 +25 +27 +29 +207 +10107 +100000000000000100 +1000000000000000100 +SELECT b+t FROM t1 FORCE INDEX (b_t) WHERE (b,t) IN ( SELECT b, t FROM t1 WHERE i>1 ) ORDER BY b+t; +b+t +9 +11 +25 +27 +29 +207 +10107 +100000000000000100 +1000000000000000100 +SELECT b+t FROM t1 IGNORE INDEX (b_t) WHERE (b,t) IN ( SELECT b, t FROM t1 WHERE i>1 ) ORDER BY b+t; +b+t +9 +11 +25 +27 +29 +207 +10107 +100000000000000100 +1000000000000000100 +DROP TABLE t1; +SET GLOBAL ROCKSDB_PAUSE_BACKGROUND_WORK = @ORIG_PAUSE_BACKGROUND_WORK; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/type_set.result b/storage/rocksdb/mysql-test/rocksdb/r/type_set.result new file mode 100644 index 0000000000000..5f875d147ec22 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/type_set.result @@ -0,0 +1,49 @@ +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 ( +a SET('') , +b SET('test1','test2','test3','test4','test5') , +c SET('01','02','03','04','05','06','07','08','09','10','11','12','13','14','15','16','17','18','19','20','21','22','23','24','25','26','27','28','29','30','31','32','33','34','35','36','37','38','39','40','41','42','43','44','45','46','47','48','49','50''51','52','53','54','55','56','57','58','59','60','61','62','63','64') , +PRIMARY KEY (c) +) ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +a set('') YES NULL +b set('test1','test2','test3','test4','test5') YES NULL +c 
set('01','02','03','04','05','06','07','08','09','10','11','12','13','14','15','16','17','18','19','20','21','22','23','24','25','26','27','28','29','30','31','32','33','34','35','36','37','38','39','40','41','42','43','44','45','46','47','48','49','50''51','52','53','54','55','56','57','58','59','60','61','62','63','64') NO PRI NULL +INSERT INTO t1 (a,b,c) VALUES +('','test2,test3','01,34,44,,23'), +('',5,2), +(',','test4,test2',''); +Warnings: +Warning 1265 Data truncated for column 'c' at row 1 +SELECT a,b,c FROM t1; +a b c + test1,test3 02 + test2,test3 01,23,34,44 + test2,test4 +INSERT INTO t1 (a,b,c) VALUES (0,'test6',-1); +Warnings: +Warning 1265 Data truncated for column 'b' at row 1 +Warning 1265 Data truncated for column 'c' at row 1 +SELECT a,b,c FROM t1; +a b c + 01,02,03,04,05,06,07,08,09,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50'51,52,53,54,55,56,57,58,59,60,61,62,63,64 + test1,test3 02 + test2,test3 01,23,34,44 + test2,test4 +ALTER TABLE t1 ADD COLUMN e SET('a','A') ; +Warnings: +Note 1291 Column 'e' has duplicated value 'a' in SET +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +a set('') YES NULL +b set('test1','test2','test3','test4','test5') YES NULL +c set('01','02','03','04','05','06','07','08','09','10','11','12','13','14','15','16','17','18','19','20','21','22','23','24','25','26','27','28','29','30','31','32','33','34','35','36','37','38','39','40','41','42','43','44','45','46','47','48','49','50''51','52','53','54','55','56','57','58','59','60','61','62','63','64') NO PRI NULL +e set('a','A') YES NULL +ALTER TABLE t1 ADD COLUMN f SET('1','2','3','4','5','6','7','8','9','a','b','c','d','e','f','g','h','i','j','k','l','m','n','o','p','q','r','s','t','u','v','w','x','y','z',' 
','11','12','13','14','15','16','17','18','19','1a','1b','1c','1d','1e','1f','1g','1h','1i','1j','1k','1l','1m','1n','1o','1p','1q','1r','1s','1t','1u','1v','1w','1x','1y','1z','20','21','22','23','24','25','26','27','28','29','2a','2b','2c','2d','2e','2f','2g','2h','2i','2j','2k','2l','2m','2n','2o','2p','2q','2r','2s','2t','2u','2v','2w','2x','2y','2z','30','31','32','33','34','35','36','37','38','39','3a','3b','3c','3d','3e','3f','3g','3h','3i') ; +ERROR HY000: Too many strings for column f and SET +SELECT a,b,c,e FROM t1 WHERE FIND_IN_SET('test2',b)>0 OR a != ''; +a b c e + test2,test3 01,23,34,44 NULL + test2,test4 NULL +DROP TABLE t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/type_set_indexes.result b/storage/rocksdb/mysql-test/rocksdb/r/type_set_indexes.result new file mode 100644 index 0000000000000..62a3004e5844e --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/type_set_indexes.result @@ -0,0 +1,80 @@ +SET @ORIG_PAUSE_BACKGROUND_WORK = @@ROCKSDB_PAUSE_BACKGROUND_WORK; +SET GLOBAL ROCKSDB_PAUSE_BACKGROUND_WORK = 1; +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 ( +a SET('N.America','S.America','Africa','Antarctica','Australia','Europe','Asia'), +b SET('test1','test2','test3','test4','test5'), +c SET('01','22','23','33','34','39','40','44','50','63','64') PRIMARY KEY +) ENGINE=rocksdb; +SHOW INDEX IN t1; +Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment +t1 0 PRIMARY 1 c A 1000 NULL NULL LSMTREE +INSERT INTO t1 (a,b,c) VALUES +('','test2,test3','01,34,44,23'), +('',5,2), +('N.America,Asia','test4,test2',''), +('Africa,Europe,Asia','test2,test3','01'), +('Antarctica','test3','34,44'), +('Asia','test5','50'), +('Europe,S.America','test1,','39'); +Warnings: +Warning 1265 Data truncated for column 'b' at row 7 +EXPLAIN SELECT c FROM t1 ORDER BY c; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 index NULL PRIMARY 2 NULL # Using index +SELECT 
c FROM t1 ORDER BY c; +c + +01 +22 +39 +34,44 +01,23,34,44 +50 +EXPLAIN SELECT c FROM t1 IGNORE INDEX FOR ORDER BY (PRIMARY) ORDER BY c; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 index NULL PRIMARY 2 NULL # Using index; Using filesort +SELECT c FROM t1 IGNORE INDEX FOR ORDER BY (PRIMARY) ORDER BY c; +c + +01 +22 +39 +34,44 +01,23,34,44 +50 +INSERT INTO t1 (a,b,c) VALUES ('Antarctica','test3','02'); +ERROR 23000: Duplicate entry '22' for key 'PRIMARY' +INSERT INTO t1 (a,b,c) VALUES ('','test1','34,44'); +ERROR 23000: Duplicate entry '34,44' for key 'PRIMARY' +DROP TABLE t1; +CREATE TABLE t1 ( +a SET('N.America','S.America','Africa','Antarctica','Australia','Europe','Asia'), +b SET('test1','test2','test3','test4','test5'), +c SET('01','22','23','33','34','39','40','44','50','63','64'), +pk SET('1','2','3','4','5','6','7','8','9') PRIMARY KEY, +INDEX(a) +) ENGINE=rocksdb; +SHOW INDEX IN t1; +Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment +t1 0 PRIMARY 1 pk A 1000 NULL NULL LSMTREE +t1 1 a 1 a A 500 NULL NULL YES LSMTREE +INSERT INTO t1 (a,b,c,pk) VALUES +('','test2,test3','01,34,44,23',1), +('',5,2,2), +('N.America,Asia','test4,test2','',3), +('Africa,Europe,Asia','test2,test3','01',4), +('Antarctica','test3','34,44',5), +('Asia','test5','50',6), +('Europe,S.America','test1,','39',7); +Warnings: +Warning 1265 Data truncated for column 'b' at row 7 +EXPLAIN SELECT a FROM t1 WHERE FIND_IN_SET('Europe',a) > 0; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 ALL NULL NULL NULL NULL # Using where +SELECT a FROM t1 WHERE FIND_IN_SET('Europe',a) > 0; +a +Africa,Europe,Asia +S.America,Europe +DROP TABLE t1; +SET GLOBAL ROCKSDB_PAUSE_BACKGROUND_WORK = @ORIG_PAUSE_BACKGROUND_WORK; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/type_text.result b/storage/rocksdb/mysql-test/rocksdb/r/type_text.result new file mode 100644 index 
0000000000000..4e118bb9bc578 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/type_text.result @@ -0,0 +1,57 @@ +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 ( +pk INT AUTO_INCREMENT PRIMARY KEY, +t TEXT , +t0 TEXT(0) , +t1 TEXT(1) , +t300 TEXT(300) , +tm TEXT(65535) , +t70k TEXT(70000) , +t17m TEXT(17000000) , +tt TINYTEXT , +m MEDIUMTEXT , +l LONGTEXT +) ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +pk int(11) NO PRI NULL auto_increment +t text YES NULL +t0 text YES NULL +t1 tinytext YES NULL +t300 text YES NULL +tm text YES NULL +t70k mediumtext YES NULL +t17m longtext YES NULL +tt tinytext YES NULL +m mediumtext YES NULL +l longtext YES NULL +INSERT INTO t1 (t,t0,t1,t300,tm,t70k,t17m,tt,m,l) VALUES +('','','','','','','','','',''), +('a','b','c','d','e','f','g','h','i','j'), +('test1','test2','test3','test4','test5','test6','test7','test8','test9','test10'), +( REPEAT('a',65535), REPEAT('b',65535), REPEAT('c',255), REPEAT('d',65535), REPEAT('e',65535), REPEAT('f',1048576), REPEAT('g',1048576), REPEAT('h',255), REPEAT('i',1048576), REPEAT('j',1048576) ); +SELECT LENGTH(t), LENGTH(t0), LENGTH(t1), LENGTH(t300), LENGTH(tm), LENGTH(t70k), LENGTH(t17m), LENGTH(tt), LENGTH(m), LENGTH(l) FROM t1; +LENGTH(t) LENGTH(t0) LENGTH(t1) LENGTH(t300) LENGTH(tm) LENGTH(t70k) LENGTH(t17m) LENGTH(tt) LENGTH(m) LENGTH(l) +0 0 0 0 0 0 0 0 0 0 +1 1 1 1 1 1 1 1 1 1 +5 5 5 5 5 5 5 5 5 6 +65535 65535 255 65535 65535 1048576 1048576 255 1048576 1048576 +INSERT INTO t1 (t,t0,t1,t300,tm,t70k,t17m,tt,m,l) VALUES +( REPEAT('a',65536), REPEAT('b',65536), REPEAT('c',256), REPEAT('d',65536), REPEAT('e',65536), REPEAT('f',1048576), REPEAT('g',1048576), REPEAT('h',256), REPEAT('i',1048576), REPEAT('j',1048576) ); +Warnings: +Warning 1265 Data truncated for column 't' at row 1 +Warning 1265 Data truncated for column 't0' at row 1 +Warning 1265 Data truncated for column 't1' at row 1 +Warning 1265 Data truncated for column 't300' at row 1 +Warning 1265 Data 
truncated for column 'tm' at row 1 +Warning 1265 Data truncated for column 'tt' at row 1 +SELECT LENGTH(t), LENGTH(t0), LENGTH(t1), LENGTH(t300), LENGTH(tm), LENGTH(t70k), LENGTH(t17m), LENGTH(tt), LENGTH(m), LENGTH(l) FROM t1; +LENGTH(t) LENGTH(t0) LENGTH(t1) LENGTH(t300) LENGTH(tm) LENGTH(t70k) LENGTH(t17m) LENGTH(tt) LENGTH(m) LENGTH(l) +0 0 0 0 0 0 0 0 0 0 +1 1 1 1 1 1 1 1 1 1 +5 5 5 5 5 5 5 5 5 6 +65535 65535 255 65535 65535 1048576 1048576 255 1048576 1048576 +65535 65535 255 65535 65535 1048576 1048576 255 1048576 1048576 +ALTER TABLE t1 ADD COLUMN ttt TEXT(4294967296) ; +ERROR 42000: Display width out of range for 'ttt' (max = 4294967295) +DROP TABLE t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/type_text_indexes.result b/storage/rocksdb/mysql-test/rocksdb/r/type_text_indexes.result new file mode 100644 index 0000000000000..22318316596d2 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/type_text_indexes.result @@ -0,0 +1,165 @@ +SET @ORIG_PAUSE_BACKGROUND_WORK = @@ROCKSDB_PAUSE_BACKGROUND_WORK; +SET GLOBAL ROCKSDB_PAUSE_BACKGROUND_WORK = 1; +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 ( +t TEXT, +tt TINYTEXT, +m MEDIUMTEXT, +l LONGTEXT, +PRIMARY KEY t (t(32)) +) ENGINE=rocksdb; +SHOW INDEX IN t1; +Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment +t1 0 PRIMARY 1 t A 1000 32 NULL LSMTREE +INSERT INTO t1 (t,tt,m,l) VALUES +('','','',''), +('a','b','c','d'), +('b','d','c','b'), +('test1','test2','test3','test4'), +(REPEAT('a',128),REPEAT('b',128),REPEAT('c',128),REPEAT('d',128)), +('abc','def','ghi','jkl'), +('test2','test3','test4','test5'), +('test3','test4','test5','test6'), +(REPEAT('b',128),REPEAT('f',128),REPEAT('e',128),REPEAT('d',128)), +(REPEAT('c',128),REPEAT('b',128),REPEAT('c',128),REPEAT('e',128)); +EXPLAIN SELECT SUBSTRING(t,16) AS f FROM t1 WHERE t IN ('test1','test2') ORDER BY f; +id select_type table type possible_keys key key_len ref rows Extra +1 
SIMPLE t1 range PRIMARY PRIMARY 34 NULL # Using where; Using filesort +SELECT SUBSTRING(t,16) AS f FROM t1 WHERE t IN ('test1','test2') ORDER BY f; +f + + +EXPLAIN SELECT SUBSTRING(t,16) AS f FROM t1 IGNORE INDEX (PRIMARY) WHERE t IN ('test1','test2') ORDER BY f; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 ALL NULL NULL NULL NULL # Using where; Using filesort +SELECT SUBSTRING(t,16) AS f FROM t1 IGNORE INDEX (PRIMARY) WHERE t IN ('test1','test2') ORDER BY f; +f + + +DROP TABLE t1; +CREATE TABLE t1 ( +t TEXT, +tt TINYTEXT, +m MEDIUMTEXT, +l LONGTEXT, +pk TINYTEXT PRIMARY KEY, +UNIQUE INDEX l_tt (l(256),tt(64)) +) ENGINE=rocksdb; +ERROR 42000: BLOB/TEXT column 'pk' used in key specification without a key length +CREATE TABLE t1 ( +t TEXT, +tt TINYTEXT, +m MEDIUMTEXT, +l LONGTEXT, +pk MEDIUMTEXT, +PRIMARY KEY mt (pk(1)), +INDEX (m(128)) +) ENGINE=rocksdb; +SHOW INDEX IN t1; +Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment +t1 0 PRIMARY 1 pk A 1000 1 NULL LSMTREE +t1 1 m 1 m A 500 128 NULL YES LSMTREE +INSERT INTO t1 (t,tt,m,l,pk) VALUES +('','','','','0'), +('a','b','c','d','1'), +('b','d','c','b','2'), +('test1','test2','test3','test4','3'), +(REPEAT('a',128),REPEAT('b',128),REPEAT('c',128),REPEAT('d',128),'4'), +('abc','def','ghi','jkl','5'), +('test2','test3','test4','test5','6'), +('test3','test4','test5','test6','7'), +(REPEAT('b',128),REPEAT('f',128),REPEAT('e',128),REPEAT('d',128),'8'), +(REPEAT('c',128),REPEAT('b',128),REPEAT('c',128),REPEAT('e',128),'9'); +EXPLAIN SELECT SUBSTRING(m,128) AS f FROM t1 WHERE m = 'test1' ORDER BY f DESC; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 ref m m 131 const # Using where; Using filesort +SELECT SUBSTRING(m,128) AS f FROM t1 WHERE m = 'test1' ORDER BY f DESC; +f +DROP TABLE t1; +CREATE TABLE t1 ( +b TEXT, +PRIMARY KEY b (b(32)) +) ENGINE=rocksdb; +INSERT INTO t1 (b) 
VALUES +('00000000000000000000000000000000'), +('00000000000000000000000000000001'), +('00000000000000000000000000000002'); +SELECT b FROM t1; +b +00000000000000000000000000000000 +00000000000000000000000000000001 +00000000000000000000000000000002 +DROP TABLE t1; +CREATE TABLE t1 ( +b TINYTEXT, +PRIMARY KEY b (b(32)) +) ENGINE=rocksdb; +INSERT INTO t1 (b) VALUES +('00000000000000000000000000000000'), +('00000000000000000000000000000001'), +('00000000000000000000000000000002'); +SELECT b FROM t1; +b +00000000000000000000000000000000 +00000000000000000000000000000001 +00000000000000000000000000000002 +DROP TABLE t1; +CREATE TABLE t1 ( +b MEDIUMTEXT, +PRIMARY KEY b (b(32)) +) ENGINE=rocksdb; +INSERT INTO t1 (b) VALUES +('00000000000000000000000000000000'), +('00000000000000000000000000000001'), +('00000000000000000000000000000002'); +SELECT b FROM t1; +b +00000000000000000000000000000000 +00000000000000000000000000000001 +00000000000000000000000000000002 +DROP TABLE t1; +CREATE TABLE t1 ( +b LONGTEXT, +PRIMARY KEY b (b(32)) +) ENGINE=rocksdb; +INSERT INTO t1 (b) VALUES +('00000000000000000000000000000000'), +('00000000000000000000000000000001'), +('00000000000000000000000000000002'); +SELECT b FROM t1; +b +00000000000000000000000000000000 +00000000000000000000000000000001 +00000000000000000000000000000002 +DROP TABLE t1; +CREATE TABLE t1 ( +b LONGTEXT CHARACTER SET "binary" COLLATE "binary", +PRIMARY KEY b (b(32)) +) ENGINE=rocksdb; +INSERT INTO t1 (b) VALUES +('00000000000000000000000000000000'), +('00000000000000000000000000000001'), +('00000000000000000000000000000002'); +INSERT INTO t1 (b) VALUES (''), (_binary 0x0), (' '); +SELECT hex(b) FROM t1; +hex(b) + +00 +20 +3030303030303030303030303030303030303030303030303030303030303030 +3030303030303030303030303030303030303030303030303030303030303031 +3030303030303030303030303030303030303030303030303030303030303032 +DROP TABLE t1; +CREATE TABLE t1 ( +b LONGTEXT CHARACTER SET "latin1" COLLATE "latin1_bin", +PRIMARY KEY b 
(b(32)) +) ENGINE=rocksdb; +INSERT INTO t1 (b) VALUES (''), (_binary 0x0), (' '); +ERROR 23000: Duplicate entry ' ' for key 'PRIMARY' +INSERT INTO t1 (b) VALUES (''), (_binary 0x0); +SELECT hex(b) FROM t1; +hex(b) +00 + +DROP TABLE t1; +SET GLOBAL ROCKSDB_PAUSE_BACKGROUND_WORK = @ORIG_PAUSE_BACKGROUND_WORK; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/type_varbinary.result b/storage/rocksdb/mysql-test/rocksdb/r/type_varbinary.result new file mode 100644 index 0000000000000..6de8c0331cd9f --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/type_varbinary.result @@ -0,0 +1,93 @@ +DROP TABLE IF EXISTS t1, t2; +CREATE TABLE t1 ( +v0 VARBINARY(0) , +v1 VARBINARY(1) , +v64 VARBINARY(64) , +v65000 VARBINARY(65000) , +PRIMARY KEY (v64) +) ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +v0 varbinary(0) YES NULL +v1 varbinary(1) YES NULL +v64 varbinary(64) NO PRI NULL +v65000 varbinary(65000) YES NULL +CREATE TABLE t2 (v VARBINARY(65532) , PRIMARY KEY(v(255))) ENGINE=rocksdb; +SHOW COLUMNS IN t2; +Field Type Null Key Default Extra +v varbinary(65532) NO PRI NULL +INSERT INTO t1 (v0,v1,v64,v65000) VALUES ('','','',''); +INSERT INTO t1 (v0,v1,v64,v65000) VALUES ('','y','Once there, double check that an article doesn\'t already exist','Here is a list of recommended books on MariaDB and MySQL. We\'ve provided links to Amazon.com here for convenience, but they can be found at many other bookstores, both online and off. + + If you want to have your favorite MySQL / MariaDB book listed here, please leave a comment. + For developers who want to code on MariaDB or MySQL + + * Understanding MySQL Internals by Sasha Pachev, former MySQL developer at MySQL AB. + o This is the only book we know about that describes the internals of MariaDB / MySQL. A must have for anyone who wants to understand and develop on MariaDB! + o Not all topics are covered and some parts are slightly outdated, but still the best book on this topic. 
+ * MySQL 5.1 Plugin Development by Sergei Golubchik and Andrew Hutchings + o A must read for anyone wanting to write a plugin for MariaDB, written by the Sergei who designed the plugin interface for MySQL and MariaDB! + + For MariaDB / MySQL end users + + * MariaDB Crash Course by Ben Forta + o First MariaDB book! + o For people who want to learn SQL and the basics of MariaDB. + o Now shipping. Purchase at Amazon.com or your favorite bookseller. + + * SQL-99 Complete, Really by Peter Gulutzan & Trudy Pelzer. + o Everything you wanted to know about the SQL 99 standard. Excellent reference book! + o Free to read in the Knowledgebase! + + * MySQL (4th Edition) by Paul DuBois + o The \'default\' book to read if you wont to learn to use MySQL / MariaDB. + + * MySQL Cookbook by Paul DuBois + o A lot of examples of how to use MySQL. As with all of Paul\'s books, it\'s worth its weight in gold and even enjoyable reading for such a \'dry\' subject. + + * High Performance MySQL, Second Edition, By Baron Schwartz, Peter Zaitsev, Vadim Tkachenko, Jeremy D. Zawodny, Arjen Lentz, Derek J. Balling, et al. + o \"High Performance MySQL is the definitive guide to building fast, reliable systems with MySQL. Written by noted experts with years of real-world experience building very large systems, this book covers every aspect of MySQL performance in detail, and focuses on robustness, security, and data integrity. Learn advanced techniques in depth so you can bring out MySQL\'s full power.\" (From the book description at O\'Reilly) + + * MySQL Admin Cookbook + o A quick step-by-step guide for MySQL users and database administrators to tackle real-world challenges with MySQL configuration and administration + + * MySQL 5.0 Certification Study Guide, By Paul DuBois, Stefan Hinz, Carsten Pedersen + o This is the official guide to cover the passing of the two MySQL Certification examinations. 
It is valid till version 5.0 of the server, so while it misses all the features available in MySQL 5.1 and greater (including MariaDB 5.1 and greater), it provides a good basic understanding of MySQL for the end-user. '); +SELECT HEX(v0), HEX(v1), HEX(v64), HEX(v65000) FROM t1; +HEX(v0) HEX(v1) HEX(v64) HEX(v65000) + + 79 4F6E63652074686572652C20646F75626C6520636865636B207468617420616E2061727469636C6520646F65736E277420616C7265616479206578697374 486572652069732061206C697374206F66207265636F6D6D656E64656420626F6F6B73206F6E204D61726961444220616E64204D7953514C2E2057652776652070726F7669646564206C696E6B7320746F20416D617A6F6E2E636F6D206865726520666F7220636F6E76656E69656E63652C2062757420746865792063616E20626520666F756E64206174206D616E79206F7468657220626F6F6B73746F7265732C20626F7468206F6E6C696E6520616E64206F66662E0A0A2020496620796F752077616E7420746F206861766520796F7572206661766F72697465204D7953514C202F204D61726961444220626F6F6B206C697374656420686572652C20706C65617365206C65617665206120636F6D6D656E742E0A2020466F7220646576656C6F706572732077686F2077616E7420746F20636F6465206F6E204D617269614442206F72204D7953514C0A0A2020202020202A20556E6465727374616E64696E67204D7953514C20496E7465726E616C73206279205361736861205061636865762C20666F726D6572204D7953514C20646576656C6F706572206174204D7953514C2041422E0A2020202020202020202020206F205468697320697320746865206F6E6C7920626F6F6B207765206B6E6F772061626F75742074686174206465736372696265732074686520696E7465726E616C73206F66204D617269614442202F204D7953514C2E2041206D757374206861766520666F7220616E796F6E652077686F2077616E747320746F20756E6465727374616E6420616E6420646576656C6F70206F6E204D617269614442210A2020202020202020202020206F204E6F7420616C6C20746F706963732061726520636F766572656420616E6420736F6D652070617274732061726520736C696768746C79206F757464617465642C20627574207374696C6C20746865206265737420626F6F6B206F6E207468697320746F7069632E200A2020202020202A204D7953514C20352E3120506C7567696E20446576656C6F706D656E742062792053657267656920476F6C75626368696B20616E64204
16E64726577204875746368696E67730A2020202020202020202020206F2041206D757374207265616420666F7220616E796F6E652077616E74696E6720746F207772697465206120706C7567696E20666F72204D6172696144422C207772697474656E20627920746865205365726765692077686F2064657369676E65642074686520706C7567696E20696E7465726661636520666F72204D7953514C20616E64204D61726961444221200A0A2020466F72204D617269614442202F204D7953514C20656E642075736572730A0A2020202020202A204D61726961444220437261736820436F757273652062792042656E20466F7274610A2020202020202020202020206F204669727374204D61726961444220626F6F6B210A2020202020202020202020206F20466F722070656F706C652077686F2077616E7420746F206C6561726E2053514C20616E642074686520626173696373206F66204D6172696144422E0A2020202020202020202020206F204E6F77207368697070696E672E20507572636861736520617420416D617A6F6E2E636F6D206F7220796F7572206661766F7269746520626F6F6B73656C6C65722E200A0A2020202020202A2053514C2D393920436F6D706C6574652C205265616C6C792062792050657465722047756C75747A616E20262054727564792050656C7A65722E0A2020202020202020202020206F2045766572797468696E6720796F752077616E74656420746F206B6E6F772061626F7574207468652053514C203939207374616E646172642E20457863656C6C656E74207265666572656E636520626F6F6B210A2020202020202020202020206F204672656520746F207265616420696E20746865204B6E6F776C656467656261736521200A0A2020202020202A204D7953514C20283474682045646974696F6E29206279205061756C204475426F69730A2020202020202020202020206F20546865202764656661756C742720626F6F6B20746F207265616420696620796F7520776F6E7420746F206C6561726E20746F20757365204D7953514C202F204D6172696144422E200A0A2020202020202A204D7953514C20436F6F6B626F6F6B206279205061756C204475426F69730A2020202020202020202020206F2041206C6F74206F66206578616D706C6573206F6620686F7720746F20757365204D7953514C2E204173207769746820616C6C206F66205061756C277320626F6F6B732C206974277320776F727468206974732077656967687420696E20676F6C6420616E64206576656E20656E6A6F7961626C652072656164696E6720666F7220737563682061202764727927207375626A6563742E200A0A2020202020202A204869676
820506572666F726D616E6365204D7953514C2C205365636F6E642045646974696F6E2C204279204261726F6E20536368776172747A2C205065746572205A6169747365762C20566164696D20546B616368656E6B6F2C204A6572656D7920442E205A61776F646E792C2041726A656E204C656E747A2C20446572656B204A2E2042616C6C696E672C20657420616C2E0A2020202020202020202020206F20224869676820506572666F726D616E6365204D7953514C2069732074686520646566696E697469766520677569646520746F206275696C64696E6720666173742C2072656C6961626C652073797374656D732077697468204D7953514C2E205772697474656E206279206E6F74656420657870657274732077697468207965617273206F66207265616C2D776F726C6420657870657269656E6365206275696C64696E672076657279206C617267652073797374656D732C207468697320626F6F6B20636F7665727320657665727920617370656374206F66204D7953514C20706572666F726D616E636520696E2064657461696C2C20616E6420666F6375736573206F6E20726F627573746E6573732C2073656375726974792C20616E64206461746120696E746567726974792E204C6561726E20616476616E63656420746563686E697175657320696E20646570746820736F20796F752063616E206272696E67206F7574204D7953514C27732066756C6C20706F7765722E22202846726F6D2074686520626F6F6B206465736372697074696F6E206174204F275265696C6C7929200A0A2020202020202A204D7953514C2041646D696E20436F6F6B626F6F6B0A2020202020202020202020206F204120717569636B20737465702D62792D7374657020677569646520666F72204D7953514C20757365727320616E642064617461626173652061646D696E6973747261746F727320746F207461636B6C65207265616C2D776F726C64206368616C6C656E6765732077697468204D7953514C20636F6E66696775726174696F6E20616E642061646D696E697374726174696F6E200A0A2020202020202A204D7953514C20352E302043657274696669636174696F6E2053747564792047756964652C204279205061756C204475426F69732C2053746566616E2048696E7A2C204361727374656E20506564657273656E0A2020202020202020202020206F205468697320697320746865206F6666696369616C20677569646520746F20636F766572207468652070617373696E67206F66207468652074776F204D7953514C2043657274696669636174696F6E206578616D696E6174696F6E732E2049742069732076616C69642074696C6C2076657273696F6E20352E302
06F6620746865207365727665722C20736F207768696C65206974206D697373657320616C6C2074686520666561747572657320617661696C61626C6520696E204D7953514C20352E3120616E6420677265617465722028696E636C7564696E67204D61726961444220352E3120616E642067726561746572292C2069742070726F7669646573206120676F6F6420626173696320756E6465727374616E64696E67206F66204D7953514C20666F722074686520656E642D757365722E20 +INSERT INTO t1 (v0,v1,v64,v65000) VALUES ('y', 'yy', REPEAT('c',65), REPEAT('abcdefghi ',6501)); +Warnings: +Warning 1265 Data truncated for column 'v0' at row 1 +Warning 1265 Data truncated for column 'v1' at row 1 +Warning 1265 Data truncated for column 'v64' at row 1 +Warning 1265 Data truncated for column 'v65000' at row 1 +INSERT INTO t1 (v0,v1,v64,v65000) SELECT v65000, v65000, CONCAT('a',v65000), CONCAT(v65000,v1) FROM t1; +Warnings: +Warning 1265 Data truncated for column 'v0' at row 5 +Warning 1265 Data truncated for column 'v1' at row 5 +Warning 1265 Data truncated for column 'v64' at row 5 +Warning 1265 Data truncated for column 'v0' at row 6 +Warning 1265 Data truncated for column 'v1' at row 6 +Warning 1265 Data truncated for column 'v64' at row 6 +Warning 1265 Data truncated for column 'v65000' at row 6 +SELECT HEX(v0), HEX(v1), HEX(v64), LENGTH(HEX(v65000)) FROM t1; +HEX(v0) HEX(v1) HEX(v64) LENGTH(HEX(v65000)) + 0 + 61 0 + 48 61486572652069732061206C697374206F66207265636F6D6D656E64656420626F6F6B73206F6E204D61726961444220616E64204D7953514C2E205765277665 5932 + 61 61616263646566676869206162636465666768692061626364656667686920616263646566676869206162636465666768692061626364656667686920616263 130000 + 79 4F6E63652074686572652C20646F75626C6520636865636B207468617420616E2061727469636C6520646F65736E277420616C7265616479206578697374 5930 + 79 63636363636363636363636363636363636363636363636363636363636363636363636363636363636363636363636363636363636363636363636363636363 130000 +ALTER TABLE t1 ADD COLUMN v65536 VARBINARY(65536) ; +Warnings: +Note 1246 Converting column 'v65536' from 
VARBINARY to BLOB +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +v0 varbinary(0) YES NULL +v1 varbinary(1) YES NULL +v64 varbinary(64) NO PRI NULL +v65000 varbinary(65000) YES NULL +v65536 mediumblob YES NULL +DROP TABLE t1, t2; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/type_varchar.result b/storage/rocksdb/mysql-test/rocksdb/r/type_varchar.result new file mode 100644 index 0000000000000..b329718e4e012 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/type_varchar.result @@ -0,0 +1,743 @@ +drop table if exists t1,t2; +# +# A basic test whether endspace-aware variable length encoding +# works when in PK +# +create table t1 ( pk varchar(10) primary key, col1 int not null) ; +insert into t1 values ('a', 1); +select * from t1; +pk col1 +a 1 +drop table t1; +DROP TABLE IF EXISTS t1, t2; +CREATE TABLE t1 ( +v0 VARCHAR(0) , +v1 VARCHAR(1) , +v64 VARCHAR(64) , +v65000 VARCHAR(65000) , +PRIMARY KEY (v64) +) ENGINE=rocksdb; +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +v0 varchar(0) YES NULL +v1 varchar(1) YES NULL +v64 varchar(64) NO PRI NULL +v65000 varchar(65000) YES NULL +CREATE TABLE t2 (v VARCHAR(65532), PRIMARY KEY (v(255))) ENGINE=rocksdb; +SHOW COLUMNS IN t2; +Field Type Null Key Default Extra +v varchar(65532) NO PRI NULL +INSERT INTO t1 (v0,v1,v64,v65000) VALUES ('','','',''); +INSERT INTO t1 (v0,v1,v64,v65000) VALUES ('','y','Once there, double check that an article doesn\'t already exist','Here is a list of recommended books on MariaDB and MySQL. We\'ve provided links to Amazon.com here for convenience, but they can be found at many other bookstores, both online and off. + + If you want to have your favorite MySQL / MariaDB book listed here, please leave a comment. + For developers who want to code on MariaDB or MySQL + + * Understanding MySQL Internals by Sasha Pachev, former MySQL developer at MySQL AB. + o This is the only book we know about that describes the internals of MariaDB / MySQL. 
A must have for anyone who wants to understand and develop on MariaDB! + o Not all topics are covered and some parts are slightly outdated, but still the best book on this topic. + * MySQL 5.1 Plugin Development by Sergei Golubchik and Andrew Hutchings + o A must read for anyone wanting to write a plugin for MariaDB, written by the Sergei who designed the plugin interface for MySQL and MariaDB! + + For MariaDB / MySQL end users + + * MariaDB Crash Course by Ben Forta + o First MariaDB book! + o For people who want to learn SQL and the basics of MariaDB. + o Now shipping. Purchase at Amazon.com or your favorite bookseller. + + * SQL-99 Complete, Really by Peter Gulutzan & Trudy Pelzer. + o Everything you wanted to know about the SQL 99 standard. Excellent reference book! + o Free to read in the Knowledgebase! + + * MySQL (4th Edition) by Paul DuBois + o The \'default\' book to read if you wont to learn to use MySQL / MariaDB. + + * MySQL Cookbook by Paul DuBois + o A lot of examples of how to use MySQL. As with all of Paul\'s books, it\'s worth its weight in gold and even enjoyable reading for such a \'dry\' subject. + + * High Performance MySQL, Second Edition, By Baron Schwartz, Peter Zaitsev, Vadim Tkachenko, Jeremy D. Zawodny, Arjen Lentz, Derek J. Balling, et al. + o \"High Performance MySQL is the definitive guide to building fast, reliable systems with MySQL. Written by noted experts with years of real-world experience building very large systems, this book covers every aspect of MySQL performance in detail, and focuses on robustness, security, and data integrity. 
Learn advanced techniques in depth so you can bring out MySQL\'s full power.\" (From the book description at O\'Reilly) + + * MySQL Admin Cookbook + o A quick step-by-step guide for MySQL users and database administrators to tackle real-world challenges with MySQL configuration and administration + + * MySQL 5.0 Certification Study Guide, By Paul DuBois, Stefan Hinz, Carsten Pedersen + o This is the official guide to cover the passing of the two MySQL Certification examinations. It is valid till version 5.0 of the server, so while it misses all the features available in MySQL 5.1 and greater (including MariaDB 5.1 and greater), it provides a good basic understanding of MySQL for the end-user. '); +SELECT v0,v1,v64,v65000 FROM t1; +v0 v1 v64 v65000 + + + + + + + + + + + + y Once there, double check that an article doesn't already exist Here is a list of recommended books on MariaDB and MySQL. We've provided links to Amazon.com here for convenience, but they can be found at many other bookstores, both online and off. + o "High Performance MySQL is the definitive guide to building fast, reliable systems with MySQL. Written by noted experts with years of real-world experience building very large systems, this book covers every aspect of MySQL performance in detail, and focuses on robustness, security, and data integrity. Learn advanced techniques in depth so you can bring out MySQL's full power." (From the book description at O'Reilly) + o A lot of examples of how to use MySQL. As with all of Paul's books, it's worth its weight in gold and even enjoyable reading for such a 'dry' subject. + o A must read for anyone wanting to write a plugin for MariaDB, written by the Sergei who designed the plugin interface for MySQL and MariaDB! + o A quick step-by-step guide for MySQL users and database administrators to tackle real-world challenges with MySQL configuration and administration + o Everything you wanted to know about the SQL 99 standard. Excellent reference book! 
+ o First MariaDB book! + o For people who want to learn SQL and the basics of MariaDB. + o Free to read in the Knowledgebase! + o Not all topics are covered and some parts are slightly outdated, but still the best book on this topic. + o Now shipping. Purchase at Amazon.com or your favorite bookseller. + o The 'default' book to read if you wont to learn to use MySQL / MariaDB. + o This is the official guide to cover the passing of the two MySQL Certification examinations. It is valid till version 5.0 of the server, so while it misses all the features available in MySQL 5.1 and greater (including MariaDB 5.1 and greater), it provides a good basic understanding of MySQL for the end-user. + o This is the only book we know about that describes the internals of MariaDB / MySQL. A must have for anyone who wants to understand and develop on MariaDB! + * High Performance MySQL, Second Edition, By Baron Schwartz, Peter Zaitsev, Vadim Tkachenko, Jeremy D. Zawodny, Arjen Lentz, Derek J. Balling, et al. + * MariaDB Crash Course by Ben Forta + * MySQL (4th Edition) by Paul DuBois + * MySQL 5.0 Certification Study Guide, By Paul DuBois, Stefan Hinz, Carsten Pedersen + * MySQL 5.1 Plugin Development by Sergei Golubchik and Andrew Hutchings + * MySQL Admin Cookbook + * MySQL Cookbook by Paul DuBois + * SQL-99 Complete, Really by Peter Gulutzan & Trudy Pelzer. + * Understanding MySQL Internals by Sasha Pachev, former MySQL developer at MySQL AB. + For MariaDB / MySQL end users + For developers who want to code on MariaDB or MySQL + If you want to have your favorite MySQL / MariaDB book listed here, please leave a comment. 
+INSERT INTO t1 (v0,v1,v64,v65000) VALUES ('y', 'yy', REPEAT('c',65), REPEAT('abcdefghi ',6501)); +Warnings: +Warning 1265 Data truncated for column 'v0' at row 1 +Warning 1265 Data truncated for column 'v1' at row 1 +Warning 1265 Data truncated for column 'v64' at row 1 +Warning 1265 Data truncated for column 'v65000' at row 1 +INSERT INTO t1 (v0,v1,v64,v65000) SELECT v65000, v65000, CONCAT('a',v65000), CONCAT(v65000,v1) FROM t1; +Warnings: +Warning 1265 Data truncated for column 'v0' at row 5 +Warning 1265 Data truncated for column 'v1' at row 5 +Warning 1265 Data truncated for column 'v64' at row 5 +Warning 1265 Data truncated for column 'v65000' at row 5 +Warning 1265 Data truncated for column 'v0' at row 6 +Warning 1265 Data truncated for column 'v1' at row 6 +Warning 1265 Data truncated for column 'v64' at row 6 +SELECT v0, v1, v64, LENGTH(v65000) FROM t1; +v0 v1 v64 LENGTH(v65000) + 0 + a 0 + H aHere is a list of recommended books on MariaDB and MySQL. We've 2966 + a aabcdefghi abcdefghi abcdefghi abcdefghi abcdefghi abcdefghi abc 65000 + y Once there, double check that an article doesn't already exist 2965 + y cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc 65000 +ALTER TABLE t1 ADD COLUMN v65536 VARCHAR(65536) ; +Warnings: +Note 1246 Converting column 'v65536' from VARCHAR to TEXT +SHOW COLUMNS IN t1; +Field Type Null Key Default Extra +v0 varchar(0) YES NULL +v1 varchar(1) YES NULL +v64 varchar(64) NO PRI NULL +v65000 varchar(65000) YES NULL +v65536 mediumtext YES NULL +DROP TABLE t1, t2; +# +# Endspace-comparison tests: +# +# +# Issue 257: Sort order for varchars is different between +# MyISAM/InnoDB vs MyRocks +# +create table t1 ( +pk varchar(64) CHARACTER SET latin1 COLLATE latin1_bin, +col1 varchar(64), +primary key (pk) +); +insert into t1 values ('a','a'); +insert into t1 values ('a ', 'a-space'); +ERROR 23000: Duplicate entry 'a ' for key 'PRIMARY' +insert into t1 values('b ', 'b-2x-space'); +insert into t1 values ('b', 'b'); 
+ERROR 23000: Duplicate entry 'b' for key 'PRIMARY' +select pk, hex(pk), col1 from t1; +pk hex(pk) col1 +a 61 a +b 622020 b-2x-space +insert into t1 values ('a\t', 'a-tab'); +insert into t1 values ('a \t', 'a-space-tab'); +select pk, hex(pk), col1 from t1 order by pk; +pk hex(pk) col1 +a 6109 a-tab +a 612009 a-space-tab +a 61 a +b 622020 b-2x-space +# Try longer values +insert into t1 values (concat('a', repeat(' ',10)), 'a-10-x-space'); +ERROR 23000: Duplicate entry 'a ' for key 'PRIMARY' +insert into t1 values (concat('c', repeat(' ',10)), 'c-10-x-space'); +select * from t1; +pk col1 +a a-tab +a a-space-tab +a a +b b-2x-space +c c-10-x-space +drop table t1; +# Secondary index +create table t1 ( +pk int not null primary key, +col1 varchar(64) CHARACTER SET latin1 COLLATE latin1_bin, +col2 varchar(64), +key (col1) +); +insert into t1 values (0, 'ab', 'a-b'); +insert into t1 values (1, 'a ', 'a-space'); +insert into t1 values (2, 'a', 'a'); +insert into t1 values (3, 'a \t', 'a-tab'); +# Must show 'using index' for latin1_bin and utf8_bin: +explain +select col1, hex(col1) from t1; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 index NULL col1 67 NULL # Using index +select col1, hex(col1) from t1; +col1 hex(col1) +a 61202009 +a 6120 +a 61 +ab 6162 +# Must show 'using index' for latin1_bin and utf8_bin: +explain +select col1, hex(col1) from t1 where col1 < 'b'; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 # col1 col1 67 NULL # Using where; Using index +select col1, hex(col1) from t1 where col1 < 'b'; +col1 hex(col1) +a 61202009 +a 6120 +a 61 +ab 6162 +delete from t1; +insert into t1 values(10, '', 'empty'); +insert into t1 values(11, repeat(' ', 8), '8x-space'); +insert into t1 values(12, repeat(' ', 16), '16x-space'); +insert into t1 values(13, repeat(' ', 24), '24x-space'); +insert into t1 values(14, concat(repeat(' ', 16),'a'), '16x-space-a'); +insert into t1 values(21, repeat(' ', 9), 
'9x-space'); +insert into t1 values(22, repeat(' ',17), '17x-space'); +insert into t1 values(23, repeat(' ',18), '18x-space'); +explain +select pk, col1, hex(col1), length(col1) from t1; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 # NULL col1 67 NULL # Using index +select pk, col1, hex(col1), length(col1) from t1; +pk col1 hex(col1) length(col1) +10 0 +11 2020202020202020 8 +12 20202020202020202020202020202020 16 +13 202020202020202020202020202020202020202020202020 24 +21 202020202020202020 9 +22 2020202020202020202020202020202020 17 +23 202020202020202020202020202020202020 18 +14 a 2020202020202020202020202020202061 17 +drop table t1; +create table t1 (pk int primary key, a varchar(512), key(a)) engine=rocksdb; +insert into t1 values (1, concat('a', repeat(' ', 300))); +insert into t1 values (2, concat('b', repeat(' ', 300))); +select pk,length(a) from t1 force index(a) where a < 'zz'; +pk length(a) +1 301 +2 301 +select pk,length(a),rtrim(a) from t1 force index(a) where a < 'zz'; +pk length(a) rtrim(a) +1 301 a +2 301 b +select pk,length(a),rtrim(a) from t1 ignore index(a) where a < 'zz'; +pk length(a) rtrim(a) +1 301 a +2 301 b +drop table t1; +# +# Issue 257: Sort order for varchars is different between +# MyISAM/InnoDB vs MyRocks +# +create table t1 ( +pk varchar(64) CHARACTER SET utf8 COLLATE utf8_bin, +col1 varchar(64), +primary key (pk) +); +insert into t1 values ('a','a'); +insert into t1 values ('a ', 'a-space'); +ERROR 23000: Duplicate entry 'a ' for key 'PRIMARY' +insert into t1 values('b ', 'b-2x-space'); +insert into t1 values ('b', 'b'); +ERROR 23000: Duplicate entry 'b' for key 'PRIMARY' +select pk, hex(pk), col1 from t1; +pk hex(pk) col1 +a 61 a +b 622020 b-2x-space +insert into t1 values ('a\t', 'a-tab'); +insert into t1 values ('a \t', 'a-space-tab'); +select pk, hex(pk), col1 from t1 order by pk; +pk hex(pk) col1 +a 6109 a-tab +a 612009 a-space-tab +a 61 a +b 622020 b-2x-space +# Try longer values +insert 
into t1 values (concat('a', repeat(' ',10)), 'a-10-x-space'); +ERROR 23000: Duplicate entry 'a ' for key 'PRIMARY' +insert into t1 values (concat('c', repeat(' ',10)), 'c-10-x-space'); +select * from t1; +pk col1 +a a-tab +a a-space-tab +a a +b b-2x-space +c c-10-x-space +drop table t1; +# Secondary index +create table t1 ( +pk int not null primary key, +col1 varchar(64) CHARACTER SET utf8 COLLATE utf8_bin, +col2 varchar(64), +key (col1) +); +insert into t1 values (0, 'ab', 'a-b'); +insert into t1 values (1, 'a ', 'a-space'); +insert into t1 values (2, 'a', 'a'); +insert into t1 values (3, 'a \t', 'a-tab'); +# Must show 'using index' for latin1_bin and utf8_bin: +explain +select col1, hex(col1) from t1; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 index NULL col1 195 NULL # Using index +select col1, hex(col1) from t1; +col1 hex(col1) +a 61202009 +a 6120 +a 61 +ab 6162 +# Must show 'using index' for latin1_bin and utf8_bin: +explain +select col1, hex(col1) from t1 where col1 < 'b'; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 # col1 col1 195 NULL # Using where; Using index +select col1, hex(col1) from t1 where col1 < 'b'; +col1 hex(col1) +a 61202009 +a 6120 +a 61 +ab 6162 +delete from t1; +insert into t1 values(10, '', 'empty'); +insert into t1 values(11, repeat(' ', 8), '8x-space'); +insert into t1 values(12, repeat(' ', 16), '16x-space'); +insert into t1 values(13, repeat(' ', 24), '24x-space'); +insert into t1 values(14, concat(repeat(' ', 16),'a'), '16x-space-a'); +insert into t1 values(21, repeat(' ', 9), '9x-space'); +insert into t1 values(22, repeat(' ',17), '17x-space'); +insert into t1 values(23, repeat(' ',18), '18x-space'); +explain +select pk, col1, hex(col1), length(col1) from t1; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 # NULL col1 195 NULL # Using index +select pk, col1, hex(col1), length(col1) from t1; +pk col1 hex(col1) length(col1) +10 0 
+11 2020202020202020 8 +12 20202020202020202020202020202020 16 +13 202020202020202020202020202020202020202020202020 24 +21 202020202020202020 9 +22 2020202020202020202020202020202020 17 +23 202020202020202020202020202020202020 18 +14 a 2020202020202020202020202020202061 17 +drop table t1; +create table t1 (pk int primary key, a varchar(512), key(a)) engine=rocksdb; +insert into t1 values (1, concat('a', repeat(' ', 300))); +insert into t1 values (2, concat('b', repeat(' ', 300))); +select pk,length(a) from t1 force index(a) where a < 'zz'; +pk length(a) +1 301 +2 301 +select pk,length(a),rtrim(a) from t1 force index(a) where a < 'zz'; +pk length(a) rtrim(a) +1 301 a +2 301 b +select pk,length(a),rtrim(a) from t1 ignore index(a) where a < 'zz'; +pk length(a) rtrim(a) +1 301 a +2 301 b +drop table t1; +# +# Issue 257: Sort order for varchars is different between +# MyISAM/InnoDB vs MyRocks +# +create table t1 ( +pk varchar(64) CHARACTER SET ucs2 COLLATE ucs2_bin, +col1 varchar(64), +primary key (pk) +); +insert into t1 values ('a','a'); +insert into t1 values ('a ', 'a-space'); +ERROR 23000: Duplicate entry 'a ' for key 'PRIMARY' +insert into t1 values('b ', 'b-2x-space'); +insert into t1 values ('b', 'b'); +ERROR 23000: Duplicate entry 'b' for key 'PRIMARY' +select pk, hex(pk), col1 from t1; +pk hex(pk) col1 +a 0061 a +b 006200200020 b-2x-space +insert into t1 values ('a\t', 'a-tab'); +insert into t1 values ('a \t', 'a-space-tab'); +select pk, hex(pk), col1 from t1 order by pk; +pk hex(pk) col1 +a 00610009 a-tab +a 006100200009 a-space-tab +a 0061 a +b 006200200020 b-2x-space +# Try longer values +insert into t1 values (concat('a', repeat(' ',10)), 'a-10-x-space'); +ERROR 23000: Duplicate entry 'a ' for key 'PRIMARY' +insert into t1 values (concat('c', repeat(' ',10)), 'c-10-x-space'); +select * from t1; +pk col1 +a a-tab +a a-space-tab +a a +b b-2x-space +c c-10-x-space +drop table t1; +# Secondary index +create table t1 ( +pk int not null primary key, +col1 
varchar(64) CHARACTER SET ucs2 COLLATE ucs2_bin, +col2 varchar(64), +key (col1) +); +insert into t1 values (0, 'ab', 'a-b'); +insert into t1 values (1, 'a ', 'a-space'); +insert into t1 values (2, 'a', 'a'); +insert into t1 values (3, 'a \t', 'a-tab'); +# Must show 'using index' for latin1_bin and utf8_bin: +explain +select col1, hex(col1) from t1; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 ALL NULL NULL NULL NULL # +select col1, hex(col1) from t1; +col1 hex(col1) +ab 00610062 +a 00610020 +a 0061 +a 0061002000200009 +# Must show 'using index' for latin1_bin and utf8_bin: +explain +select col1, hex(col1) from t1 where col1 < 'b'; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 # col1 col1 131 NULL # Using where +select col1, hex(col1) from t1 where col1 < 'b'; +col1 hex(col1) +a 0061002000200009 +a 00610020 +a 0061 +ab 00610062 +delete from t1; +insert into t1 values(10, '', 'empty'); +insert into t1 values(11, repeat(' ', 8), '8x-space'); +insert into t1 values(12, repeat(' ', 16), '16x-space'); +insert into t1 values(13, repeat(' ', 24), '24x-space'); +insert into t1 values(14, concat(repeat(' ', 16),'a'), '16x-space-a'); +insert into t1 values(21, repeat(' ', 9), '9x-space'); +insert into t1 values(22, repeat(' ',17), '17x-space'); +insert into t1 values(23, repeat(' ',18), '18x-space'); +explain +select pk, col1, hex(col1), length(col1) from t1; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 # NULL NULL NULL NULL # +select pk, col1, hex(col1), length(col1) from t1; +pk col1 hex(col1) length(col1) +10 0 +11 00200020002000200020002000200020 16 +12 0020002000200020002000200020002000200020002000200020002000200020 32 +13 002000200020002000200020002000200020002000200020002000200020002000200020002000200020002000200020 48 +14 a 00200020002000200020002000200020002000200020002000200020002000200061 34 +21 002000200020002000200020002000200020 18 +22 
00200020002000200020002000200020002000200020002000200020002000200020 34 +23 002000200020002000200020002000200020002000200020002000200020002000200020 36 +drop table t1; +create table t1 (pk int primary key, a varchar(512), key(a)) engine=rocksdb; +insert into t1 values (1, concat('a', repeat(' ', 300))); +insert into t1 values (2, concat('b', repeat(' ', 300))); +select pk,length(a) from t1 force index(a) where a < 'zz'; +pk length(a) +1 301 +2 301 +select pk,length(a),rtrim(a) from t1 force index(a) where a < 'zz'; +pk length(a) rtrim(a) +1 301 a +2 301 b +select pk,length(a),rtrim(a) from t1 ignore index(a) where a < 'zz'; +pk length(a) rtrim(a) +1 301 a +2 301 b +drop table t1; +# +# Issue 257: Sort order for varchars is different between +# MyISAM/InnoDB vs MyRocks +# +create table t1 ( +pk varchar(64) CHARACTER SET utf8mb4 COLLATE utf8mb4_bin, +col1 varchar(64), +primary key (pk) +); +insert into t1 values ('a','a'); +insert into t1 values ('a ', 'a-space'); +ERROR 23000: Duplicate entry 'a ' for key 'PRIMARY' +insert into t1 values('b ', 'b-2x-space'); +insert into t1 values ('b', 'b'); +ERROR 23000: Duplicate entry 'b' for key 'PRIMARY' +select pk, hex(pk), col1 from t1; +pk hex(pk) col1 +a 61 a +b 622020 b-2x-space +insert into t1 values ('a\t', 'a-tab'); +insert into t1 values ('a \t', 'a-space-tab'); +select pk, hex(pk), col1 from t1 order by pk; +pk hex(pk) col1 +a 6109 a-tab +a 612009 a-space-tab +a 61 a +b 622020 b-2x-space +# Try longer values +insert into t1 values (concat('a', repeat(' ',10)), 'a-10-x-space'); +ERROR 23000: Duplicate entry 'a ' for key 'PRIMARY' +insert into t1 values (concat('c', repeat(' ',10)), 'c-10-x-space'); +select * from t1; +pk col1 +a a-tab +a a-space-tab +a a +b b-2x-space +c c-10-x-space +drop table t1; +# Secondary index +create table t1 ( +pk int not null primary key, +col1 varchar(64) CHARACTER SET utf8mb4 COLLATE utf8mb4_bin, +col2 varchar(64), +key (col1) +); +insert into t1 values (0, 'ab', 'a-b'); +insert into t1 
values (1, 'a ', 'a-space'); +insert into t1 values (2, 'a', 'a'); +insert into t1 values (3, 'a \t', 'a-tab'); +# Must show 'using index' for latin1_bin and utf8_bin: +explain +select col1, hex(col1) from t1; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 ALL NULL NULL NULL NULL # +select col1, hex(col1) from t1; +col1 hex(col1) +ab 6162 +a 6120 +a 61 +a 61202009 +# Must show 'using index' for latin1_bin and utf8_bin: +explain +select col1, hex(col1) from t1 where col1 < 'b'; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 # col1 col1 259 NULL # Using where +select col1, hex(col1) from t1 where col1 < 'b'; +col1 hex(col1) +a 61202009 +a 6120 +a 61 +ab 6162 +delete from t1; +insert into t1 values(10, '', 'empty'); +insert into t1 values(11, repeat(' ', 8), '8x-space'); +insert into t1 values(12, repeat(' ', 16), '16x-space'); +insert into t1 values(13, repeat(' ', 24), '24x-space'); +insert into t1 values(14, concat(repeat(' ', 16),'a'), '16x-space-a'); +insert into t1 values(21, repeat(' ', 9), '9x-space'); +insert into t1 values(22, repeat(' ',17), '17x-space'); +insert into t1 values(23, repeat(' ',18), '18x-space'); +explain +select pk, col1, hex(col1), length(col1) from t1; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 # NULL NULL NULL NULL # +select pk, col1, hex(col1), length(col1) from t1; +pk col1 hex(col1) length(col1) +10 0 +11 2020202020202020 8 +12 20202020202020202020202020202020 16 +13 202020202020202020202020202020202020202020202020 24 +14 a 2020202020202020202020202020202061 17 +21 202020202020202020 9 +22 2020202020202020202020202020202020 17 +23 202020202020202020202020202020202020 18 +drop table t1; +create table t1 (pk int primary key, a varchar(512), key(a)) engine=rocksdb; +insert into t1 values (1, concat('a', repeat(' ', 300))); +insert into t1 values (2, concat('b', repeat(' ', 300))); +select pk,length(a) from t1 force index(a) where a < 
'zz'; +pk length(a) +1 301 +2 301 +select pk,length(a),rtrim(a) from t1 force index(a) where a < 'zz'; +pk length(a) rtrim(a) +1 301 a +2 301 b +select pk,length(a),rtrim(a) from t1 ignore index(a) where a < 'zz'; +pk length(a) rtrim(a) +1 301 a +2 301 b +drop table t1; +# +# Issue 257: Sort order for varchars is different between +# MyISAM/InnoDB vs MyRocks +# +create table t1 ( +pk varchar(64) CHARACTER SET utf16 COLLATE utf16_bin, +col1 varchar(64), +primary key (pk) +); +insert into t1 values ('a','a'); +insert into t1 values ('a ', 'a-space'); +ERROR 23000: Duplicate entry 'a ' for key 'PRIMARY' +insert into t1 values('b ', 'b-2x-space'); +insert into t1 values ('b', 'b'); +ERROR 23000: Duplicate entry 'b' for key 'PRIMARY' +select pk, hex(pk), col1 from t1; +pk hex(pk) col1 +a 0061 a +b 006200200020 b-2x-space +insert into t1 values ('a\t', 'a-tab'); +insert into t1 values ('a \t', 'a-space-tab'); +select pk, hex(pk), col1 from t1 order by pk; +pk hex(pk) col1 +a 00610009 a-tab +a 006100200009 a-space-tab +a 0061 a +b 006200200020 b-2x-space +# Try longer values +insert into t1 values (concat('a', repeat(' ',10)), 'a-10-x-space'); +ERROR 23000: Duplicate entry 'a ' for key 'PRIMARY' +insert into t1 values (concat('c', repeat(' ',10)), 'c-10-x-space'); +select * from t1; +pk col1 +a a-tab +a a-space-tab +a a +b b-2x-space +c c-10-x-space +drop table t1; +# Secondary index +create table t1 ( +pk int not null primary key, +col1 varchar(64) CHARACTER SET utf16 COLLATE utf16_bin, +col2 varchar(64), +key (col1) +); +insert into t1 values (0, 'ab', 'a-b'); +insert into t1 values (1, 'a ', 'a-space'); +insert into t1 values (2, 'a', 'a'); +insert into t1 values (3, 'a \t', 'a-tab'); +# Must show 'using index' for latin1_bin and utf8_bin: +explain +select col1, hex(col1) from t1; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 ALL NULL NULL NULL NULL # +select col1, hex(col1) from t1; +col1 hex(col1) +ab 00610062 +a 00610020 +a 0061 +a 
0061002000200009 +# Must show 'using index' for latin1_bin and utf8_bin: +explain +select col1, hex(col1) from t1 where col1 < 'b'; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 # col1 col1 259 NULL # Using where +select col1, hex(col1) from t1 where col1 < 'b'; +col1 hex(col1) +a 0061002000200009 +a 00610020 +a 0061 +ab 00610062 +delete from t1; +insert into t1 values(10, '', 'empty'); +insert into t1 values(11, repeat(' ', 8), '8x-space'); +insert into t1 values(12, repeat(' ', 16), '16x-space'); +insert into t1 values(13, repeat(' ', 24), '24x-space'); +insert into t1 values(14, concat(repeat(' ', 16),'a'), '16x-space-a'); +insert into t1 values(21, repeat(' ', 9), '9x-space'); +insert into t1 values(22, repeat(' ',17), '17x-space'); +insert into t1 values(23, repeat(' ',18), '18x-space'); +explain +select pk, col1, hex(col1), length(col1) from t1; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 # NULL NULL NULL NULL # +select pk, col1, hex(col1), length(col1) from t1; +pk col1 hex(col1) length(col1) +10 0 +11 00200020002000200020002000200020 16 +12 0020002000200020002000200020002000200020002000200020002000200020 32 +13 002000200020002000200020002000200020002000200020002000200020002000200020002000200020002000200020 48 +14 a 00200020002000200020002000200020002000200020002000200020002000200061 34 +21 002000200020002000200020002000200020 18 +22 00200020002000200020002000200020002000200020002000200020002000200020 34 +23 002000200020002000200020002000200020002000200020002000200020002000200020 36 +drop table t1; +create table t1 (pk int primary key, a varchar(512), key(a)) engine=rocksdb; +insert into t1 values (1, concat('a', repeat(' ', 300))); +insert into t1 values (2, concat('b', repeat(' ', 300))); +select pk,length(a) from t1 force index(a) where a < 'zz'; +pk length(a) +1 301 +2 301 +select pk,length(a),rtrim(a) from t1 force index(a) where a < 'zz'; +pk length(a) rtrim(a) +1 301 a +2 301 b 
+select pk,length(a),rtrim(a) from t1 ignore index(a) where a < 'zz'; +pk length(a) rtrim(a) +1 301 a +2 301 b +drop table t1; +create table t1 ( +pk int primary key, +col1 varchar(10) collate utf8mb4_bin not null, +col2 varchar(20), +key(col1) +) engine=rocksdb; +insert into t1 values (1, 'ab','ab'); +insert into t1 values (2, 'ab\0','ab0'); +select pk, hex(col1), col2 from t1 force index(col1) order by col1; +pk hex(col1) col2 +2 616200 ab0 +1 6162 ab +select pk, hex(col1), col2 from t1 ignore index(col1) order by col1; +pk hex(col1) col2 +2 616200 ab0 +1 6162 ab +drop table t1; +create table t (id int primary key, email varchar(100), KEY email_i (email(30))); +insert into t values (1, 'abcabcabcabcabcabcabcabcabcabcabc '); +explain select 'email_i' as index_name, count(*) AS count from t force index(email_i); +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t ALL NULL NULL NULL NULL # +select 'email_i' as index_name, count(*) AS count from t force index(email_i); +index_name count +email_i 1 +drop table t; +set @save_rocksdb_checksums_pct = @@global.rocksdb_checksums_pct; +set @save_rocksdb_verify_row_debug_checksums = @@session.rocksdb_verify_row_debug_checksums; +set global rocksdb_checksums_pct = 100; +set session rocksdb_verify_row_debug_checksums = on; +create table t (id int primary key, email varchar(100), KEY email_i (email(30))); +insert into t values (1, 'a'); +explain select 'email_i' as index_name, count(*) AS count from t force index(email_i); +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t ALL NULL NULL NULL NULL # +select 'email_i' as index_name, count(*) AS count from t force index(email_i); +index_name count +email_i 1 +drop table t; +set global rocksdb_checksums_pct = @save_rocksdb_checksums_pct; +set session rocksdb_verify_row_debug_checksums = @save_rocksdb_verify_row_debug_checksums; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/unique_check.result 
b/storage/rocksdb/mysql-test/rocksdb/r/unique_check.result new file mode 100644 index 0000000000000..8de94e0297e82 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/unique_check.result @@ -0,0 +1,117 @@ +connect con1, localhost, root,,; +connect con2, localhost, root,,; +connect con3, localhost, root,,; +connection default; +set debug_sync='RESET'; +drop table if exists t1; +create table t1 (id int, value int, primary key (id)) engine=rocksdb; +create table t2 (id int, id2 int, value int, primary key (id), unique key (id2)) engine=rocksdb; +connection con1; +begin; +insert into t1 values (1,1); +connection con2; +set session rocksdb_lock_wait_timeout=50; +begin; +insert into t1 values (1,2); +connection con1; +commit; +connection con2; +ERROR 23000: Duplicate entry '1' for key 'PRIMARY' +commit; +select * from t1; +id value +1 1 +truncate table t1; +connection con1; +begin; +insert into t2 values (1,1,1); +connection con2; +begin; +insert into t2 values (2,1,2); +connection con1; +commit; +connection con2; +ERROR 23000: Duplicate entry '1' for key 'id2' +commit; +select * from t2; +id id2 value +1 1 1 +truncate table t2; +connection con1; +begin; +insert into t1 values (1,1); +connection con2; +begin; +insert into t1 values (1,2); +connection con1; +rollback; +connection con2; +commit; +select * from t1; +id value +1 2 +truncate table t1; +connection con1; +begin; +insert into t2 values (1,1,1); +connection con2; +begin; +insert into t2 values (2,1,2); +connection con1; +rollback; +connection con2; +commit; +select * from t2; +id id2 value +2 1 2 +truncate table t2; +connection con1; +set debug_sync='rocksdb.update_write_row_after_unique_check SIGNAL parked1 WAIT_FOR go1'; +insert into t1 values (1,1); +connection con2; +set debug_sync='rocksdb.update_write_row_after_unique_check SIGNAL parked2 WAIT_FOR go2'; +insert into t2 values (1,1,1); +connection default; +set debug_sync='now WAIT_FOR parked1'; +set debug_sync='now WAIT_FOR parked2'; +connection con3; 
+set session rocksdb_lock_wait_timeout=1; +insert into t1 values (1,2); +ERROR HY000: Lock wait timeout exceeded; try restarting transaction +insert into t2 values (2,1,2); +ERROR HY000: Lock wait timeout exceeded; try restarting transaction +connection default; +set debug_sync='now SIGNAL go1'; +set debug_sync='now SIGNAL go2'; +connection con1; +connection con2; +connection default; +insert into t1 values (1,2); +ERROR 23000: Duplicate entry '1' for key 'PRIMARY' +insert into t2 values (2,1,2); +ERROR 23000: Duplicate entry '1' for key 'id2' +select * from t1; +id value +1 1 +select * from t2; +id id2 value +1 1 1 +connection default; +set debug_sync='RESET'; +disconnect con1; +disconnect con2; +disconnect con3; +drop table t1, t2; +connection default; +drop table if exists t1,t2,t3; +create table t1 (id int, value int, primary key (id)) engine=rocksdb; +create table t2 (id int, id2 int, value int, primary key (id), unique key (id2)) engine=rocksdb; +create table t3 (id int, value int) engine=rocksdb; +SET @old_val = @@session.unique_checks; +set @@session.unique_checks = FALSE; +insert into t1 values (1, 1), (1, 2); +insert into t2 values (1, 1, 1), (1, 2, 1); +ERROR 23000: Duplicate entry '1' for key 'PRIMARY' +insert into t3 values (1, 1), (1, 1); +set @@session.unique_checks = @old_val; +drop table t1, t2, t3; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/unique_sec.result b/storage/rocksdb/mysql-test/rocksdb/r/unique_sec.result new file mode 100644 index 0000000000000..a37e7f1cb3178 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/unique_sec.result @@ -0,0 +1,221 @@ +DROP TABLE IF EXISTS t1; +connect con1,localhost,root,,; +connect con2,localhost,root,,; +connection con1; +CREATE TABLE t1 (id1 INT NOT NULL, id2 INT NOT NULL, id3 VARCHAR(32), +id4 INT, id5 VARCHAR(32), +value1 INT, value2 INT, value3 VARCHAR(32), +PRIMARY KEY (id1, id2) , +UNIQUE INDEX (id2, id1) , +UNIQUE INDEX (id2, id3, id4) , +INDEX (id1) , +INDEX (id3, id1) , +UNIQUE 
INDEX(id5) , +INDEX (id2, id5)) ENGINE=ROCKSDB; +SELECT COUNT(*) FROM t1; +COUNT(*) +10 +# Test inserting a key that returns duplicate error +INSERT INTO t1 VALUES (1, 1, 11, 11, 11, 11, 11, 11); +ERROR 23000: Duplicate entry '1-1' for key 'PRIMARY' +INSERT INTO t1 VALUES (5, 5, 11, 11, 11, 11, 11, 11); +ERROR 23000: Duplicate entry '5-5' for key 'PRIMARY' +INSERT INTO t1 VALUES (10, 10, 11, 11, 11, 11, 11, 11); +ERROR 23000: Duplicate entry '10-10' for key 'PRIMARY' +INSERT INTO t1 VALUES (11, 1, 1, 1, 11, 11, 11, 11); +ERROR 23000: Duplicate entry '1-1-1' for key 'id2_2' +INSERT INTO t1 VALUES (11, 5, 5, 5, 11, 11, 11, 11); +ERROR 23000: Duplicate entry '5-5-5' for key 'id2_2' +INSERT INTO t1 VALUES (11, 10, 10, 10, 11, 11, 11, 11); +ERROR 23000: Duplicate entry '10-10-10' for key 'id2_2' +INSERT INTO t1 VALUES (11, 11, 11, 11, 1, 11, 11, 11); +ERROR 23000: Duplicate entry '1' for key 'id5' +INSERT INTO t1 VALUES (11, 11, 11, 11, 5, 11, 11, 11); +ERROR 23000: Duplicate entry '5' for key 'id5' +INSERT INTO t1 VALUES (11, 11, 11, 11, 10, 11, 11, 11); +ERROR 23000: Duplicate entry '10' for key 'id5' +# Test updating a key that returns duplicate error +UPDATE t1 SET id2=1, id3=1, id4=1 WHERE id1=2; +ERROR 23000: Duplicate entry '1-1-1' for key 'id2_2' +UPDATE t1 SET id2=1, id3=1, id4=1; +ERROR 23000: Duplicate entry '1-1-1' for key 'id2_2' +SELECT COUNT(*) FROM t1; +COUNT(*) +10 +# Test updating a key to itself +UPDATE t1 set id2=id4; +UPDATE t1 set id5=id3, value1=value2; +UPDATE t1 set value3=value1; +# Test modifying values should not cause duplicates +UPDATE t1 SET value1=value3+1; +UPDATE t1 SET value3=value3 div 2; +UPDATE t1 SET value2=value3; +SELECT COUNT(*) FROM t1; +COUNT(*) +10 +# Test NULL values are considered unique +INSERT INTO t1 VALUES (20, 20, 20, NULL, NULL, 20, 20, 20); +INSERT INTO t1 VALUES (21, 20, 20, NULL, NULL, 20, 20, 20); +INSERT INTO t1 VALUES (22, 20, 20, NULL, NULL, 20, 20, 20); +SELECT COUNT(*) FROM t1; +COUNT(*) +13 +# Adding 
multiple rows where one of the rows fail the duplicate +# check should fail the whole statement +INSERT INTO t1 VALUES (23, 23, 23, 23, 23, 23, 23, 23), +(24, 24, 24, 24, 24, 24, 24, 24), +(25, 10, 10, 10, 25, 25, 25, 25), +(26, 26, 26, 26, 26, 26, 26, 26); +ERROR 23000: Duplicate entry '10-10-10' for key 'id2_2' +SELECT COUNT(*) FROM t1; +COUNT(*) +13 +connection con1; +BEGIN; +INSERT INTO t1 VALUES (30, 31, 32, 33, 34, 30, 30, 30); +connection con2; +BEGIN; +SELECT COUNT(*) FROM t1; +COUNT(*) +13 +# Primary key should prevent duplicate on insert +INSERT INTO t1 VALUES (30, 31, 30, 30, 30, 30, 30, 30); +ERROR HY000: Lock wait timeout exceeded; try restarting transaction +# Primary key should prevent duplicate on update +UPDATE t1 SET id1=30, id2=31 WHERE id2=10; +ERROR HY000: Lock wait timeout exceeded; try restarting transaction +# Unique secondary key should prevent duplicate on insert +INSERT INTO t1 VALUES (31, 31, 32, 33, 30, 30, 30, 30); +ERROR HY000: Lock wait timeout exceeded; try restarting transaction +INSERT INTO t1 VALUES (32, 32, 32, 32, 34, 32, 32, 32); +ERROR HY000: Lock wait timeout exceeded; try restarting transaction +# Unique secondary key should prevent duplicate on update +UPDATE t1 SET id2=31, id3=32, id4=33 WHERE id2=8; +ERROR HY000: Lock wait timeout exceeded; try restarting transaction +UPDATE t1 SET id5=34 WHERE id2=8; +ERROR HY000: Lock wait timeout exceeded; try restarting transaction +# Adding multiple rows where one of the rows fail the duplicate +# check should fail the whole statement +INSERT INTO t1 VALUES (35, 35, 35, 35, 35, 35, 35, 35), +(36, 36, 36, 36, 36, 36, 36, 36), +(37, 31, 32, 33, 37, 37, 37, 37), +(38, 38, 38, 38, 38, 38, 38, 38); +ERROR HY000: Lock wait timeout exceeded; try restarting transaction +INSERT INTO t1 VALUES (35, 35, 35, 35, 35, 35, 35, 35), +(36, 36, 36, 36, 36, 36, 36, 36), +(37, 37, 37, 37, 34, 37, 37, 37), +(38, 38, 38, 38, 38, 38, 38, 38); +ERROR HY000: Lock wait timeout exceeded; try restarting 
transaction +# NULL values are unique and duplicates in value fields are ignored +INSERT INTO t1 VALUES (37, 31, 32, NULL, 37, 37, 37, 37), +(38, 31, 32, NULL, 38, 37, 37, 37), +(39, 31, 32, NULL, 39, 37, 37, 37); +SELECT COUNT(*) FROM t1; +COUNT(*) +16 +# Fail on duplicate key update for row added in our transaction +UPDATE t1 SET id5=37 WHERE id1=38; +ERROR 23000: Duplicate entry '37' for key 'id5' +# Fail on lock timeout for row modified in another transaction +UPDATE t1 SET id5=34 WHERE id1=38; +ERROR HY000: Lock wait timeout exceeded; try restarting transaction +# NULL values are unique +UPDATE t1 SET id5=NULL WHERE value1 > 37; +connection con1; +COMMIT; +connection con2; +COMMIT; +connection con2; +BEGIN; +SELECT COUNT(*) FROM t1; +COUNT(*) +17 +connection con1; +BEGIN; +INSERT INTO t1 VALUES (40, 40, 40, 40, 40, 40, 40, 40); +connection con2; +# When transaction is pending, fail on lock acquisition +INSERT INTO t1 VALUES (40, 40, 40, 40, 40, 40, 40, 40); +ERROR HY000: Lock wait timeout exceeded; try restarting transaction +INSERT INTO t1 VALUES (41, 40, 40, 40, 40, 40, 40, 40); +ERROR HY000: Lock wait timeout exceeded; try restarting transaction +SELECT COUNT(*) FROM t1; +COUNT(*) +17 +connection con1; +COMMIT; +connection con2; +# When transaction is committed, fail on duplicate key +INSERT INTO t1 VALUES (40, 40, 40, 40, 40, 40, 40, 40); +Got one of the listed errors +INSERT INTO t1 VALUES (41, 40, 40, 40, 40, 40, 40, 40); +ERROR 23000: Duplicate entry '40-40-40' for key 'id2_2' +ROLLBACK; +SELECT * FROM t1; +id1 id2 id3 id4 id5 value1 value2 value3 +1 1 1 1 1 2 0 0 +2 2 2 2 2 3 1 1 +3 3 3 3 3 4 1 1 +4 4 4 4 4 5 2 2 +5 5 5 5 5 6 2 2 +6 6 6 6 6 7 3 3 +7 7 7 7 7 8 3 3 +8 8 8 8 8 9 4 4 +9 9 9 9 9 10 4 4 +10 10 10 10 10 11 5 5 +20 20 20 NULL NULL 20 20 20 +21 20 20 NULL NULL 20 20 20 +22 20 20 NULL NULL 20 20 20 +30 31 32 33 34 30 30 30 +37 31 32 NULL 37 37 37 37 +38 31 32 NULL 38 37 37 37 +39 31 32 NULL 39 37 37 37 +40 40 40 40 40 40 40 40 +disconnect con1; 
+disconnect con2; +connection default; +DROP TABLE t1; +# +# Issue #88: Creating unique index over column with duplicate values succeeds +# +create table t1 (pk int primary key, a int) engine=rocksdb; +insert into t1 values +(1, 1), +(2, 2), +(3, 3), +(4, 1), +(5, 5); +alter table t1 add unique(a); +ERROR 23000: Duplicate entry '1' for key 'a' +drop table t1; +# +# Issue #111 +# +CREATE TABLE t2 (pk int, a int, PRIMARY KEY (pk, a), UNIQUE KEY (a)) ENGINE=ROCKSDB PARTITION BY KEY (a) PARTITIONS 16; +INSERT INTO t2 VALUES (1,1); +INSERT INTO t2 VALUES (1,1); +ERROR 23000: Duplicate entry '1-1' for key 'PRIMARY' +INSERT INTO t2 VALUES (2,1); +ERROR 23000: Duplicate entry '1' for key 'a' +DROP TABLE t2; +# +# Issue #491 (https://github.com/facebook/mysql-5.6/issues/491) +# +CREATE TABLE t (a BLOB, PRIMARY KEY(a(2)), UNIQUE KEY (a(1))) engine=rocksdb; +INSERT INTO t VALUES('a'); +CHECK TABLE t EXTENDED; +Table Op Msg_type Msg_text +test.t check status OK +DROP TABLE t; +CREATE TABLE t (a VARCHAR(255), PRIMARY KEY(a), UNIQUE KEY (a(1))) engine=rocksdb; +INSERT INTO t VALUES('a'); +CHECK TABLE t EXTENDED; +Table Op Msg_type Msg_text +test.t check status OK +DROP TABLE t; +CREATE TABLE t (a VARCHAR(255), PRIMARY KEY(a(2)), UNIQUE KEY (a(1))) engine=rocksdb; +INSERT INTO t VALUES('a'); +CHECK TABLE t EXTENDED; +Table Op Msg_type Msg_text +test.t check status OK +DROP TABLE t; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/unique_sec_rev_cf.result b/storage/rocksdb/mysql-test/rocksdb/r/unique_sec_rev_cf.result new file mode 100644 index 0000000000000..210c74098af3b --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/unique_sec_rev_cf.result @@ -0,0 +1,177 @@ +DROP TABLE IF EXISTS t1; +connect con1,localhost,root,,; +connect con2,localhost,root,,; +connection con1; +CREATE TABLE t1 (id1 INT NOT NULL, id2 INT NOT NULL, id3 VARCHAR(32), +id4 INT, id5 VARCHAR(32), +value1 INT, value2 INT, value3 VARCHAR(32), +PRIMARY KEY (id1, id2) COMMENT 'rev:cf', +UNIQUE INDEX (id2, 
id1) COMMENT 'rev:cf', +UNIQUE INDEX (id2, id3, id4) COMMENT 'rev:cf', +INDEX (id1) COMMENT 'rev:cf', +INDEX (id3, id1) COMMENT 'rev:cf', +UNIQUE INDEX(id5) COMMENT 'rev:cf', +INDEX (id2, id5)) ENGINE=ROCKSDB; +SELECT COUNT(*) FROM t1; +COUNT(*) +10 +# Test inserting a key that returns duplicate error +INSERT INTO t1 VALUES (1, 1, 11, 11, 11, 11, 11, 11); +ERROR 23000: Duplicate entry '1-1' for key 'PRIMARY' +INSERT INTO t1 VALUES (5, 5, 11, 11, 11, 11, 11, 11); +ERROR 23000: Duplicate entry '5-5' for key 'PRIMARY' +INSERT INTO t1 VALUES (10, 10, 11, 11, 11, 11, 11, 11); +ERROR 23000: Duplicate entry '10-10' for key 'PRIMARY' +INSERT INTO t1 VALUES (11, 1, 1, 1, 11, 11, 11, 11); +ERROR 23000: Duplicate entry '1-1-1' for key 'id2_2' +INSERT INTO t1 VALUES (11, 5, 5, 5, 11, 11, 11, 11); +ERROR 23000: Duplicate entry '5-5-5' for key 'id2_2' +INSERT INTO t1 VALUES (11, 10, 10, 10, 11, 11, 11, 11); +ERROR 23000: Duplicate entry '10-10-10' for key 'id2_2' +INSERT INTO t1 VALUES (11, 11, 11, 11, 1, 11, 11, 11); +ERROR 23000: Duplicate entry '1' for key 'id5' +INSERT INTO t1 VALUES (11, 11, 11, 11, 5, 11, 11, 11); +ERROR 23000: Duplicate entry '5' for key 'id5' +INSERT INTO t1 VALUES (11, 11, 11, 11, 10, 11, 11, 11); +ERROR 23000: Duplicate entry '10' for key 'id5' +# Test updating a key that returns duplicate error +UPDATE t1 SET id2=1, id3=1, id4=1 WHERE id1=2; +ERROR 23000: Duplicate entry '1-1-1' for key 'id2_2' +UPDATE t1 SET id2=1, id3=1, id4=1; +ERROR 23000: Duplicate entry '1-1-1' for key 'id2_2' +SELECT COUNT(*) FROM t1; +COUNT(*) +10 +# Test updating a key to itself +UPDATE t1 set id2=id4; +UPDATE t1 set id5=id3, value1=value2; +UPDATE t1 set value3=value1; +# Test modifying values should not cause duplicates +UPDATE t1 SET value1=value3+1; +UPDATE t1 SET value3=value3 div 2; +UPDATE t1 SET value2=value3; +SELECT COUNT(*) FROM t1; +COUNT(*) +10 +# Test NULL values are considered unique +INSERT INTO t1 VALUES (20, 20, 20, NULL, NULL, 20, 20, 20); +INSERT INTO t1 
VALUES (21, 20, 20, NULL, NULL, 20, 20, 20); +INSERT INTO t1 VALUES (22, 20, 20, NULL, NULL, 20, 20, 20); +SELECT COUNT(*) FROM t1; +COUNT(*) +13 +# Adding multiple rows where one of the rows fail the duplicate +# check should fail the whole statement +INSERT INTO t1 VALUES (23, 23, 23, 23, 23, 23, 23, 23), +(24, 24, 24, 24, 24, 24, 24, 24), +(25, 10, 10, 10, 25, 25, 25, 25), +(26, 26, 26, 26, 26, 26, 26, 26); +ERROR 23000: Duplicate entry '10-10-10' for key 'id2_2' +SELECT COUNT(*) FROM t1; +COUNT(*) +13 +connection con1; +BEGIN; +INSERT INTO t1 VALUES (30, 31, 32, 33, 34, 30, 30, 30); +connection con2; +BEGIN; +SELECT COUNT(*) FROM t1; +COUNT(*) +13 +# Primary key should prevent duplicate on insert +INSERT INTO t1 VALUES (30, 31, 30, 30, 30, 30, 30, 30); +ERROR HY000: Lock wait timeout exceeded; try restarting transaction +# Primary key should prevent duplicate on update +UPDATE t1 SET id1=30, id2=31 WHERE id2=10; +ERROR HY000: Lock wait timeout exceeded; try restarting transaction +# Unique secondary key should prevent duplicate on insert +INSERT INTO t1 VALUES (31, 31, 32, 33, 30, 30, 30, 30); +ERROR HY000: Lock wait timeout exceeded; try restarting transaction +INSERT INTO t1 VALUES (32, 32, 32, 32, 34, 32, 32, 32); +ERROR HY000: Lock wait timeout exceeded; try restarting transaction +# Unique secondary key should prevent duplicate on update +UPDATE t1 SET id2=31, id3=32, id4=33 WHERE id2=8; +ERROR HY000: Lock wait timeout exceeded; try restarting transaction +UPDATE t1 SET id5=34 WHERE id2=8; +ERROR HY000: Lock wait timeout exceeded; try restarting transaction +# Adding multiple rows where one of the rows fail the duplicate +# check should fail the whole statement +INSERT INTO t1 VALUES (35, 35, 35, 35, 35, 35, 35, 35), +(36, 36, 36, 36, 36, 36, 36, 36), +(37, 31, 32, 33, 37, 37, 37, 37), +(38, 38, 38, 38, 38, 38, 38, 38); +ERROR HY000: Lock wait timeout exceeded; try restarting transaction +INSERT INTO t1 VALUES (35, 35, 35, 35, 35, 35, 35, 35), +(36, 36, 
36, 36, 36, 36, 36, 36), +(37, 37, 37, 37, 34, 37, 37, 37), +(38, 38, 38, 38, 38, 38, 38, 38); +ERROR HY000: Lock wait timeout exceeded; try restarting transaction +# NULL values are unique and duplicates in value fields are ignored +INSERT INTO t1 VALUES (37, 31, 32, NULL, 37, 37, 37, 37), +(38, 31, 32, NULL, 38, 37, 37, 37), +(39, 31, 32, NULL, 39, 37, 37, 37); +SELECT COUNT(*) FROM t1; +COUNT(*) +16 +# Fail on duplicate key update for row added in our transaction +UPDATE t1 SET id5=37 WHERE id1=38; +ERROR 23000: Duplicate entry '37' for key 'id5' +# Fail on lock timeout for row modified in another transaction +UPDATE t1 SET id5=34 WHERE id1=38; +ERROR HY000: Lock wait timeout exceeded; try restarting transaction +# NULL values are unique +UPDATE t1 SET id5=NULL WHERE value1 > 37; +connection con1; +COMMIT; +connection con2; +COMMIT; +connection con2; +BEGIN; +SELECT COUNT(*) FROM t1; +COUNT(*) +17 +connection con1; +BEGIN; +INSERT INTO t1 VALUES (40, 40, 40, 40, 40, 40, 40, 40); +connection con2; +# When transaction is pending, fail on lock acquisition +INSERT INTO t1 VALUES (40, 40, 40, 40, 40, 40, 40, 40); +ERROR HY000: Lock wait timeout exceeded; try restarting transaction +INSERT INTO t1 VALUES (41, 40, 40, 40, 40, 40, 40, 40); +ERROR HY000: Lock wait timeout exceeded; try restarting transaction +SELECT COUNT(*) FROM t1; +COUNT(*) +17 +connection con1; +COMMIT; +connection con2; +# When transaction is committed, fail on duplicate key +INSERT INTO t1 VALUES (40, 40, 40, 40, 40, 40, 40, 40); +Got one of the listed errors +INSERT INTO t1 VALUES (41, 40, 40, 40, 40, 40, 40, 40); +ERROR 23000: Duplicate entry '40-40-40' for key 'id2_2' +ROLLBACK; +SELECT * FROM t1; +id1 id2 id3 id4 id5 value1 value2 value3 +40 40 40 40 40 40 40 40 +39 31 32 NULL 39 37 37 37 +38 31 32 NULL 38 37 37 37 +37 31 32 NULL 37 37 37 37 +30 31 32 33 34 30 30 30 +22 20 20 NULL NULL 20 20 20 +21 20 20 NULL NULL 20 20 20 +20 20 20 NULL NULL 20 20 20 +10 10 10 10 10 11 5 5 +9 9 9 9 9 10 4 4 +8 
8 8 8 8 9 4 4 +7 7 7 7 7 8 3 3 +6 6 6 6 6 7 3 3 +5 5 5 5 5 6 2 2 +4 4 4 4 4 5 2 2 +3 3 3 3 3 4 1 1 +2 2 2 2 2 3 1 1 +1 1 1 1 1 2 0 0 +disconnect con1; +disconnect con2; +connection default; +DROP TABLE t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/unsupported_tx_isolations.result b/storage/rocksdb/mysql-test/rocksdb/r/unsupported_tx_isolations.result new file mode 100644 index 0000000000000..32776e19767f9 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/unsupported_tx_isolations.result @@ -0,0 +1,18 @@ +DROP TABLE IF EXISTS t1; +create table t1 (id int primary key, value int) engine=rocksdb; +insert into t1 values (1,1); +SET session transaction isolation level read uncommitted; +begin; +insert into t1 values (2,1); +ERROR HY000: MyRocks supports only READ COMMITTED and REPEATABLE READ isolation levels. Please change from current isolation level READ-UNCOMMITTED +select * from t1 where id=1; +ERROR HY000: MyRocks supports only READ COMMITTED and REPEATABLE READ isolation levels. Please change from current isolation level READ-UNCOMMITTED +rollback; +SET session transaction isolation level serializable; +begin; +insert into t1 values (2,1); +ERROR HY000: MyRocks supports only READ COMMITTED and REPEATABLE READ isolation levels. Please change from current isolation level SERIALIZABLE +select * from t1 where id=1; +ERROR HY000: MyRocks supports only READ COMMITTED and REPEATABLE READ isolation levels. 
Please change from current isolation level SERIALIZABLE +rollback; +DROP TABLE t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/update.result b/storage/rocksdb/mysql-test/rocksdb/r/update.result new file mode 100644 index 0000000000000..33bf8d09405b9 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/update.result @@ -0,0 +1,113 @@ +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 (a INT, b CHAR(8), pk INT AUTO_INCREMENT PRIMARY KEY) ENGINE=rocksdb; +INSERT INTO t1 (a,b) VALUES (1,'a'),(2,'b'),(3,'c'),(4,'d'),(5,'e'),(10000,'foobar'); +INSERT INTO t1 (a,b) SELECT a, b FROM t1; +UPDATE t1 SET a=a+100; +SELECT a,b FROM t1; +a b +101 a +101 a +10100 foobar +10100 foobar +102 b +102 b +103 c +103 c +104 d +104 d +105 e +105 e +UPDATE t1 SET a=a-100, b=DEFAULT WHERE a>100; +SELECT a,b FROM t1; +a b +1 NULL +1 NULL +10000 NULL +10000 NULL +2 NULL +2 NULL +3 NULL +3 NULL +4 NULL +4 NULL +5 NULL +5 NULL +UPDATE t1 SET b = 'update' WHERE a <= 4 ORDER BY b DESC, a ASC LIMIT 1; +SELECT a,b FROM t1; +a b +1 NULL +1 update +10000 NULL +10000 NULL +2 NULL +2 NULL +3 NULL +3 NULL +4 NULL +4 NULL +5 NULL +5 NULL +DROP TABLE t1; +CREATE TABLE t1 (a INT, b CHAR(8), pk INT AUTO_INCREMENT PRIMARY KEY) ENGINE=rocksdb; +INSERT INTO t1 (a,b) VALUES (1,'a'),(2,'b'),(3,'c'),(4,'d'),(5,'e'),(10000,'foobar'); +INSERT INTO t1 (a,b) SELECT a, b FROM t1; +BEGIN; +UPDATE t1 SET a=a+100; +UPDATE t1 SET a=a-50, b=DEFAULT WHERE a>100; +COMMIT; +SELECT * FROM t1 ORDER BY pk; +a b pk +10050 NULL 12 +10050 NULL 6 +51 NULL 1 +51 NULL 7 +52 NULL 2 +52 NULL 8 +53 NULL 3 +53 NULL 9 +54 NULL 10 +54 NULL 4 +55 NULL 11 +55 NULL 5 +BEGIN; +UPDATE t1 SET b = 'update' WHERE a <= 4 ORDER BY a DESC, b ASC LIMIT 3; +UPDATE t1 SET b = ''; +ROLLBACK; +SELECT * FROM t1 ORDER BY pk; +a b pk +51 NULL 1 +52 NULL 2 +53 NULL 3 +54 NULL 4 +55 NULL 5 +10050 NULL 6 +51 NULL 7 +52 NULL 8 +53 NULL 9 +54 NULL 10 +55 NULL 11 +10050 NULL 12 +BEGIN; +UPDATE t1 SET b = 'update2' WHERE a <= 100; +SAVEPOINT spt1; +UPDATE t1 SET b 
= ''; +ROLLBACK TO SAVEPOINT spt1; +ERROR HY000: MyRocks currently does not support ROLLBACK TO SAVEPOINT if modifying rows. +UPDATE t1 SET b = 'upd' WHERE a = 10050; +COMMIT; +ERROR HY000: This transaction was rolled back and cannot be committed. Only supported operation is to roll it back, so all pending changes will be discarded. Please restart another transaction. +SELECT * FROM t1 ORDER BY pk; +a b pk +51 NULL 1 +52 NULL 2 +53 NULL 3 +54 NULL 4 +55 NULL 5 +10050 NULL 6 +51 NULL 7 +52 NULL 8 +53 NULL 9 +54 NULL 10 +55 NULL 11 +10050 NULL 12 +DROP TABLE t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/update_ignore.result b/storage/rocksdb/mysql-test/rocksdb/r/update_ignore.result new file mode 100644 index 0000000000000..d36371be45b4a --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/update_ignore.result @@ -0,0 +1,57 @@ +DROP TABLE IF EXISTS t1,t2; +CREATE TABLE t1 (a INT, b CHAR(8), pk INT AUTO_INCREMENT PRIMARY KEY) ENGINE=rocksdb; +INSERT INTO t1 (a,b) VALUES (1,'a'),(2,'b'),(3,'c'),(4,'d'),(5,'e'),(10000,'foobar'); +INSERT INTO t1 (a,b) SELECT a, b FROM t1; +CREATE TABLE t2 (c CHAR(8), d INT, pk INT AUTO_INCREMENT PRIMARY KEY) ENGINE=rocksdb; +INSERT INTO t2 (c,d) SELECT b, a FROM t1; +UPDATE IGNORE t1 SET b = 'upd1' WHERE b IS NOT NULL ORDER BY a LIMIT 1; +SELECT a,b FROM t1 ORDER BY pk; +a b +1 upd1 +2 b +3 c +4 d +5 e +10000 foobar +1 a +2 b +3 c +4 d +5 e +10000 foobar +UPDATE t1, t2 SET b = 'upd2a', c = 'upd2b' +WHERE c < b OR a != ( SELECT 1 UNION SELECT 2 ); +ERROR 21000: Subquery returns more than 1 row +UPDATE IGNORE t1, t2 SET b = 'upd2a', c = 'upd2b' +WHERE c < b OR a != ( SELECT 1 UNION SELECT 2 ); +Warnings: +Warning 1242 Subquery returns more than 1 row +SELECT a,b FROM t1 ORDER BY pk; +a b +1 upd2a +2 upd2a +3 upd2a +4 upd2a +5 upd2a +10000 upd2a +1 a +2 upd2a +3 upd2a +4 upd2a +5 upd2a +10000 upd2a +SELECT c,d FROM t2 ORDER BY pk; +c d +upd2b 1 +upd2b 2 +upd2b 3 +upd2b 4 +upd2b 5 +upd2b 10000 +upd2b 1 +upd2b 2 +upd2b 3 +upd2b 4 
+upd2b 5 +upd2b 10000 +DROP TABLE t1, t2; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/update_multi.result b/storage/rocksdb/mysql-test/rocksdb/r/update_multi.result new file mode 100644 index 0000000000000..294c07b2a799e --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/update_multi.result @@ -0,0 +1,691 @@ +DROP TABLE IF EXISTS t1,t2; +CREATE TABLE t1 (a INT, b CHAR(8), pk INT AUTO_INCREMENT +PRIMARY KEY) ENGINE=rocksdb; +INSERT INTO t1 (a,b) VALUES +(1,'a'),(2,'b'),(3,'c'),(4,'d'),(5,'e'),(10000,'foobar'); +INSERT INTO t1 (a,b) SELECT a, b FROM t1; +CREATE TABLE t2 (c CHAR(8), d INT, pk INT AUTO_INCREMENT +PRIMARY KEY) ENGINE=rocksdb; +INSERT INTO t2 (c,d) SELECT b, a FROM t1; +UPDATE t1 STRAIGHT_JOIN t2 SET t1.a = t2.d+100, t2.c = 'multi' +WHERE c < b AND a + d != 1; +SELECT a,b FROM t1; +a b +1 a +1 a +101 b +101 b +101 c +101 c +101 d +101 d +101 e +101 e +101 foobar +101 foobar +SELECT c,d FROM t2; +c d +foobar 10000 +foobar 10000 +multi 1 +multi 1 +multi 2 +multi 2 +multi 3 +multi 3 +multi 4 +multi 4 +multi 5 +multi 5 +UPDATE t2 STRAIGHT_JOIN t1 SET t2.d = DEFAULT +WHERE c = 'foobar' and b = c; +SELECT a,b FROM t1; +a b +1 a +1 a +101 b +101 b +101 c +101 c +101 d +101 d +101 e +101 e +101 foobar +101 foobar +SELECT c,d FROM t2; +c d +foobar NULL +foobar NULL +multi 1 +multi 1 +multi 2 +multi 2 +multi 3 +multi 3 +multi 4 +multi 4 +multi 5 +multi 5 +DROP TABLE t1, t2; +CREATE TABLE t1 (a INT, b CHAR(8), pk INT AUTO_INCREMENT +PRIMARY KEY) ENGINE=rocksdb; +INSERT INTO t1 (a,b) VALUES +(1,'a'),(2,'b'),(3,'c'),(4,'d'),(5,'e'),(10000,'foobar'); +INSERT INTO t1 (a,b) SELECT a, b FROM t1; +CREATE TABLE t2 (c CHAR(8), d INT, pk INT AUTO_INCREMENT +PRIMARY KEY) ENGINE=rocksdb; +INSERT INTO t2 (c,d) SELECT b, a FROM t1; +UPDATE t1 STRAIGHT_JOIN t2 SET t1.a = t2.d+100, t2.c = 'multi' +WHERE c < b AND a + d != 1; +SELECT a,b FROM t1; +a b +1 a +1 a +101 b +101 b +101 c +101 c +101 d +101 d +101 e +101 e +101 foobar +101 foobar +SELECT c,d FROM t2; +c d 
+foobar 10000 +foobar 10000 +multi 1 +multi 1 +multi 2 +multi 2 +multi 3 +multi 3 +multi 4 +multi 4 +multi 5 +multi 5 +UPDATE t2 STRAIGHT_JOIN t1 SET t2.d = DEFAULT +WHERE c = 'foobar' and b = c; +SELECT a,b FROM t1; +a b +1 a +1 a +101 b +101 b +101 c +101 c +101 d +101 d +101 e +101 e +101 foobar +101 foobar +SELECT c,d FROM t2; +c d +foobar NULL +foobar NULL +multi 1 +multi 1 +multi 2 +multi 2 +multi 3 +multi 3 +multi 4 +multi 4 +multi 5 +multi 5 +DROP TABLE t1, t2; +CREATE TABLE t1 (a INT, b CHAR(8), pk INT AUTO_INCREMENT +PRIMARY KEY) ENGINE=rocksdb; +INSERT INTO t1 (a,b) VALUES +(1,'a'),(2,'b'),(3,'c'),(4,'d'),(5,'e'),(10000,'foobar'); +INSERT INTO t1 (a,b) SELECT a, b FROM t1; +CREATE TABLE t2 (c CHAR(8), d INT, pk INT AUTO_INCREMENT +PRIMARY KEY) ENGINE=rocksdb; +INSERT INTO t2 (c,d) SELECT b, a FROM t1; +UPDATE t1 STRAIGHT_JOIN t2 SET t1.a = t2.d+100, t2.c = 'multi' +WHERE c < b AND a + d != 1; +SELECT a,b FROM t1; +a b +1 a +1 a +101 b +101 b +101 c +101 c +101 d +101 d +101 e +101 e +101 foobar +101 foobar +SELECT c,d FROM t2; +c d +foobar 10000 +foobar 10000 +multi 1 +multi 1 +multi 2 +multi 2 +multi 3 +multi 3 +multi 4 +multi 4 +multi 5 +multi 5 +UPDATE t2 STRAIGHT_JOIN t1 SET t2.d = DEFAULT +WHERE c = 'foobar' and b = c; +SELECT a,b FROM t1; +a b +1 a +1 a +101 b +101 b +101 c +101 c +101 d +101 d +101 e +101 e +101 foobar +101 foobar +SELECT c,d FROM t2; +c d +foobar NULL +foobar NULL +multi 1 +multi 1 +multi 2 +multi 2 +multi 3 +multi 3 +multi 4 +multi 4 +multi 5 +multi 5 +DROP TABLE t1, t2; +CREATE TABLE t1 (a INT, b CHAR(8), pk INT AUTO_INCREMENT +PRIMARY KEY) ENGINE=rocksdb; +INSERT INTO t1 (a,b) VALUES +(1,'a'),(2,'b'),(3,'c'),(4,'d'),(5,'e'),(10000,'foobar'); +INSERT INTO t1 (a,b) SELECT a, b FROM t1; +CREATE TABLE t2 (c CHAR(8), d INT, pk INT AUTO_INCREMENT +PRIMARY KEY) ENGINE=rocksdb; +INSERT INTO t2 (c,d) SELECT b, a FROM t1; +UPDATE t1 STRAIGHT_JOIN t2 SET t1.a = t2.d+100, t2.c = 'multi' +WHERE c < b AND a + d != 1; +SELECT a,b FROM t1; +a 
b +1 a +1 a +101 b +101 b +101 c +101 c +101 d +101 d +101 e +101 e +101 foobar +101 foobar +SELECT c,d FROM t2; +c d +foobar 10000 +foobar 10000 +multi 1 +multi 1 +multi 2 +multi 2 +multi 3 +multi 3 +multi 4 +multi 4 +multi 5 +multi 5 +UPDATE t2 STRAIGHT_JOIN t1 SET t2.d = DEFAULT +WHERE c = 'foobar' and b = c; +SELECT a,b FROM t1; +a b +1 a +1 a +101 b +101 b +101 c +101 c +101 d +101 d +101 e +101 e +101 foobar +101 foobar +SELECT c,d FROM t2; +c d +foobar NULL +foobar NULL +multi 1 +multi 1 +multi 2 +multi 2 +multi 3 +multi 3 +multi 4 +multi 4 +multi 5 +multi 5 +DROP TABLE t1, t2; +CREATE TABLE t1 (a INT, b CHAR(8), pk INT AUTO_INCREMENT +PRIMARY KEY) ENGINE=rocksdb; +INSERT INTO t1 (a,b) VALUES +(1,'a'),(2,'b'),(3,'c'),(4,'d'),(5,'e'),(10000,'foobar'); +INSERT INTO t1 (a,b) SELECT a, b FROM t1; +CREATE TABLE t2 (c CHAR(8), d INT, pk INT AUTO_INCREMENT +PRIMARY KEY) ENGINE=rocksdb; +INSERT INTO t2 (c,d) SELECT b, a FROM t1; +UPDATE t1 STRAIGHT_JOIN t2 SET t1.a = t2.d+100, t2.c = 'multi' +WHERE c < b AND a + d != 1; +SELECT a,b FROM t1; +a b +1 a +1 a +101 b +101 b +101 c +101 c +101 d +101 d +101 e +101 e +101 foobar +101 foobar +SELECT c,d FROM t2; +c d +foobar 10000 +foobar 10000 +multi 1 +multi 1 +multi 2 +multi 2 +multi 3 +multi 3 +multi 4 +multi 4 +multi 5 +multi 5 +UPDATE t2 STRAIGHT_JOIN t1 SET t2.d = DEFAULT +WHERE c = 'foobar' and b = c; +SELECT a,b FROM t1; +a b +1 a +1 a +101 b +101 b +101 c +101 c +101 d +101 d +101 e +101 e +101 foobar +101 foobar +SELECT c,d FROM t2; +c d +foobar NULL +foobar NULL +multi 1 +multi 1 +multi 2 +multi 2 +multi 3 +multi 3 +multi 4 +multi 4 +multi 5 +multi 5 +DROP TABLE t1, t2; +CREATE TABLE t1 (a INT, b CHAR(8), pk INT AUTO_INCREMENT +PRIMARY KEY) ENGINE=rocksdb; +INSERT INTO t1 (a,b) VALUES +(1,'a'),(2,'b'),(3,'c'),(4,'d'),(5,'e'),(10000,'foobar'); +INSERT INTO t1 (a,b) SELECT a, b FROM t1; +CREATE TABLE t2 (c CHAR(8), d INT, pk INT AUTO_INCREMENT +PRIMARY KEY) ENGINE=rocksdb; +INSERT INTO t2 (c,d) SELECT b, a FROM 
t1; +UPDATE t1 STRAIGHT_JOIN t2 SET t1.a = t2.d+100, t2.c = 'multi' +WHERE c < b AND a + d != 1; +SELECT a,b FROM t1; +a b +1 a +1 a +101 b +101 b +101 c +101 c +101 d +101 d +101 e +101 e +101 foobar +101 foobar +SELECT c,d FROM t2; +c d +foobar 10000 +foobar 10000 +multi 1 +multi 1 +multi 2 +multi 2 +multi 3 +multi 3 +multi 4 +multi 4 +multi 5 +multi 5 +UPDATE t2 STRAIGHT_JOIN t1 SET t2.d = DEFAULT +WHERE c = 'foobar' and b = c; +SELECT a,b FROM t1; +a b +1 a +1 a +101 b +101 b +101 c +101 c +101 d +101 d +101 e +101 e +101 foobar +101 foobar +SELECT c,d FROM t2; +c d +foobar NULL +foobar NULL +multi 1 +multi 1 +multi 2 +multi 2 +multi 3 +multi 3 +multi 4 +multi 4 +multi 5 +multi 5 +DROP TABLE t1, t2; +CREATE TABLE t1 (a INT, b CHAR(8), pk INT AUTO_INCREMENT +PRIMARY KEY) ENGINE=rocksdb; +INSERT INTO t1 (a,b) VALUES +(1,'a'),(2,'b'),(3,'c'),(4,'d'),(5,'e'),(10000,'foobar'); +INSERT INTO t1 (a,b) SELECT a, b FROM t1; +CREATE TABLE t2 (c CHAR(8), d INT, pk INT AUTO_INCREMENT +PRIMARY KEY) ENGINE=rocksdb; +INSERT INTO t2 (c,d) SELECT b, a FROM t1; +UPDATE t1 STRAIGHT_JOIN t2 SET t1.a = t2.d+100, t2.c = 'multi' +WHERE c < b AND a + d != 1; +SELECT a,b FROM t1; +a b +1 a +1 a +101 b +101 b +101 c +101 c +101 d +101 d +101 e +101 e +101 foobar +101 foobar +SELECT c,d FROM t2; +c d +foobar 10000 +foobar 10000 +multi 1 +multi 1 +multi 2 +multi 2 +multi 3 +multi 3 +multi 4 +multi 4 +multi 5 +multi 5 +UPDATE t2 STRAIGHT_JOIN t1 SET t2.d = DEFAULT +WHERE c = 'foobar' and b = c; +SELECT a,b FROM t1; +a b +1 a +1 a +101 b +101 b +101 c +101 c +101 d +101 d +101 e +101 e +101 foobar +101 foobar +SELECT c,d FROM t2; +c d +foobar NULL +foobar NULL +multi 1 +multi 1 +multi 2 +multi 2 +multi 3 +multi 3 +multi 4 +multi 4 +multi 5 +multi 5 +DROP TABLE t1, t2; +CREATE TABLE t1 (a INT, b CHAR(8), pk INT AUTO_INCREMENT +PRIMARY KEY) ENGINE=rocksdb; +INSERT INTO t1 (a,b) VALUES +(1,'a'),(2,'b'),(3,'c'),(4,'d'),(5,'e'),(10000,'foobar'); +INSERT INTO t1 (a,b) SELECT a, b FROM t1; +CREATE 
TABLE t2 (c CHAR(8), d INT, pk INT AUTO_INCREMENT +PRIMARY KEY) ENGINE=rocksdb; +INSERT INTO t2 (c,d) SELECT b, a FROM t1; +UPDATE t1 STRAIGHT_JOIN t2 SET t1.a = t2.d+100, t2.c = 'multi' +WHERE c < b AND a + d != 1; +SELECT a,b FROM t1; +a b +1 a +1 a +101 b +101 b +101 c +101 c +101 d +101 d +101 e +101 e +101 foobar +101 foobar +SELECT c,d FROM t2; +c d +foobar 10000 +foobar 10000 +multi 1 +multi 1 +multi 2 +multi 2 +multi 3 +multi 3 +multi 4 +multi 4 +multi 5 +multi 5 +UPDATE t2 STRAIGHT_JOIN t1 SET t2.d = DEFAULT +WHERE c = 'foobar' and b = c; +SELECT a,b FROM t1; +a b +1 a +1 a +101 b +101 b +101 c +101 c +101 d +101 d +101 e +101 e +101 foobar +101 foobar +SELECT c,d FROM t2; +c d +foobar NULL +foobar NULL +multi 1 +multi 1 +multi 2 +multi 2 +multi 3 +multi 3 +multi 4 +multi 4 +multi 5 +multi 5 +DROP TABLE t1, t2; +CREATE TABLE t1 (a INT, b CHAR(8), pk INT AUTO_INCREMENT +PRIMARY KEY) ENGINE=rocksdb; +INSERT INTO t1 (a,b) VALUES +(1,'a'),(2,'b'),(3,'c'),(4,'d'),(5,'e'),(10000,'foobar'); +INSERT INTO t1 (a,b) SELECT a, b FROM t1; +CREATE TABLE t2 (c CHAR(8), d INT, pk INT AUTO_INCREMENT +PRIMARY KEY) ENGINE=rocksdb; +INSERT INTO t2 (c,d) SELECT b, a FROM t1; +UPDATE t1 STRAIGHT_JOIN t2 SET t1.a = t2.d+100, t2.c = 'multi' +WHERE c < b AND a + d != 1; +SELECT a,b FROM t1; +a b +1 a +1 a +101 b +101 b +101 c +101 c +101 d +101 d +101 e +101 e +101 foobar +101 foobar +SELECT c,d FROM t2; +c d +foobar 10000 +foobar 10000 +multi 1 +multi 1 +multi 2 +multi 2 +multi 3 +multi 3 +multi 4 +multi 4 +multi 5 +multi 5 +UPDATE t2 STRAIGHT_JOIN t1 SET t2.d = DEFAULT +WHERE c = 'foobar' and b = c; +SELECT a,b FROM t1; +a b +1 a +1 a +101 b +101 b +101 c +101 c +101 d +101 d +101 e +101 e +101 foobar +101 foobar +SELECT c,d FROM t2; +c d +foobar NULL +foobar NULL +multi 1 +multi 1 +multi 2 +multi 2 +multi 3 +multi 3 +multi 4 +multi 4 +multi 5 +multi 5 +DROP TABLE t1, t2; +CREATE TABLE t1 (a INT, b CHAR(8), pk INT AUTO_INCREMENT +PRIMARY KEY) ENGINE=rocksdb; +INSERT INTO t1 
(a,b) VALUES +(1,'a'),(2,'b'),(3,'c'),(4,'d'),(5,'e'),(10000,'foobar'); +INSERT INTO t1 (a,b) SELECT a, b FROM t1; +CREATE TABLE t2 (c CHAR(8), d INT, pk INT AUTO_INCREMENT +PRIMARY KEY) ENGINE=rocksdb; +INSERT INTO t2 (c,d) SELECT b, a FROM t1; +UPDATE t1 STRAIGHT_JOIN t2 SET t1.a = t2.d+100, t2.c = 'multi' +WHERE c < b AND a + d != 1; +SELECT a,b FROM t1; +a b +1 a +1 a +101 b +101 b +101 c +101 c +101 d +101 d +101 e +101 e +101 foobar +101 foobar +SELECT c,d FROM t2; +c d +foobar 10000 +foobar 10000 +multi 1 +multi 1 +multi 2 +multi 2 +multi 3 +multi 3 +multi 4 +multi 4 +multi 5 +multi 5 +UPDATE t2 STRAIGHT_JOIN t1 SET t2.d = DEFAULT +WHERE c = 'foobar' and b = c; +SELECT a,b FROM t1; +a b +1 a +1 a +101 b +101 b +101 c +101 c +101 d +101 d +101 e +101 e +101 foobar +101 foobar +SELECT c,d FROM t2; +c d +foobar NULL +foobar NULL +multi 1 +multi 1 +multi 2 +multi 2 +multi 3 +multi 3 +multi 4 +multi 4 +multi 5 +multi 5 +DROP TABLE t1, t2; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/update_with_keys.result b/storage/rocksdb/mysql-test/rocksdb/r/update_with_keys.result new file mode 100644 index 0000000000000..576fe6f67542e --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/update_with_keys.result @@ -0,0 +1,38 @@ +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 (a INT, b CHAR(8), pk INT AUTO_INCREMENT PRIMARY KEY, INDEX(b)) ENGINE=rocksdb; +INSERT INTO t1 (a,b) VALUES (1,'a'),(2,'b'),(3,'c'),(4,'d'),(5,'e'),(6,'x'),(7,'y'),(8,'z'); +UPDATE t1 SET a=100, b='f' WHERE b IN ('b','c'); +UPDATE t1 SET b='m' WHERE b = 'f'; +UPDATE t1 SET b='z' WHERE a < 2; +UPDATE t1 SET b=''; +SELECT a,b FROM t1; +a b +1 +100 +100 +4 +5 +6 +7 +8 +DROP TABLE t1; +# RocksDB: skip the test for secondary UNIQUE keys. 
+CREATE TABLE t1 (a INT PRIMARY KEY, b CHAR(8)) ENGINE=rocksdb; +INSERT INTO t1 (a,b) VALUES (1,'a'),(2,'b'),(3,'c'),(4,'d'),(5,'e'),(0,'f'),(100,'a'); +UPDATE t1 SET a=a+200; +UPDATE t1 SET a=0 WHERE a > 250; +UPDATE t1 SET a=205 WHERE a=200; +ERROR 23000: Duplicate entry '205' for key 'PRIMARY' +UPDATE t1 SET a=12345 ORDER BY a DESC, b LIMIT 1; +SELECT a,b FROM t1; +a b +0 a +12345 e +200 f +201 a +202 b +203 c +204 d +UPDATE t1 SET a=80 WHERE a IN (202,203); +ERROR 23000: Duplicate entry '80' for key 'PRIMARY' +DROP TABLE t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/use_direct_reads_writes.result b/storage/rocksdb/mysql-test/rocksdb/r/use_direct_reads_writes.result new file mode 100644 index 0000000000000..59fb1e41bddbe --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/use_direct_reads_writes.result @@ -0,0 +1,4 @@ +call mtr.add_suppression("rocksdb"); +call mtr.add_suppression("Aborting"); +FOUND 1 /enable both use_direct_reads/ in mysqld.1.err +FOUND 1 /enable both use_direct_writes/ in mysqld.1.err diff --git a/storage/rocksdb/mysql-test/rocksdb/r/validate_datadic.result b/storage/rocksdb/mysql-test/rocksdb/r/validate_datadic.result new file mode 100644 index 0000000000000..b5ab85d14c6e1 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/validate_datadic.result @@ -0,0 +1,8 @@ +call mtr.add_suppression('RocksDB: Schema mismatch'); +CREATE TABLE t1 (pk int primary key) ENGINE=ROCKSDB; +CREATE TABLE t2 (pk int primary key) ENGINE=ROCKSDB PARTITION BY KEY(pk) PARTITIONS 4; +"Expect errors that we are missing two .frm files" +FOUND 2 /Schema mismatch/ in mysqld.1.err +"Expect an error that we have an extra .frm file" +FOUND 3 /Schema mismatch/ in mysqld.1.err +DROP TABLE t1, t2; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/write_sync.result b/storage/rocksdb/mysql-test/rocksdb/r/write_sync.result new file mode 100644 index 0000000000000..8f8495302e7de --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/write_sync.result @@ -0,0 
+1,40 @@ +SET GLOBAL rocksdb_write_disable_wal=false; +SET GLOBAL rocksdb_write_ignore_missing_column_families=true; +create table aaa (id int primary key, i int) engine rocksdb; +set @save_rocksdb_flush_log_at_trx_commit= @@global.rocksdb_flush_log_at_trx_commit; +SET LOCAL rocksdb_flush_log_at_trx_commit=0; +select variable_value into @a from information_schema.global_status where variable_name='rocksdb_wal_synced'; +insert aaa(id, i) values(1,1); +select variable_value-@a from information_schema.global_status where variable_name='rocksdb_wal_synced'; +variable_value-@a +0 +insert aaa(id, i) values(2,1); +select variable_value-@a from information_schema.global_status where variable_name='rocksdb_wal_synced'; +variable_value-@a +0 +insert aaa(id, i) values(3,1); +select variable_value-@a from information_schema.global_status where variable_name='rocksdb_wal_synced'; +variable_value-@a +0 +SET LOCAL rocksdb_flush_log_at_trx_commit=1; +insert aaa(id, i) values(4,1); +select variable_value-@a from information_schema.global_status where variable_name='rocksdb_wal_synced'; +variable_value-@a +1 +insert aaa(id, i) values(5,1); +select variable_value-@a from information_schema.global_status where variable_name='rocksdb_wal_synced'; +variable_value-@a +2 +insert aaa(id, i) values(6,1); +select variable_value-@a from information_schema.global_status where variable_name='rocksdb_wal_synced'; +variable_value-@a +3 +SET GLOBAL rocksdb_background_sync=on; +SET LOCAL rocksdb_flush_log_at_trx_commit=0; +insert aaa(id, i) values(7,1); +truncate table aaa; +drop table aaa; +SET GLOBAL rocksdb_flush_log_at_trx_commit=@save_rocksdb_flush_log_at_trx_commit; +SET GLOBAL rocksdb_write_disable_wal=false; +SET GLOBAL rocksdb_write_ignore_missing_column_families=false; +SET GLOBAL rocksdb_background_sync=off; diff --git a/storage/rocksdb/mysql-test/rocksdb/slow_query_log.awk b/storage/rocksdb/mysql-test/rocksdb/slow_query_log.awk new file mode 100644 index 0000000000000..4617b6d9fc3a5 --- 
/dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/slow_query_log.awk @@ -0,0 +1,27 @@ +#!/bin/awk + +/Query_time:/ { + results["Rows_examined:"] = "uninit"; + results["RocksDB_key_skipped:"] = "uninit"; + results["RocksDB_del_skipped:"] = "uninit"; + + for (i = 2; i <= NF; i = i+2) { + results[$i] = $(i+1); + } + + # If the output format has changed and we don't find these keys, + # error out. + if (results["Rows_examined:"] == "uninit" || + results["RocksDB_key_skipped:"] == "uninit" || + results["RocksDB_del_skipped:"] == "uninit") { + exit(-2); + } + + if (results["Rows_examined:"] == 0) { + next + } + if (results["RocksDB_key_skipped:"] == 0 || + results["RocksDB_del_skipped:"] == 0) { + exit(-1); + } +} diff --git a/storage/rocksdb/mysql-test/rocksdb/suite.opt b/storage/rocksdb/mysql-test/rocksdb/suite.opt new file mode 100644 index 0000000000000..f5dc0ce891c0d --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/suite.opt @@ -0,0 +1,2 @@ +--ignore-db-dirs=.rocksdb --plugin-load=$HA_ROCKSDB_SO --default-storage-engine=rocksdb + diff --git a/storage/rocksdb/mysql-test/rocksdb/suite.pm b/storage/rocksdb/mysql-test/rocksdb/suite.pm new file mode 100644 index 0000000000000..79c630f87f12d --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/suite.pm @@ -0,0 +1,25 @@ +package My::Suite::Rocksdb; + +# +# Note: ../rocksdb_sys_vars/suite.pm file has a similar +# function. If you modify this file, consider modifying that one, too. 
+# +@ISA = qw(My::Suite); +use My::Find; +use File::Basename; +use strict; + +sub is_default { not $::opt_embedded_server } + +my $sst_dump= +::mtr_exe_maybe_exists( + "$::bindir/storage/rocksdb$::opt_vs_config/sst_dump", + "$::path_client_bindir/sst_dump"); +return "RocksDB is not compiled, no sst_dump" unless $sst_dump; +$ENV{MARIAROCKS_SST_DUMP}="$sst_dump"; + +# Temporarily disable testing under valgrind, due to MDEV-12439 +return "RocksDB tests disabled under valgrind" if ($::opt_valgrind); + +bless { }; + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/1st.test b/storage/rocksdb/mysql-test/rocksdb/t/1st.test new file mode 100644 index 0000000000000..cecef8b75379b --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/1st.test @@ -0,0 +1,36 @@ +--source include/have_rocksdb.inc + +# +# This test checks some very basic capabilities +# which will be used in almost every other test, +# and will not be checked through support* variables. +# If this test does not pass, there is no point +# at executing other ones. +# +# Minimal requirements: +# - supported column types: INT, CHAR (default CHAR(8), INT(11)); +# - column attributes as declared in define_engine.inc ($default_col_opts) +# (by default empty, which means no additional attributes apart from the type); +# - table attributes as declared in define_engine.inc ($default_tbl_opts) +# (by default empty, which means no additional attributes apart from ENGINE); +# - CREATE TABLE .. (column1 , column2 ) ENGINE=; +# - INSERT INTO TABLE .. VALUES (val1,val2); +# - DROP TABLE .. +# - SELECT a,b FROM .. +# - SHOW CREATE TABLE .. +# - SHOW COLUMNS IN ... 
+# + +--disable_warnings +DROP TABLE IF EXISTS t1; +--enable_warnings + +CREATE TABLE t1 (pk INT PRIMARY KEY DEFAULT '0', a INT(11), b CHAR(8)) ENGINE=rocksdb; +SHOW CREATE TABLE t1; +SHOW COLUMNS IN t1; +INSERT INTO t1 VALUES (1, 1,'a'); +INSERT INTO t1 (a,b) VALUES (2,'b'); +--sorted_result +SELECT a,b FROM t1; +DROP TABLE t1; + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/2pc_group_commit-master.opt b/storage/rocksdb/mysql-test/rocksdb/t/2pc_group_commit-master.opt new file mode 100644 index 0000000000000..83ed8522e7298 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/2pc_group_commit-master.opt @@ -0,0 +1 @@ +--binlog-format=row diff --git a/storage/rocksdb/mysql-test/rocksdb/t/2pc_group_commit.test b/storage/rocksdb/mysql-test/rocksdb/t/2pc_group_commit.test new file mode 100644 index 0000000000000..009fd7beaf2f1 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/2pc_group_commit.test @@ -0,0 +1,64 @@ +--source include/have_rocksdb.inc +--source include/have_log_bin.inc + +--echo # Disable for valgrind because this takes too long +--source include/not_valgrind.inc + +--disable_warnings +DROP DATABASE IF EXISTS mysqlslap; +--enable_warnings + +CREATE DATABASE mysqlslap; +USE mysqlslap; +CREATE TABLE t1(id BIGINT AUTO_INCREMENT, value BIGINT, PRIMARY KEY(id)) ENGINE=rocksdb; + +--echo # 2PC enabled, MyRocks durability enabled +SET GLOBAL rocksdb_enable_2pc=0; +SET GLOBAL rocksdb_flush_log_at_trx_commit=1; + +--echo ## 2PC + durability + single thread +select variable_value into @c from information_schema.global_status where variable_name='rocksdb_wal_group_syncs'; +--exec $MYSQL_SLAP --silent --concurrency=1 --number-of-queries=1000 --query="INSERT INTO t1 (id, value) VALUES(NULL, 1)" +select case when variable_value-@c = 1000 then 'true' else 'false' end from information_schema.global_status where variable_name='rocksdb_wal_group_syncs'; + +--echo ## 2PC + durability + group commit +select variable_value into @c from 
information_schema.global_status where variable_name='rocksdb_wal_group_syncs'; +--exec $MYSQL_SLAP --silent --concurrency=50 --number-of-queries=10000 --query="INSERT INTO t1 (id, value) VALUES(NULL, 1)" +select case when variable_value-@c > 0 and variable_value-@c < 10000 then 'true' else 'false' end from information_schema.global_status where variable_name='rocksdb_wal_group_syncs'; + + +--echo # 2PC enabled, MyRocks durability disabled +SET GLOBAL rocksdb_enable_2pc=0; +SET GLOBAL rocksdb_flush_log_at_trx_commit=0; + +select variable_value into @c from information_schema.global_status where variable_name='rocksdb_wal_group_syncs'; +--exec $MYSQL_SLAP --silent --concurrency=1 --number-of-queries=1000 --query="INSERT INTO t1 (id, value) VALUES(NULL, 1)" +select case when variable_value-@c = 0 then 'true' else 'false' end from information_schema.global_status where variable_name='rocksdb_wal_group_syncs'; + +select variable_value into @c from information_schema.global_status where variable_name='rocksdb_wal_group_syncs'; +--exec $MYSQL_SLAP --silent --concurrency=50 --number-of-queries=10000 --query="INSERT INTO t1 (id, value) VALUES(NULL, 1)" +select case when variable_value-@c = 0 then 'true' else 'false' end from information_schema.global_status where variable_name='rocksdb_wal_group_syncs'; + + +--echo # 2PC disabled, MyRocks durability enabled +SET GLOBAL rocksdb_enable_2pc=1; +SET GLOBAL rocksdb_flush_log_at_trx_commit=1; + +select variable_value into @c from information_schema.global_status where variable_name='rocksdb_wal_group_syncs'; +--exec $MYSQL_SLAP --silent --concurrency=1 --number-of-queries=1000 --query="INSERT INTO t1 (id, value) VALUES(NULL, 1)" +select case when variable_value-@c = 0 then 'true' else 'false' end from information_schema.global_status where variable_name='rocksdb_wal_group_syncs'; + +select variable_value into @c from information_schema.global_status where variable_name='rocksdb_wal_group_syncs'; +--exec $MYSQL_SLAP --silent 
--concurrency=50 --number-of-queries=10000 --query="INSERT INTO t1 (id, value) VALUES(NULL, 1)" +select case when variable_value-@c = 0 then 'true' else 'false' end from information_schema.global_status where variable_name='rocksdb_wal_group_syncs'; + + + + + + + +SET GLOBAL rocksdb_enable_2pc=1; +SET GLOBAL rocksdb_flush_log_at_trx_commit=1; +DROP TABLE t1; +DROP DATABASE mysqlslap; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/add_index_inplace.test b/storage/rocksdb/mysql-test/rocksdb/t/add_index_inplace.test new file mode 100644 index 0000000000000..bb95f71456cab --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/add_index_inplace.test @@ -0,0 +1,347 @@ +--source include/have_rocksdb.inc +--source include/have_partition.inc + +--disable_warnings +drop table if exists t1; +--enable_warnings + +## +## test adding index inplace +## + +# test basic add +CREATE TABLE t1 (a INT, b INT, KEY ka(a), KEY kab(a,b)) ENGINE=RocksDB; +INSERT INTO t1 (a, b) VALUES (1, 5); +INSERT INTO t1 (a, b) VALUES (2, 6); +INSERT INTO t1 (a, b) VALUES (3, 7); +ALTER TABLE t1 ADD INDEX kb(b), ALGORITHM=INPLACE; +SHOW CREATE TABLE t1; +CHECK TABLE t1; +--sorted_result +SELECT * FROM t1 FORCE INDEX(kb) WHERE b > 5; +--sorted_result +SELECT * FROM t1 FORCE INDEX(kab) WHERE a > 2; +DROP TABLE t1; + +# test add + drop (simultaneous) +CREATE TABLE t1 (a INT, b INT, KEY ka(a), KEY kab(a,b)) ENGINE=RocksDB; +INSERT INTO t1 (a, b) VALUES (1, 5); +INSERT INTO t1 (a, b) VALUES (2, 6); +INSERT INTO t1 (a, b) VALUES (3, 7); +ALTER TABLE t1 ADD INDEX kb(b), DROP INDEX ka, ALGORITHM=INPLACE; +SHOW CREATE TABLE t1; +CHECK TABLE t1; +--sorted_result +SELECT * FROM t1 FORCE INDEX(kb) WHERE b > 5; +--sorted_result +SELECT * FROM t1 FORCE INDEX(kab) WHERE a > 2; +DROP TABLE t1; + +## test multi-drop + multi-add +CREATE TABLE t1 (a INT, b INT, KEY ka(a), KEY kab(a,b)) ENGINE=RocksDB; +INSERT INTO t1 (a, b) VALUES (1, 5); +INSERT INTO t1 (a, b) VALUES (2, 6); +INSERT INTO t1 (a, b) VALUES (3, 7); 
+ALTER TABLE t1 DROP INDEX ka, DROP INDEX kab, ALGORITHM=INPLACE; +ALTER TABLE t1 ADD INDEX kb(b), ADD INDEX kab(a,b), ALGORITHM=INPLACE; +SHOW CREATE TABLE t1; +CHECK TABLE t1; +--sorted_result +SELECT * FROM t1 FORCE INDEX(kb) WHERE b > 5; +--sorted_result +SELECT * FROM t1 FORCE INDEX(kab) WHERE a > 2; +DROP TABLE t1; + +# test multi add + drop (simultaneous) +CREATE TABLE t1 (a INT, b INT, KEY ka(a), KEY kab(a,b)) ENGINE=RocksDB; +INSERT INTO t1 (a, b) VALUES (1, 5); +INSERT INTO t1 (a, b) VALUES (2, 6); +INSERT INTO t1 (a, b) VALUES (3, 7); +ALTER TABLE t1 ADD INDEX kb(b), DROP INDEX ka, ADD INDEX kba(b,a), DROP INDEX kab, ALGORITHM=INPLACE; +SHOW CREATE TABLE t1; +CHECK TABLE t1; +--sorted_result +SELECT * FROM t1 FORCE INDEX(kb) WHERE b > 5; +--sorted_result +SELECT * FROM t1 FORCE INDEX(kba) WHERE a > 2; +DROP TABLE t1; + +# test dropping and adding a key simultaneously w/ same name but different col +CREATE TABLE t1 (a INT, b INT, KEY ka(a), KEY kab(a,b)) ENGINE=RocksDB; +ALTER TABLE t1 DROP INDEX ka, ADD INDEX ka(b), ALGORITHM=INPLACE; +SHOW CREATE TABLE t1; +CHECK TABLE t1; +--sorted_result +SELECT * FROM t1 FORCE INDEX(ka) WHERE b > 5; +--sorted_result +SELECT * FROM t1 FORCE INDEX(kab) WHERE a > 2; +DROP TABLE t1; + +## +## test adding index inplace w/ various column types +## + +# test basic add +CREATE TABLE t1 (pk CHAR(8) PRIMARY KEY, a VARCHAR(11), b INT UNSIGNED) ENGINE=rocksdb charset utf8 collate utf8_bin; +SHOW CREATE TABLE t1; +SHOW COLUMNS IN t1; +INSERT INTO t1 VALUES ('aaa', '1111', 1); +INSERT INTO t1 VALUES ('bbb', '2222', 2); +INSERT INTO t1 VALUES ('ccc', '3333', 3); +ALTER TABLE t1 ADD INDEX kab(a,b), ALGORITHM=INPLACE; +SHOW CREATE TABLE t1; +CHECK TABLE t1; +--sorted_result +SELECT * FROM t1 FORCE INDEX(kab) WHERE a > '2' AND b < 3; +DROP TABLE t1; + +## test add + drop (simultaneous) +CREATE TABLE t1 (pk CHAR(8) PRIMARY KEY, a VARCHAR(11), b INT UNSIGNED) ENGINE=rocksdb charset utf8 collate utf8_bin; +SHOW CREATE TABLE t1; +SHOW 
COLUMNS IN t1; +INSERT INTO t1 VALUES ('aaa', '1111', 1); +INSERT INTO t1 VALUES ('bbb', '2222', 2); +INSERT INTO t1 VALUES ('ccc', '3333', 3); +ALTER TABLE t1 ADD INDEX kab(a,b), ALGORITHM=INPLACE; +ALTER TABLE t1 ADD INDEX ka(a), DROP INDEX kab, ALGORITHM=INPLACE; +SHOW CREATE TABLE t1; +CHECK TABLE t1; +--sorted_result +SELECT * FROM t1 FORCE INDEX(ka) WHERE a > '2' AND b < 3; +DROP TABLE t1; + +### test multi-drop + multi-add +CREATE TABLE t1 (pk CHAR(8) PRIMARY KEY, a VARCHAR(11), b INT UNSIGNED) ENGINE=rocksdb charset utf8 collate utf8_bin; +SHOW CREATE TABLE t1; +SHOW COLUMNS IN t1; +INSERT INTO t1 VALUES ('aaa', '1111', 1); +INSERT INTO t1 VALUES ('bbb', '2222', 2); +INSERT INTO t1 VALUES ('ccc', '3333', 3); +ALTER TABLE t1 ADD INDEX kab(a,b), ADD INDEX ka(a), ADD INDEX kb(b), ALGORITHM=INPLACE; +ALTER TABLE t1 DROP INDEX ka, DROP INDEX kb, ALGORITHM=INPLACE; +SHOW CREATE TABLE t1; +CHECK TABLE t1; +--sorted_result +SELECT * FROM t1 FORCE INDEX(kab) WHERE a > '2' AND b < 3; +DROP TABLE t1; + +## +## test adding via CREATE/DROP index syntax +## +CREATE TABLE t1 (a INT, b INT, KEY ka(a), KEY kab(a,b)) ENGINE=RocksDB; +INSERT INTO t1 (a, b) VALUES (1, 5); +INSERT INTO t1 (a, b) VALUES (2, 6); +INSERT INTO t1 (a, b) VALUES (3, 7); +CREATE INDEX kb on t1 (b); +CREATE INDEX kba on t1 (b,a); +DROP INDEX ka on t1; +DROP INDEX kab on t1; +SHOW CREATE TABLE t1; +CHECK TABLE t1; +--sorted_result +SELECT * FROM t1 FORCE INDEX(kb) WHERE b > 5; +--sorted_result +SELECT * FROM t1 FORCE INDEX(kba) WHERE a > 2; +DROP TABLE t1; + +# +# Create tables with partitions and try to update/select from them. 
+# +CREATE TABLE t1 (i INT, j INT, k INT, PRIMARY KEY (i), KEY(j)) ENGINE = ROCKSDB PARTITION BY KEY(i) PARTITIONS 4; + +--disable_query_log +let $max = 100; +let $i = 1; +while ($i <= $max) { + let $insert = INSERT INTO t1 VALUES ($i, $i, $i); + inc $i; + eval $insert; +} +--enable_query_log + +ALTER TABLE t1 ADD INDEX kij(i,j), ALGORITHM=INPLACE; +DROP INDEX kij ON t1; +SHOW CREATE TABLE t1; + +SELECT * FROM t1 ORDER BY i LIMIT 10; +SELECT COUNT(*) FROM t1; + +DROP TABLE t1; + +# test failure in prepare phase (due to collation) +set @tmp_rocksdb_strict_collation_check= @@rocksdb_strict_collation_check; +set global rocksdb_strict_collation_check=1; +CREATE TABLE t1 (a INT, b TEXT); + +--error 1105 +ALTER TABLE t1 ADD KEY kb(b(10)); +ALTER TABLE t1 ADD PRIMARY KEY(a); +DROP TABLE t1; +set global rocksdb_strict_collation_check= @tmp_rocksdb_strict_collation_check; + +# make sure race condition between connection close and alter on another +# connection is handled + +set global rocksdb_bulk_load=1; + +--echo # Establish connection con1 (user=root) +connect (con1,localhost,root,,); + +--echo # Switch to connection con1 +connection con1; + +show global variables like 'rocksdb_bulk_load'; +show session variables like 'rocksdb_bulk_load'; + +CREATE TABLE t1 (i INT, j INT, PRIMARY KEY (i)) ENGINE = ROCKSDB; + +INSERT INTO t1 VALUES (1,1); + +# Disconnect connection 1, this starts the code path that will call +# rocksdb_close_connection, ending the bulk load. 
+--echo # Disconnecting on con1 +disconnect con1; + +--echo # Establish connection con2 (user=root) +connect (con2,localhost,root,,); +--echo # Switch to connection con2 +connection con2; + +# when alter table happens, it tries to close all other TABLE instances +# when acquiring the exclusive lock for alter table (this happens in SQL layer) +# make sure bulk_load now handles this possible race condition properly +ALTER TABLE t1 ADD INDEX kj(j), ALGORITHM=INPLACE; + +SELECT COUNT(*) FROM t1 FORCE INDEX(PRIMARY); +SELECT COUNT(*) FROM t1 FORCE INDEX(kj); + +DROP TABLE t1; +disconnect con2; + +# make sure implicilty closing the alter from another session works + +--echo # Establish connection con1 (user=root) +connect (con1,localhost,root,,); +--echo # Establish connection con2 (user=root) +connect (con2,localhost,root,,); + +--echo # Switch to connection con1 +connection con1; + +CREATE TABLE t1 (i INT, j INT, PRIMARY KEY (i)) ENGINE = ROCKSDB; + +set rocksdb_bulk_load=1; +INSERT INTO t1 VALUES (1,1); + +--echo # Switch to connection con2 +connection con2; + +# here, the bulk load hasn't been completed yet, and we are in conn2 +# therefore select count returns 0 +SELECT COUNT(*) FROM t1 FORCE INDEX(PRIMARY); + +# implicilty close the table from connection 2 +ALTER TABLE t1 ADD INDEX kj(j), ALGORITHM=INPLACE; + +SELECT COUNT(*) FROM t1 FORCE INDEX(PRIMARY); +SELECT COUNT(*) FROM t1 FORCE INDEX(kj); + +set global rocksdb_bulk_load=0; + +DROP TABLE t1; + +connection default; + + +SET @prior_rocksdb_merge_combine_read_size= @@rocksdb_merge_combine_read_size; +SET @prior_rocksdb_strict_collation_check= @@rocksdb_strict_collation_check; +SET @prior_rocksdb_merge_buf_size = @@rocksdb_merge_buf_size; + +SET global rocksdb_strict_collation_check = off; +SET session rocksdb_merge_combine_read_size = 566; +SET session rocksdb_merge_buf_size = 336; + +show variables like '%rocksdb_bulk_load%'; +CREATE TABLE t1 (a VARCHAR(80)) ENGINE=RocksDB; +INSERT INTO t1 (a) VALUES 
(REPEAT("a", 80)); +INSERT INTO t1 (a) VALUES (REPEAT("a", 80)); +INSERT INTO t1 (a) VALUES (REPEAT("a", 80)); +INSERT INTO t1 (a) VALUES (REPEAT("a", 80)); +ALTER TABLE t1 ADD INDEX ka(a), ALGORITHM=INPLACE; +SHOW CREATE TABLE t1; +CHECK TABLE t1; +--sorted_result +SELECT * FROM t1 FORCE INDEX(ka) WHERE a > ""; +DROP TABLE t1; + +SET session rocksdb_merge_buf_size = @prior_rocksdb_merge_buf_size; +SET session rocksdb_merge_combine_read_size = @prior_rocksdb_merge_combine_read_size; +SET global rocksdb_strict_collation_check = @prior_rocksdb_strict_collation_check; + +# Test to make sure index statistics are updating properly +CREATE TABLE t1 (i INT, j INT, PRIMARY KEY (i)) ENGINE = ROCKSDB; + +--disable_query_log +let $max = 100; +let $i = 1; +while ($i <= $max) { + let $insert = INSERT INTO t1 VALUES ($i, $i); + inc $i; + eval $insert; +} +--enable_query_log + +set global rocksdb_force_flush_memtable_now=1; + +--let $data_length_old = query_get_value("select INDEX_LENGTH from information_schema.tables where table_schema=database() and table_name='t1'", INDEX_LENGTH, 1) + +## uncomment to see the actual values +#--replace_column 8 # +#SHOW TABLE STATUS WHERE name LIKE 't1'; + +# Now do an alter and see what happens +ALTER TABLE t1 ADD INDEX kj(j), ALGORITHM=INPLACE; + +--let $data_length_new = query_get_value("select INDEX_LENGTH from information_schema.tables where table_schema=database() and table_name='t1'", INDEX_LENGTH, 1) +--disable_query_log +--eval select $data_length_old < $data_length_new as "larger" + +--source include/restart_mysqld.inc +--source include/wait_until_connected_again.inc +--let $data_length_new = query_get_value("select INDEX_LENGTH from information_schema.tables where table_schema=database() and table_name='t1'", INDEX_LENGTH, 1) +--disable_query_log +--eval select $data_length_old < $data_length_new as "larger" + +analyze table t1; +--let $data_length_new = query_get_value("select INDEX_LENGTH from information_schema.tables where 
table_schema=database() and table_name='t1'", INDEX_LENGTH, 1) +--disable_query_log +--eval select $data_length_old < $data_length_new as "larger" + +--source include/restart_mysqld.inc +--source include/wait_until_connected_again.inc +--let $data_length_new = query_get_value("select INDEX_LENGTH from information_schema.tables where table_schema=database() and table_name='t1'", INDEX_LENGTH, 1) +--disable_query_log +--eval select $data_length_old < $data_length_new as "larger" + +# verifying multiple analyze table won't change stats +--disable_query_log +let $max = 10; +let $i = 1; +while ($i <= $max) { + let $analyze = ANALYZE TABLE t1; + inc $i; + eval $analyze; +} +--enable_query_log + +--let $data_length_new2 = query_get_value("select INDEX_LENGTH from information_schema.tables where table_schema=database() and table_name='t1'", INDEX_LENGTH, 1) +--eval select $data_length_new2 < $data_length_new * 1.5 as "same" + + +--enable_query_log + +## uncomment to see the actual values +#--replace_column 8 # +#SHOW TABLE STATUS WHERE name LIKE 't1'; + +DROP TABLE t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/add_index_inplace_cardinality-master.opt b/storage/rocksdb/mysql-test/rocksdb/t/add_index_inplace_cardinality-master.opt new file mode 100644 index 0000000000000..436edf2b40c61 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/add_index_inplace_cardinality-master.opt @@ -0,0 +1 @@ +--rocksdb_table_stats_sampling_pct=100 diff --git a/storage/rocksdb/mysql-test/rocksdb/t/add_index_inplace_cardinality.test b/storage/rocksdb/mysql-test/rocksdb/t/add_index_inplace_cardinality.test new file mode 100644 index 0000000000000..148edf7a3d2c2 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/add_index_inplace_cardinality.test @@ -0,0 +1,44 @@ +--source include/have_rocksdb.inc +--source include/have_debug.inc +--source include/have_debug_sync.inc + +--disable_warnings +DROP TABLE IF EXISTS t1; +--enable_warnings + +# Test that fast secondary index creation 
updates cardinality properly +CREATE TABLE t1 (i INT PRIMARY KEY, j INT) ENGINE = ROCKSDB; +INSERT INTO t1 VALUES (1,2), (2,4), (3,6), (4,8), (5,10); + +SET debug_sync= 'rocksdb.commit_in_place_alter_table WAIT_FOR flushed'; +send ALTER TABLE t1 ADD INDEX kj(j), ALGORITHM=INPLACE; + +connect (con1,localhost,root,,); + +# Flush memtable out to SST +SET GLOBAL rocksdb_force_flush_memtable_now = 1; +SET debug_sync= 'now SIGNAL flushed'; + +connection default; +reap; + +# Return the data for the primary key of t1 +--replace_column 1 # 2 # 3 SSTNAME 5 # 6 # 7 # 8 # 9 # +SELECT * FROM INFORMATION_SCHEMA.ROCKSDB_INDEX_FILE_MAP +WHERE INDEX_NUMBER = + (SELECT INDEX_NUMBER FROM INFORMATION_SCHEMA.ROCKSDB_DDL + WHERE TABLE_NAME = 't1' AND INDEX_NAME = "PRIMARY"); + +# Return the data for the secondary index of t1 +--replace_column 1 # 2 # 3 SSTNAME 5 # 6 # 7 # 8 # 9 # +SELECT * FROM INFORMATION_SCHEMA.ROCKSDB_INDEX_FILE_MAP +WHERE INDEX_NUMBER = + (SELECT INDEX_NUMBER FROM INFORMATION_SCHEMA.ROCKSDB_DDL + WHERE TABLE_NAME = 't1' AND INDEX_NAME = "kj"); + +disconnect con1; +SET debug_sync='RESET'; + +# cleanup +DROP TABLE t1; + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/add_index_inplace_crash.test b/storage/rocksdb/mysql-test/rocksdb/t/add_index_inplace_crash.test new file mode 100644 index 0000000000000..84fe0046e7bbf --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/add_index_inplace_crash.test @@ -0,0 +1,107 @@ +--source include/have_rocksdb.inc +--source include/have_debug.inc +--source include/have_partition.inc + +--disable_warnings +drop table if exists t1; +--enable_warnings + +# +# test crash recovery +# + +CREATE TABLE t1 (a INT, b INT, KEY ka(a), KEY kab(a,b)) ENGINE=RocksDB; +INSERT INTO t1 (a, b) VALUES (1, 5); +INSERT INTO t1 (a, b) VALUES (2, 6); +INSERT INTO t1 (a, b) VALUES (3, 7); + +--echo # crash_during_online_index_creation +flush logs; + +--exec echo "restart" > $MYSQLTEST_VARDIR/tmp/mysqld.1.expect +SET SESSION 
debug_dbug="+d,crash_during_online_index_creation"; +--error 2013 +ALTER TABLE t1 ADD INDEX kb(b), ALGORITHM=INPLACE; + +--enable_reconnect +--source include/wait_until_connected_again.inc + +SET SESSION debug_dbug="-d,crash_during_online_index_creation"; + +SHOW CREATE TABLE t1; +CHECK TABLE t1; + +DROP TABLE t1; + +# +# Test crash recovery with partitioned tables +# +CREATE TABLE t1 (i INT, j INT, k INT, PRIMARY KEY (i), KEY(j)) ENGINE = ROCKSDB PARTITION BY KEY(i) PARTITIONS 4; + +--disable_query_log +let $max = 100; +let $i = 1; +while ($i <= $max) { + let $insert = INSERT INTO t1 VALUES ($i, $i, $i); + inc $i; + eval $insert; +} +--enable_query_log + +--echo # crash_during_index_creation_partition +flush logs; + +--exec echo "restart" > $MYSQLTEST_VARDIR/tmp/mysqld.1.expect +SET SESSION debug_dbug="+d,crash_during_index_creation_partition"; +--error 2013 +ALTER TABLE t1 ADD INDEX kij(i,j), ALGORITHM=INPLACE; + +--enable_reconnect +--source include/wait_until_connected_again.inc + +SET SESSION debug_dbug="-d,crash_during_index_creation_partition"; + +SHOW CREATE TABLE t1; + +# here, the index numbers should be higher because previously 4 index numbers +# were allocated for the partitioned table +ALTER TABLE t1 ADD INDEX kij(i,j), ALGORITHM=INPLACE; + +SELECT * FROM t1 ORDER BY i LIMIT 10; +SELECT COUNT(*) FROM t1; + +DROP TABLE t1; + +# +# Test rollback on partitioned tables for inplace alter +# +CREATE TABLE t1 (i INT, j INT, k INT, PRIMARY KEY (i), KEY(j)) ENGINE = ROCKSDB PARTITION BY KEY(i) PARTITIONS 4; + +--disable_query_log +let $max = 100; +let $i = 1; +while ($i <= $max) { + let $insert = INSERT INTO t1 VALUES ($i, $i, $i); + inc $i; + eval $insert; +} +--enable_query_log + +--echo # crash_during_index_creation_partition +flush logs; + +SET SESSION debug_dbug="+d,myrocks_simulate_index_create_rollback"; + +--error 1105 +ALTER TABLE t1 ADD INDEX kij(i,j), ALGORITHM=INPLACE; +SET SESSION debug_dbug="-d,myrocks_simulate_index_create_rollback"; +SHOW 
CREATE TABLE t1; + +# here, the index numbers should be higher because previously 4 index numbers +# were allocated for the partitioned table +ALTER TABLE t1 ADD INDEX kij(i,j), ALGORITHM=INPLACE; + +SHOW CREATE TABLE t1; +SELECT COUNT(*) FROM t1; + +DROP TABLE t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/add_index_inplace_sstfilewriter.test b/storage/rocksdb/mysql-test/rocksdb/t/add_index_inplace_sstfilewriter.test new file mode 100644 index 0000000000000..1ac382794ae30 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/add_index_inplace_sstfilewriter.test @@ -0,0 +1,105 @@ +--source include/have_rocksdb.inc + +# This test requires ~1.3G of disk space +--source include/big_test.inc + +--disable_warnings +drop table if exists t1; +--enable_warnings + +# Create a table with a primary key and one secondary key as well as one +# more column +CREATE TABLE t1(pk CHAR(5) PRIMARY KEY, a char(30), b char(30)) COLLATE 'latin1_bin'; + +--let $file = `SELECT CONCAT(@@datadir, "test_loadfile.txt")` + +# Create a text file with data to import into the table. 
+# The primary key is in sorted order and the secondary keys are randomly generated +--let ROCKSDB_INFILE = $file +perl; +my $fn = $ENV{'ROCKSDB_INFILE'}; +open(my $fh, '>>', $fn) || die "perl open($fn): $!"; +my $max = 3000000; +my @chars = ("A".."Z", "a".."z", "0".."9"); +my @lowerchars = ("a".."z"); +my @powers_of_26 = (26 * 26 * 26 * 26, 26 * 26 * 26, 26 * 26, 26, 1); +for (my $ii = 0; $ii < $max; $ii++) +{ + my $pk; + my $tmp = $ii; + foreach (@powers_of_26) + { + $pk .= $lowerchars[$tmp / $_]; + $tmp = $tmp % $_; + } + + my $num = int(rand(25)) + 6; + my $a; + $a .= $chars[rand(@chars)] for 1..$num; + + $num = int(rand(25)) + 6; + my $b; + $b .= $chars[rand(@chars)] for 1..$num; + print $fh "$pk\t$a\t$b\n"; +} +close($fh); +EOF + +--file_exists $file + +set rocksdb_bulk_load=1; +set rocksdb_bulk_load_size=100000; +--disable_query_log +--echo LOAD DATA INFILE INTO TABLE t1; +eval LOAD DATA INFILE '$file' INTO TABLE t1; +--enable_query_log +set rocksdb_bulk_load=0; + +# Make sure all the data is there. 
+select count(pk) from t1; +select count(a) from t1; +select count(b) from t1; + +# now do fast secondary index creation +ALTER TABLE t1 ADD INDEX kb(b), ALGORITHM=INPLACE; +# disable duplicate index warning +--disable_warnings +# now do same index using copy algorithm +ALTER TABLE t1 ADD INDEX kb_copy(b), ALGORITHM=COPY; +--enable_warnings + +# checksum testing +SELECT COUNT(*) as c FROM +(SELECT COALESCE(LOWER(CONV(BIT_XOR(CAST(CRC32(CONCAT_WS('#', `b`, CONCAT(ISNULL(`b`)))) AS UNSIGNED)), 10, 16)), 0) AS crc FROM `t1` FORCE INDEX(`kb`) +UNION DISTINCT +SELECT COALESCE(LOWER(CONV(BIT_XOR(CAST(CRC32(CONCAT_WS('#', +`b`, CONCAT(ISNULL(`b`)))) AS UNSIGNED)), 10, 16)), 0) AS crc FROM `t1` FORCE +INDEX(`kb_copy`)) as temp; + +select count(*) from t1 FORCE INDEX(kb); +select count(*) from t1 FORCE INDEX(kb_copy); +select count(*) from t1 FORCE INDEX(PRIMARY); + +# drop the index +ALTER TABLE t1 DROP INDEX kb, ALGORITHM=INPLACE; +ALTER TABLE t1 DROP INDEX kb_copy, ALGORITHM=INPLACE; + +# add two indexes simultaneously +ALTER TABLE t1 ADD INDEX kb(b), ADD INDEX kab(a,b), ALGORITHM=INPLACE; +SELECT COUNT(*) FROM t1 FORCE INDEX(kab); +SELECT COUNT(*) FROM t1 FORCE INDEX(kb); +SHOW CREATE TABLE t1; + +DROP TABLE t1; + +# Reverse CF testing, needs to be added to SSTFileWriter in reverse order +CREATE TABLE t1 (a INT PRIMARY KEY, b INT, KEY kab(a,b)) ENGINE=RocksDB; +INSERT INTO t1 (a, b) VALUES (1, 5); +INSERT INTO t1 (a, b) VALUES (2, 6); +INSERT INTO t1 (a, b) VALUES (3, 7); +ALTER TABLE t1 DROP INDEX kab, ALGORITHM=INPLACE; +ALTER TABLE t1 ADD INDEX kb(b) comment 'rev:cf1', ALGORITHM=INPLACE; +SHOW CREATE TABLE t1; +SELECT COUNT(*) FROM t1 FORCE INDEX(kb); +DROP TABLE t1; +--remove_file $file diff --git a/storage/rocksdb/mysql-test/rocksdb/t/add_unique_index_inplace.test b/storage/rocksdb/mysql-test/rocksdb/t/add_unique_index_inplace.test new file mode 100644 index 0000000000000..375a63c3a3839 --- /dev/null +++ 
b/storage/rocksdb/mysql-test/rocksdb/t/add_unique_index_inplace.test @@ -0,0 +1,82 @@ +--source include/have_rocksdb.inc +--source include/have_debug.inc + +--disable_warnings +drop table if exists t1; +--enable_warnings + +# test adding duplicate value before unique index +CREATE TABLE t1 (a INT, b INT, PRIMARY KEY ka(a)) ENGINE=RocksDB; +INSERT INTO t1 (a, b) VALUES (1, 5); +INSERT INTO t1 (a, b) VALUES (2, 6); +INSERT INTO t1 (a, b) VALUES (3, 7); + +INSERT INTO t1 (a,b) VALUES (4,5); + +# should cause error here, duplicate value on b +--error 1062 +ALTER TABLE t1 ADD UNIQUE INDEX kb(b), ALGORITHM=INPLACE; + +SHOW CREATE TABLE t1; +DROP TABLE t1; + +# test dup value AFTER unique index +CREATE TABLE t1 (a INT, b INT, PRIMARY KEY ka(a)) ENGINE=RocksDB; +INSERT INTO t1 (a, b) VALUES (1, 5); +INSERT INTO t1 (a, b) VALUES (2, 6); +INSERT INTO t1 (a, b) VALUES (3, 7); +ALTER TABLE t1 ADD UNIQUE INDEX kb(b), ALGORITHM=INPLACE; + +# should error here, duplicate value on b +--error 1062 +INSERT INTO t1 (a,b) VALUES (4,5); + +# should succeed +INSERT INTO t1 (a,b) VALUES (5,8); + +SHOW CREATE TABLE t1; +SELECT * FROM t1 FORCE INDEX(kb); +DROP TABLE t1; + +# test what happens when duplicate nulls exist +CREATE TABLE t1 (a INT, b INT, PRIMARY KEY ka(a)) ENGINE=RocksDB; +INSERT INTO t1 (a, b) VALUES (1, 5); +INSERT INTO t1 (a, b) VALUES (2, NULL); +INSERT INTO t1 (a, b) VALUES (3, NULL); + +# should pass, because in MySQL we allow multiple NULLS in unique key +ALTER TABLE t1 ADD UNIQUE INDEX kb(b), ALGORITHM=INPLACE; +INSERT INTO t1 (a, b) VALUES (4, NULL); + +SHOW CREATE TABLE t1; +SELECT COUNT(*) FROM t1 FORCE INDEX(kb); +DROP TABLE t1; + +## test case with multi-part key with nulls +CREATE TABLE t1 (a INT, b INT, c INT, PRIMARY KEY ka(a)) ENGINE=RocksDB; +INSERT INTO t1 (a,b,c) VALUES (1,1,NULL); +INSERT INTO t1 (a,b,c) VALUES (2,1,NULL); +INSERT INTO t1 (a,b,c) VALUES (3,1,NULL); +INSERT INTO t1 (a,b,c) VALUES (4,1,5); + +# should pass +ALTER TABLE t1 ADD UNIQUE INDEX 
kbc(b,c), ALGORITHM=INPLACE; + +SHOW CREATE TABLE t1; +SELECT COUNT(*) FROM t1 FORCE INDEX(kbc); +DROP TABLE t1; + +## test case with table w/ no primary key, and we try to add unique key +CREATE TABLE t1 (a INT, b INT) ENGINE=RocksDB; +INSERT INTO t1 (a, b) VALUES (1, 5); +INSERT INTO t1 (a, b) VALUES (2, 6); +INSERT INTO t1 (a, b) VALUES (3, 7); + +# should fail, can't add unique index on table w/ no pk +--error 1105 +ALTER TABLE t1 ADD UNIQUE INDEX kb(b); + +SHOW CREATE TABLE t1; +DROP TABLE t1; + + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/allow_no_pk_concurrent_insert.test b/storage/rocksdb/mysql-test/rocksdb/t/allow_no_pk_concurrent_insert.test new file mode 100644 index 0000000000000..8dda4372eb394 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/allow_no_pk_concurrent_insert.test @@ -0,0 +1,22 @@ + +# +# Tests concurrent inserts for tables with no primary key. +# + +--source include/have_rocksdb.inc + +--disable_warnings +drop table if exists t1; +--enable_warnings + +--echo # Binary must be compiled with debug for this test +--source include/have_debug.inc + +# create the actual table +CREATE TABLE t1 (a INT) ENGINE=rocksdb; + +let $exec = python ../storage/rocksdb/mysql-test/rocksdb/t/rocksdb_concurrent_insert.py root 127.0.0.1 $MASTER_MYPORT test t1 100 4; +exec $exec; + +SELECT COUNT(*) from t1; +DROP TABLE t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/allow_no_primary_key.test b/storage/rocksdb/mysql-test/rocksdb/t/allow_no_primary_key.test new file mode 100644 index 0000000000000..d1fe15b98fee4 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/allow_no_primary_key.test @@ -0,0 +1,91 @@ +--source include/have_rocksdb.inc + +# +# This test checks some very basic capabilities +# for tables without primary keys. A hidden pk will be generated under the hood +# in myrocks. Everything should work here as normal. 
+# + +--disable_warnings +DROP TABLE IF EXISTS t1; +--enable_warnings + +# test CREATE +CREATE TABLE t1 (a INT, b CHAR(8)) ENGINE=rocksdb; +--source no_primary_key_basic_ops.inc +DROP TABLE t1; + +## test ALTER +CREATE TABLE t1 (a INT, c CHAR(8)) ENGINE=rocksdb; +INSERT INTO t1 VALUES (1,'a'),(5,'z'); +ALTER TABLE t1 ADD COLUMN b INT; +SHOW CREATE TABLE t1; + +--sorted_result +SELECT * FROM t1; +ALTER TABLE t1 DROP COLUMN b; +SHOW CREATE TABLE t1; +--sorted_result +SELECT * FROM t1; +DROP TABLE t1; + +## test creating a table with primary and then dropping that key +CREATE TABLE t1 (a INT, b CHAR(8), pk INT AUTO_INCREMENT PRIMARY KEY) ENGINE=rocksdb; +ALTER TABLE t1 DROP COLUMN pk; +--source no_primary_key_basic_ops.inc +DROP TABLE t1; + +# test CHECK TABLE +# CHECK TABLE statements +# +# Note: the output is likely to be different for the engine under test, +# in which case rdiff will be needed. Or, the output might say that +# the storage engine does not support CHECK. +# + +--disable_warnings +DROP TABLE IF EXISTS t1,t2; +--enable_warnings + +CREATE TABLE t1 (a INT, b CHAR(8)) ENGINE=rocksdb; +INSERT INTO t1 (a,b) VALUES (1,'a'),(2,'b'); + +CREATE TABLE t2 (a INT, b CHAR(8)) ENGINE=rocksdb; + +CHECK TABLE t1; +INSERT INTO t1 (a,b) VALUES (3,'c'); +INSERT INTO t2 (a,b) VALUES (4,'d'); +CHECK TABLE t1, t2 FOR UPGRADE; +INSERT INTO t2 (a,b) VALUES (5,'e'); +CHECK TABLE t2 QUICK; +INSERT INTO t1 (a,b) VALUES (6,'f'); +CHECK TABLE t1 FAST; +INSERT INTO t1 (a,b) VALUES (7,'g'); +INSERT INTO t2 (a,b) VALUES (8,'h'); +CHECK TABLE t2, t1 MEDIUM; +INSERT INTO t1 (a,b) VALUES (9,'i'); +INSERT INTO t2 (a,b) VALUES (10,'j'); +CHECK TABLE t1, t2 EXTENDED; +INSERT INTO t1 (a,b) VALUES (11,'k'); +CHECK TABLE t1 CHANGED; + +DROP TABLE t1, t2; + +# test disabling unique keys +--error 1105 +CREATE TABLE t1 (a INT, b CHAR(8), UNIQUE INDEX(a)) ENGINE=rocksdb; + +## test restarting a table that has no data +CREATE TABLE t1 (a INT, b CHAR(8)) ENGINE=rocksdb; +SHOW CREATE TABLE t1; 
+SHOW COLUMNS IN t1; +--source include/restart_mysqld.inc + +## single delete statement should remove MULTIPLE rows (aka duplicate rows) +INSERT INTO t1 (a,b) VALUES (35,'foo'); +INSERT INTO t1 (a,b) VALUES (35,'foo'); +INSERT INTO t1 (a,b) VALUES (36,'foo'); +DELETE FROM t1 WHERE a = 35 AND b = 'foo'; +--sorted_result +SELECT * FROM t1; +DROP TABLE t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/allow_no_primary_key_with_sk.test b/storage/rocksdb/mysql-test/rocksdb/t/allow_no_primary_key_with_sk.test new file mode 100644 index 0000000000000..1f3ef49e534cd --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/allow_no_primary_key_with_sk.test @@ -0,0 +1,137 @@ +--source include/have_rocksdb.inc + +# +# This test checks some very basic capabilities +# for tables without primary keys. A hidden pk will be generated under the hood +# in myrocks. Everything should work here as normal. +# + +--disable_warnings +DROP TABLE IF EXISTS t1; +--enable_warnings + +## test CREATE with SK +CREATE TABLE t1 (a INT, b CHAR(8), KEY(a)) ENGINE=rocksdb; +--source no_primary_key_basic_ops.inc +DROP TABLE t1; + +## test adding/dropping sk w/no pk +CREATE TABLE t1 (a INT, b CHAR(8)) ENGINE=rocksdb; +ALTER TABLE t1 ADD INDEX (b); +--source no_primary_key_basic_ops.inc + +ALTER TABLE t1 DROP INDEX b; +--source no_primary_key_basic_ops.inc +DROP TABLE t1; + +# test dropping pk w/ sk +CREATE TABLE t1 (a INT, b CHAR(8), pk INT AUTO_INCREMENT PRIMARY KEY) ENGINE=rocksdb; +ALTER TABLE t1 DROP COLUMN pk; +--source no_primary_key_basic_ops.inc +DROP TABLE t1; + +--echo # +--echo # MDEV-4313: RocksDB: Server crashes in Rdb_key_def::setup on dropping the primary key column +--echo # +CREATE TABLE t1 (pk INT PRIMARY KEY, i INT NOT NULL, KEY(i)) ENGINE=RocksDB; +ALTER TABLE t1 DROP COLUMN `pk`; +DROP TABLE t1; + +# create table with multiple sk, make sure it still works +# test CREATE with SK +CREATE TABLE t1 (a INT, b CHAR(8), KEY(a), KEY(b)) ENGINE=rocksdb; +--source 
no_primary_key_basic_ops.inc +DROP TABLE t1; + +# test CREATE table with multi-part sk +CREATE TABLE t1 (a INT, b CHAR(8), KEY(a, b)) ENGINE=rocksdb; +--source no_primary_key_basic_ops.inc +DROP TABLE t1; + +# test CREATE table with more than one sk +CREATE TABLE t1 (a INT, b CHAR(8), KEY(a), KEY(b)) ENGINE=rocksdb; +--source no_primary_key_basic_ops.inc +DROP TABLE t1; + +# test check table with sk +CREATE TABLE t1 (a INT, b CHAR(8), KEY(a)) ENGINE=rocksdb; +INSERT INTO t1 (a) VALUES (1),(2),(5); +CHECK TABLE t1; +INSERT INTO t1 (a) VALUES (6),(8),(12); +CHECK TABLE t1 FOR UPGRADE; +INSERT INTO t1 (a) VALUES (13),(15),(16); +CHECK TABLE t1 QUICK; +INSERT INTO t1 (a) VALUES (17),(120),(132); +CHECK TABLE t1 FAST; +INSERT INTO t1 (a) VALUES (801),(900),(7714); +CHECK TABLE t1 MEDIUM; +INSERT INTO t1 (a) VALUES (8760),(10023),(12000); +CHECK TABLE t1 EXTENDED; +INSERT INTO t1 (a) VALUES (13345),(24456),(78302),(143028); +CHECK TABLE t1 CHANGED; +DROP TABLE t1; + +## tables with multi-part secondary indexes + columns that dont belong to any +## secondary indexes +CREATE TABLE t1 (a INT, b INT, c INT, d INT, KEY kab(a, b), KEY kbc(b, c), KEY kabc(a,b,c)) ENGINE=rocksdb; +SHOW CREATE TABLE t1; +SHOW COLUMNS IN t1; + +INSERT INTO t1 (a,b,c,d) VALUES (1,2,3,4); +INSERT INTO t1 (a,b,c,d) VALUES (5,6,7,8); +INSERT INTO t1 (a,b,c,d) VALUES (10,11,12,13); +INSERT INTO t1 (a,b,c,d) VALUES (14,15,16,17); + +--sorted_result +SELECT * FROM t1; +--sorted_result +SELECT * FROM t1 WHERE a = 1 OR a = 10; +--sorted_result +SELECT * FROM t1 WHERE c = 3 OR d = 17; +--sorted_result +SELECT * FROM t1 WHERE a > 5 OR d > 5; + +# force some of these selects to use different indexes and/or have the columns +# being selected also not contain column d +--sorted_result +SELECT a, b, c FROM t1 FORCE INDEX (kabc) WHERE a=1 OR b=11; +--sorted_result +SELECT d FROM t1 FORCE INDEX (kbc) WHERE b > 6 AND c > 12; + +UPDATE t1 SET a=a+100; +UPDATE t1 SET a=a-100, b=99 WHERE a>100; +--sorted_result 
+SELECT * FROM t1; + +DELETE FROM t1 WHERE a>5; +DELETE FROM t1 WHERE b=99 AND d>4; +--sorted_result +SELECT * FROM t1; + +TRUNCATE TABLE t1; +DROP TABLE t1; + +## secondary indexes live in reverse column families +CREATE TABLE t1 (a INT, b CHAR(8), KEY ka(a) comment 'rev:cf1', KEY kb(b) +comment 'rev:cf1', KEY kab(a,b) comment 'rev:cf2') ENGINE=rocksdb; +--source no_primary_key_basic_ops.inc +DROP TABLE t1; + +## https://github.com/facebook/mysql-5.6/issues/209 +## Accidental single delete caused data inconsistency +CREATE TABLE t1 (col1 int, col2 int, KEY kcol1(col1)) ENGINE=ROCKSDB; +INSERT INTO t1 (col1, col2) values (2,2); +ALTER TABLE t1 ADD COLUMN extra INT; +UPDATE t1 SET col2 = 1; +select * from t1; +DELETE FROM t1 WHERE col1 = 2; + +# flush memtable to cause compaction to occur. +# During compaction, if a SingleDelete occurs then the delete marker and the +# key it is deleting are both removed. This will cause data inconsistency if +# SingleDelete is called on PK, since we do multiple Put() operations to update +# primary keys. +set global rocksdb_force_flush_memtable_now = true; + +select * from t1; +DROP TABLE t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/alter_table.test b/storage/rocksdb/mysql-test/rocksdb/t/alter_table.test new file mode 100644 index 0000000000000..2603311da556e --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/alter_table.test @@ -0,0 +1,94 @@ +--source include/have_rocksdb.inc + +# +# Basic ALTER TABLE statements. +# +# USAGE of table options in ALTER statements +# is covered in tbl_standard_opts and tbl_opt*.tests. +# +# Index operations are covered in index* tests. 
+# +# ALTER OFFLINE is not covered as it is not supported, as of 5.5.23 +# + +--disable_warnings +DROP TABLE IF EXISTS t1, t2; +--enable_warnings + +CREATE TABLE t1 (pk INT PRIMARY KEY, a INT, c CHAR(8)) ENGINE=rocksdb; +INSERT INTO t1 VALUES (1,1,'a'),(2,5,'z'); + +# Column operations + +ALTER TABLE t1 ADD COLUMN b INT; +SHOW CREATE TABLE t1; + +ALTER TABLE t1 ALTER COLUMN a SET DEFAULT '0'; +SHOW CREATE TABLE t1; + +ALTER TABLE t1 ALTER a DROP DEFAULT; +SHOW CREATE TABLE t1; + +ALTER TABLE t1 CHANGE COLUMN b b1 CHAR(8) FIRST; +SHOW CREATE TABLE t1; + +ALTER TABLE t1 CHANGE b1 b INT AFTER c; +SHOW CREATE TABLE t1; + +ALTER TABLE t1 CHANGE b b CHAR(8); +SHOW CREATE TABLE t1; + +ALTER TABLE t1 MODIFY COLUMN b INT; +SHOW CREATE TABLE t1; + +ALTER TABLE t1 MODIFY COLUMN b CHAR(8) FIRST; +SHOW CREATE TABLE t1; + +ALTER TABLE t1 MODIFY COLUMN b INT AFTER a; +SHOW CREATE TABLE t1; + +ALTER TABLE t1 DROP COLUMN b; +SHOW CREATE TABLE t1; + + +# Rename table + +ALTER TABLE t1 RENAME TO t2; +--error ER_NO_SUCH_TABLE +SHOW CREATE TABLE t1; +SHOW CREATE TABLE t2; +DROP TABLE t2; + + +# ORDER BY +CREATE TABLE t1 (pk INT PRIMARY KEY, a INT, b INT) ENGINE=rocksdb; +INSERT INTO t1 VALUES (1,1,5),(2,2,2),(3,4,3); +SHOW CREATE TABLE t1; + +ALTER TABLE t1 ORDER BY b ASC, a DESC, pk DESC; +SHOW CREATE TABLE t1; +SELECT * FROM t1; +DROP TABLE t1; + + +# Character set, collate + +CREATE TABLE t1 (pk INT PRIMARY KEY, a INT, b CHAR(8), c CHAR(8)) ENGINE=rocksdb CHARACTER SET latin1 COLLATE latin1_general_cs; +INSERT INTO t1 VALUES (1,5,'z','t'); +SHOW CREATE TABLE t1; + +ALTER TABLE t1 CONVERT TO CHARACTER SET utf8; +SHOW CREATE TABLE t1; + +ALTER TABLE t1 DEFAULT CHARACTER SET = latin1 COLLATE latin1_general_ci; +SHOW CREATE TABLE t1; + + +# A 'null' ALTER operation + +ALTER TABLE t1 FORCE; +SHOW CREATE TABLE t1; + +DROP TABLE t1; + + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/analyze_table.test b/storage/rocksdb/mysql-test/rocksdb/t/analyze_table.test new file mode 100644 index 
0000000000000..10722194121ab --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/analyze_table.test @@ -0,0 +1,31 @@ +--source include/have_rocksdb.inc + +# +# ANALYZE TABLE statements +# + +--disable_warnings +DROP TABLE IF EXISTS t1,t2; +--enable_warnings + +CREATE TABLE t1 (pk INT PRIMARY KEY, a INT(11), b CHAR(8)) ENGINE=rocksdb; +INSERT INTO t1 VALUES (1,1,'a'),(2,2,'b'); +CREATE TABLE t2 (pk INT PRIMARY KEY, a INT(11), b CHAR(8)) ENGINE=rocksdb; +INSERT INTO t1 VALUES (3,3,'c'); +ANALYZE TABLE t1; +INSERT INTO t2 VALUES (1,4,'d'); +ANALYZE NO_WRITE_TO_BINLOG TABLE t2; +INSERT INTO t1 VALUES (4,5,'e'); +INSERT INTO t2 VALUES (2,6,'f'); +ANALYZE LOCAL TABLE t1, t2; + +DROP TABLE t1, t2; + + --let $create_definition = a $int_indexed_col, $default_index(a) +CREATE TABLE t1 (pk INT PRIMARY KEY, a INT(11), KEY(a)) ENGINE=rocksdb; +INSERT INTO t1 VALUES (1,1),(2,2),(3,4),(4,7); +ANALYZE TABLE t1; +INSERT INTO t1 VALUES (5,8),(6,10),(7,11),(8,12); +ANALYZE TABLE t1; +DROP TABLE t1; + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/apply_changes_iter.test b/storage/rocksdb/mysql-test/rocksdb/t/apply_changes_iter.test new file mode 100644 index 0000000000000..4f759a8ec6008 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/apply_changes_iter.test @@ -0,0 +1,44 @@ +--source include/have_rocksdb.inc + +# Tests the Apply_changes_iter class for walking forward and backwards +# with data in both the transaction class and in the rocksdb storage layer + +--disable_warnings +DROP TABLE IF EXISTS t1; +DROP TABLE IF EXISTS t2; +--enable_warnings + +CREATE TABLE t1 ( + pk INT NOT NULL PRIMARY KEY, + key1 INT NOT NULL, + KEY (key1) +) ENGINE=ROCKSDB; + +INSERT INTO t1 VALUES (12,12); +INSERT INTO t1 VALUES (6,6); +BEGIN; +INSERT INTO t1 VALUES (8,8), (10,10); +SELECT * FROM t1 WHERE key1 BETWEEN 4 and 11 ORDER BY KEY1 DESC; +SELECT * FROM t1 WHERE key1 BETWEEN 4 and 11 ORDER BY KEY1 ASC; +SELECT * FROM t1 IGNORE INDEX(key1) WHERE key1 BETWEEN 4 and 11 ORDER BY key1 
DESC; +SELECT * FROM t1 IGNORE INDEX(key1) WHERE key1 BETWEEN 4 and 11 ORDER BY key1 ASC; +ROLLBACK; + +CREATE TABLE t2 ( + pk INT NOT NULL PRIMARY KEY, + key1 INT NOT NULL, + KEY (key1) COMMENT 'rev:cf' +) ENGINE=ROCKSDB; + +INSERT INTO t2 VALUES (12,12); +INSERT INTO t2 VALUES (6,6); +BEGIN; +INSERT INTO t2 VALUES (8,8), (10,10); +SELECT * FROM t2 WHERE key1 BETWEEN 4 and 11 ORDER BY KEY1 DESC; +SELECT * FROM t2 WHERE key1 BETWEEN 4 and 11 ORDER BY KEY1 ASC; +SELECT * FROM t2 IGNORE INDEX(key1) WHERE key1 BETWEEN 4 and 11 ORDER BY key1 DESC; +SELECT * FROM t2 IGNORE INDEX(key1) WHERE key1 BETWEEN 4 and 11 ORDER BY key1 ASC; +ROLLBACK; + +DROP TABLE t1; +DROP TABLE t2; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/autoinc_secondary.test b/storage/rocksdb/mysql-test/rocksdb/t/autoinc_secondary.test new file mode 100644 index 0000000000000..68ad21bea1c7a --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/autoinc_secondary.test @@ -0,0 +1,16 @@ +--source include/have_rocksdb.inc + +--disable_warnings +DROP TABLE IF EXISTS t1; +--enable_warnings + +CREATE TABLE t1 (pk INT PRIMARY KEY, a INT AUTO_INCREMENT, KEY(a)) ENGINE=rocksdb; +INSERT INTO t1 (pk) VALUES (3), (2), (1); +SELECT * FROM t1; + +--source include/restart_mysqld.inc + +INSERT INTO t1 (pk) VALUES (4); +SELECT * FROM t1; + +DROP TABLE t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/autoinc_vars.test b/storage/rocksdb/mysql-test/rocksdb/t/autoinc_vars.test new file mode 100644 index 0000000000000..2fe0a2e3c08ce --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/autoinc_vars.test @@ -0,0 +1,67 @@ +--source include/have_rocksdb.inc + +# +# auto-increment-offset and auto-increment-increment +# + +############################################ +# TODO: +# This test currently produces wrong result +# on the line 36 of the result file and further +# due to bug MySQL:47118. 
+# When/if the bug is fixed, +# the result will need to be updated +############################################ + +--disable_warnings +DROP TABLE IF EXISTS t1; +--enable_warnings + +--echo #--------------------------- +--echo # auto_increment_offset +--echo #--------------------------- +SET auto_increment_offset = 200; + +CREATE TABLE t1 (a INT AUTO_INCREMENT PRIMARY KEY, b CHAR(8)) ENGINE=rocksdb; + +# If auto_increment_offset is greater than auto_increment_increment, +# the offset is ignored + +INSERT INTO t1 (a,b) VALUES (NULL,'a'),(NULL,'b'),(NULL,'c'); +SELECT LAST_INSERT_ID(); +SELECT a,b FROM t1 ORDER BY a; + +--echo #--------------------------- +--echo # auto_increment_increment +--echo #--------------------------- + +SET auto_increment_increment = 300; +# offset should not be ignored anymore + +INSERT INTO t1 (a,b) VALUES (NULL,'d'),(NULL,'e'),(NULL,'f'); +SELECT LAST_INSERT_ID(); +SELECT a,b FROM t1 ORDER BY a; + +SET auto_increment_increment = 50; +INSERT INTO t1 (a,b) VALUES (NULL,'g'),(NULL,'h'),(NULL,'i'); +SELECT LAST_INSERT_ID(); +SELECT a,b FROM t1 ORDER BY a; +DROP TABLE t1; + + +--echo #--------------------------- +--echo # offset is greater than the max value +--echo #--------------------------- + +SET auto_increment_increment = 500; +SET auto_increment_offset = 300; + +CREATE TABLE t1 (a TINYINT AUTO_INCREMENT PRIMARY KEY) ENGINE=rocksdb; +--echo # In MariaDB, this is an error: +--error HA_ERR_AUTOINC_ERANGE +INSERT INTO t1 (a) VALUES (NULL); +SELECT LAST_INSERT_ID(); +SELECT a FROM t1 ORDER BY a; +DROP TABLE t1; + + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/autoinc_vars_thread.test b/storage/rocksdb/mysql-test/rocksdb/t/autoinc_vars_thread.test new file mode 100644 index 0000000000000..78521fbc9ef65 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/autoinc_vars_thread.test @@ -0,0 +1,59 @@ +--source include/have_rocksdb.inc +--source include/have_debug_sync.inc + +--echo #--------------------------- +--echo # two threads 
inserting simultaneously with increment > 1 +--echo # Issue #390 +--echo #--------------------------- + +CREATE TABLE t1 (a INT AUTO_INCREMENT PRIMARY KEY) ENGINE=rocksdb; + +# Set up connections +connect (con1, localhost, root,,); +SET auto_increment_increment = 2; +SET auto_increment_offset = 1; +# Insert one row to set up the conditions that caused the original failure +INSERT INTO t1 VALUES(NULL); + +connect (con2, localhost, root,,); +SET auto_increment_increment = 2; +SET auto_increment_offset = 1; + +connect (con3, localhost, root,,); + +# Start each thread on an insert that will block waiting for a signal +connection con1; +SET debug_sync='rocksdb.autoinc_vars SIGNAL parked1 WAIT_FOR go1'; +send INSERT INTO t1 VALUES(NULL); + +connection con2; +SET debug_sync='rocksdb.autoinc_vars SIGNAL parked2 WAIT_FOR go2'; +send INSERT INTO t1 VALUES(NULL); + +# Wait for both threads to be at debug_sync point +connection default; +SET debug_sync='now WAIT_FOR parked1'; +SET debug_sync='now WAIT_FOR parked2'; + +# Signal both threads to continue +send SET debug_sync='now SIGNAL go1'; +connection con3; +SET debug_sync='now SIGNAL go2'; +connection default; +reap; + +connection con1; +reap; + +connection con2; +reap; + +connection default; +SET debug_sync='RESET'; + +disconnect con1; +disconnect con2; + +SELECT * FROM t1; +DROP TABLE t1; + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/autoinc_vars_thread_2.test b/storage/rocksdb/mysql-test/rocksdb/t/autoinc_vars_thread_2.test new file mode 100644 index 0000000000000..b64af16411b7f --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/autoinc_vars_thread_2.test @@ -0,0 +1,142 @@ +--source include/have_rocksdb.inc + +--echo #--------------------------- +--echo # ten threads inserting simultaneously with increment > 1 +--echo # Issue #390 +--echo #--------------------------- + +# Run 10 simulatenous threads each inserting 10,000 rows +let $num_threads = 10; +let $num_rows_per_thread = 100000; + +# Create the table with 
an AUTO_INCREMENT primary key and a separate colum +# to store which thread created the row +CREATE TABLE t1 (pk INT AUTO_INCREMENT PRIMARY KEY, thr INT) ENGINE=rocksdb; + +# For each thread... +# 1) set up a connection +# 2) create a file that can be used for LOAD DATA INFILE ... +let $i = `SELECT $num_threads`; +while ($i > 0) +{ + dec $i; + + # Set up connection + connect (con$i, localhost, root,,); + + # Set up the auto_increment_* variables for each thread + eval SET auto_increment_increment = 100; + eval SET auto_increment_offset = $i + 1; + let $file = `SELECT CONCAT(@@datadir, "test_insert_", $i, ".txt")`; + + # Pass variables into perl + let ROCKSDB_INFILE = $file; + let ROCKSDB_THREAD = `SELECT $i`; + let ROCKSDB_ROWS_PER_THREAD = `SELECT $num_rows_per_thread`; + + # Create a file to load + perl; + my $fn = $ENV{'ROCKSDB_INFILE'}; + my $thr = $ENV{'ROCKSDB_THREAD'}; + my $num = $ENV{'ROCKSDB_ROWS_PER_THREAD'}; + open(my $fh, '>>', $fn) || die "perl open($fn): $!"; + binmode $fh; + for (my $ii = 0; $ii < $num; $ii++) + { + print $fh "\\N\t$thr\n" + } + close($fh); + EOF +} + +# For each connection start the LOAD DATA INFILE in the background +connection default; +let $i = `SELECT $num_threads`; +while ($i > 0) +{ + dec $i; + + connection con$i; + let $file = `SELECT CONCAT(@@datadir, "test_insert_", $i, ".txt")`; + --disable_query_log + --echo LOAD DATA INFILE INTO TABLE t1; + send_eval LOAD DATA INFILE '$file' INTO TABLE t1; + --enable_query_log +} + +# Reap each connection's background result +connection default; +let $i = `SELECT $num_threads`; +while ($i > 0) +{ + dec $i; + + connection con$i; + reap; +} + +# Make sure we have the required number of rows +connection default; +SELECT COUNT(*) FROM t1; +SELECT thr, COUNT(pk) FROM t1 GROUP BY thr; + +# Cleanup the connection and file used for LOAD DATA INFILE +let $i = `SELECT $num_threads`; +while ($i > 0) +{ + dec $i; + + disconnect con$i; + let $file = `SELECT CONCAT(@@datadir, "test_insert_", "$i", 
".txt")`; + remove_file $file; +} + +# Validate each row. For each row, the created 'thr' column shows which +# thread created the row. The pk that was automatically generated should +# therefore match a certain pattern. For thread 0, the pk should be in +# the sequence [1, 101, 201, 301, ...]; for thread 1, it should be in the +# sequence [2, 102, 202, 302, ...], etc. The pk for each row should be +# smallest value in the sequence for thread 'thr' that is greater than +# the pk in the previous row. +let $file = `SELECT CONCAT(@@datadir, "test_export.txt")`; +--disable_query_log +--echo SELECT * FROM t1 ORDER BY pk INTO OUTFILE ; +eval SELECT * FROM t1 ORDER BY pk INTO OUTFILE "$file"; +--enable_query_log + +let ROCKSDB_OUTFILE = $file; + +perl; +my $fn = $ENV{'ROCKSDB_OUTFILE'}; +my $last_pk = 0; +open(my $fh, '<', $fn) || die "perl open($fn): $!"; +while (<$fh>) +{ + if ($_ =~ m/^(.*)\t(.*)$/) + { + my $pk = $1; + my $thr = $2; + + my $expected_pk = int($last_pk / 100) * 100 + ($thr + 1); + $expected_pk += 100 if $expected_pk <= $last_pk; + + if ($expected_pk != $pk) + { + die "Incorrect next pk ($pk); expected $expected_pk (previous: $last_pk)" + } + + $last_pk = $pk; + } + else + { + die "output file has incorrect format: $_"; + } +} +print stdout "All pk values matched their expected values\n"; +EOF + +remove_file $file; + +# Drop the table to finally clean up +DROP TABLE t1; + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/autoincrement.test b/storage/rocksdb/mysql-test/rocksdb/t/autoincrement.test new file mode 100644 index 0000000000000..375571f705dd0 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/autoincrement.test @@ -0,0 +1,3 @@ +--source include/have_rocksdb.inc + +--echo # The test checks AUTO_INCREMENT capabilities that are not supported by RocksDB-SE. 
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/blind_delete_without_tx_api.cnf b/storage/rocksdb/mysql-test/rocksdb/t/blind_delete_without_tx_api.cnf new file mode 100644 index 0000000000000..a76f1244bab8b --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/blind_delete_without_tx_api.cnf @@ -0,0 +1,11 @@ +!include suite/rpl/my.cnf + +[mysqld.1] +sync_binlog=0 +binlog_format=row +slave-exec-mode=strict + +[mysqld.2] +sync_binlog=0 +binlog_format=row +slave-exec-mode=strict diff --git a/storage/rocksdb/mysql-test/rocksdb/t/blind_delete_without_tx_api.test b/storage/rocksdb/mysql-test/rocksdb/t/blind_delete_without_tx_api.test new file mode 100644 index 0000000000000..e5f70be4c3bbb --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/blind_delete_without_tx_api.test @@ -0,0 +1,130 @@ +--source include/have_rocksdb.inc +--source include/have_binlog_format_row.inc + +source include/master-slave.inc; + +connection master; + +set @save_rocksdb_blind_delete_primary_key=@@session.rocksdb_blind_delete_primary_key; +set @save_rocksdb_master_skip_tx_api=@@session.rocksdb_master_skip_tx_api; + +--disable_warnings +DROP TABLE IF EXISTS t1,t2; +--enable_warnings +create table t1 (id int primary key, value int, value2 varchar(200)) engine=rocksdb; +create table t2 (id int primary key, value int, value2 varchar(200), index(value)) engine=rocksdb; + +--disable_query_log +let $t = 1; +while ($t <= 2) { + let $i = 1; + while ($i <= 10000) { + let $insert = INSERT INTO t$t VALUES($i, $i, REPEAT('x', 150)); + inc $i; + eval $insert; + } + inc $t; +} +--enable_query_log + +SET session rocksdb_blind_delete_primary_key=1; +select variable_value into @c from information_schema.global_status where variable_name='rocksdb_rows_deleted_blind'; +# Deleting 1000 rows from t1 +--disable_query_log +let $i = 1; +while ($i <= 1000) { + let $insert = DELETE FROM t1 WHERE id=$i; + inc $i; + eval $insert; +} +--enable_query_log +select variable_value-@c from information_schema.global_status 
where variable_name='rocksdb_rows_deleted_blind'; +SELECT count(*) FROM t1; + +--source include/sync_slave_sql_with_master.inc +connection slave; +SELECT count(*) FROM t1; +connection master; + +# Deleting 1000 rows from t2 (blind delete disabled because of secondary key) +select variable_value into @c from information_schema.global_status where variable_name='rocksdb_rows_deleted_blind'; +--disable_query_log +let $i = 1; +while ($i <= 1000) { + let $insert = DELETE FROM t2 WHERE id=$i; + inc $i; + eval $insert; +} +--enable_query_log +select variable_value-@c from information_schema.global_status where variable_name='rocksdb_rows_deleted_blind'; +SELECT count(*) FROM t2; + +SET session rocksdb_master_skip_tx_api=1; + +select variable_value into @c from information_schema.global_status where variable_name='rocksdb_rows_deleted_blind'; +--disable_query_log +let $t = 1; +while ($t <= 2) { + let $i = 1001; + while ($i <= 2000) { + let $insert = DELETE FROM t$t WHERE id=$i; + inc $i; + eval $insert; + } + inc $t; +} +--enable_query_log +select variable_value-@c from information_schema.global_status where variable_name='rocksdb_rows_deleted_blind'; +SELECT count(*) FROM t1; +SELECT count(*) FROM t2; +--source include/sync_slave_sql_with_master.inc +connection slave; +SELECT count(*) FROM t1; +SELECT count(*) FROM t2; +connection master; + + +# Range Deletes (blind delete disabled) +select variable_value into @c from information_schema.global_status where variable_name='rocksdb_rows_deleted_blind'; +DELETE FROM t1 WHERE id BETWEEN 3001 AND 4000; +DELETE FROM t2 WHERE id BETWEEN 3001 AND 4000; +select variable_value-@c from information_schema.global_status where variable_name='rocksdb_rows_deleted_blind'; +SELECT count(*) FROM t1; +SELECT count(*) FROM t2; +--source include/sync_slave_sql_with_master.inc +connection slave; +SELECT count(*) FROM t1; +SELECT count(*) FROM t2; +connection master; + + +# Deleting same keys (slaves stop) +DELETE FROM t1 WHERE id = 10; +SELECT 
count(*) FROM t1; +connection slave; +call mtr.add_suppression("Slave SQL.*Could not execute Delete_rows event on table test.t1.*Error_code.*"); +call mtr.add_suppression("Slave: Can't find record in 't1'.*"); +# wait until we have the expected error +--let $slave_sql_errno= convert_error(ER_KEY_NOT_FOUND) +--source include/wait_for_slave_sql_error.inc + +connection slave; +set @save_rocksdb_read_free_rpl_tables=@@global.rocksdb_read_free_rpl_tables; +set global rocksdb_read_free_rpl_tables="t.*"; +START SLAVE; +connection master; +--source include/sync_slave_sql_with_master.inc +connection slave; +SELECT count(*) FROM t1; +connection master; + + +# cleanup +connection slave; +set global rocksdb_read_free_rpl_tables=@save_rocksdb_read_free_rpl_tables; +connection master; +SET session rocksdb_blind_delete_primary_key=@save_rocksdb_blind_delete_primary_key; +SET session rocksdb_master_skip_tx_api=@save_rocksdb_master_skip_tx_api; + +DROP TABLE t1, t2; +--source include/rpl_end.inc diff --git a/storage/rocksdb/mysql-test/rocksdb/t/bloomfilter-master.opt b/storage/rocksdb/mysql-test/rocksdb/t/bloomfilter-master.opt new file mode 100644 index 0000000000000..8600e9e415ca0 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/bloomfilter-master.opt @@ -0,0 +1,2 @@ +--rocksdb_default_cf_options=write_buffer_size=256k;block_based_table_factory={filter_policy=bloomfilter:10:false;whole_key_filtering=0;};prefix_extractor=capped:20 +--rocksdb_override_cf_options=cf_short_prefix={prefix_extractor=capped:4};cf_long_prefix={prefix_extractor=capped:240} diff --git a/storage/rocksdb/mysql-test/rocksdb/t/bloomfilter.inc b/storage/rocksdb/mysql-test/rocksdb/t/bloomfilter.inc new file mode 100644 index 0000000000000..b388a8036ad51 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/bloomfilter.inc @@ -0,0 +1,59 @@ +--source include/have_rocksdb.inc + +DELIMITER //; +CREATE PROCEDURE bloom_start() +BEGIN + select variable_value into @c from information_schema.global_status 
where variable_name='rocksdb_bloom_filter_prefix_checked'; + select variable_value into @u from information_schema.global_status where variable_name='rocksdb_bloom_filter_prefix_useful'; +END// +CREATE PROCEDURE bloom_end() +BEGIN +select case when variable_value-@c > 0 then 'true' else 'false' end as checked from information_schema.global_status where variable_name='rocksdb_bloom_filter_prefix_checked'; +END// +DELIMITER ;// + + +#BF is sometimes invoked and useful +--let $CF= +--source bloomfilter_table_def.inc +--source bloomfilter_load_select.inc + +#BF is always invoked but not useful at all +--let $CF=COMMENT 'cf_short_prefix' +--source bloomfilter_table_def.inc +--source bloomfilter_load_select.inc + + +#BF is most of the time invoked and useful +--let $CF=COMMENT 'cf_long_prefix' +--source bloomfilter_table_def.inc +--source bloomfilter_load_select.inc + +# BUG: Prev() with prefix lookup should not use prefix bloom filter +create table r1 (id1 bigint, id2 bigint, id3 bigint, v1 int, v2 text, primary key (id1, id2, id3)) engine=rocksdb DEFAULT CHARSET=latin1 collate latin1_bin; +--disable_query_log +let $max = 100000; +let $i = 1; +while ($i <= $max) { + let $insert = INSERT INTO r1 VALUES ($i,$i,$i,$i,$i); + inc $i; + eval $insert; +} +--enable_query_log +call bloom_start(); +select * from r1 where id1=1 and id2 in (1) order by id3 asc; +call bloom_end(); +call bloom_start(); +select * from r1 where id1=1 and id2 in (1) order by id3 desc; +call bloom_end(); + +# cleanup +DROP PROCEDURE bloom_start; +DROP PROCEDURE bloom_end; +truncate table t1; +optimize table t1; +truncate table t2; +optimize table t2; +drop table if exists t1; +drop table if exists t2; +drop table if exists r1; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/bloomfilter.test b/storage/rocksdb/mysql-test/rocksdb/t/bloomfilter.test new file mode 100644 index 0000000000000..efcf9ee1f73cd --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/bloomfilter.test @@ -0,0 +1 @@ +--source 
bloomfilter.inc diff --git a/storage/rocksdb/mysql-test/rocksdb/t/bloomfilter2-master.opt b/storage/rocksdb/mysql-test/rocksdb/t/bloomfilter2-master.opt new file mode 100644 index 0000000000000..f3824106b25b6 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/bloomfilter2-master.opt @@ -0,0 +1 @@ +--rocksdb_default_cf_options=write_buffer_size=64k;block_based_table_factory={filter_policy=bloomfilter:10:false;whole_key_filtering=0;};prefix_extractor=capped:24 diff --git a/storage/rocksdb/mysql-test/rocksdb/t/bloomfilter2.test b/storage/rocksdb/mysql-test/rocksdb/t/bloomfilter2.test new file mode 100644 index 0000000000000..c4f1570ec4191 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/bloomfilter2.test @@ -0,0 +1,103 @@ +--source include/have_rocksdb.inc + +## Test 0: Eq cond len includs VARCHAR, and real cond len < prefix bloom len < VARCHAR definition len +CREATE TABLE t0 (id1 VARCHAR(30), id2 INT, value INT, PRIMARY KEY (id1, id2)) ENGINE=rocksdb collate latin1_bin; +--disable_query_log +let $i = 1; +while ($i <= 10000) { + let $insert = INSERT INTO t0 VALUES('X', $i, $i); + inc $i; + eval $insert; +} +--enable_query_log + +# BF not used +select variable_value into @u from information_schema.global_status where variable_name='rocksdb_bloom_filter_prefix_useful'; +SELECT COUNT(*) FROM t0 WHERE id1='X' AND id2>=1; +select case when variable_value-@u = 0 then 'true' else 'false' end from information_schema.global_status where variable_name='rocksdb_bloom_filter_prefix_useful'; + +DROP TABLE t0; + + +## Test 1: Eq cond len is shorter than prefix bloom len +CREATE TABLE t1 (id1 BIGINT, id2 INT, id3 BIGINT, value INT, PRIMARY KEY (id1, id2, id3)) ENGINE=rocksdb; + +--disable_query_log +let $i = 1; +while ($i <= 10000) { + let $insert = INSERT INTO t1 VALUES(1, 1, $i, $i); + eval $insert; + inc $i; +} +--enable_query_log + +# BF not used (4+8+4=16) +select variable_value into @u from information_schema.global_status where 
variable_name='rocksdb_bloom_filter_prefix_useful'; +SELECT COUNT(*) FROM t1 WHERE id1=1 AND id2=1 AND id3>=2; +select case when variable_value-@u = 0 then 'true' else 'false' end from information_schema.global_status where variable_name='rocksdb_bloom_filter_prefix_useful'; + +# BF not used (4+8=12) +select variable_value into @u from information_schema.global_status where variable_name='rocksdb_bloom_filter_prefix_useful'; +SELECT COUNT(*) FROM t1 WHERE id1=1 AND id2>=1 AND id3>=2; +select case when variable_value-@u = 0 then 'true' else 'false' end from information_schema.global_status where variable_name='rocksdb_bloom_filter_prefix_useful'; + +DROP TABLE t1; + + +## Test 2: Long IN and short IN (varchar) -- can_use_bloom_filter changes within the same query +CREATE TABLE t2 (id1 INT, id2 VARCHAR(100), id3 BIGINT, value INT, PRIMARY KEY (id1, id2, id3)) ENGINE=rocksdb collate latin1_bin; +--disable_query_log +let $i = 1; +while ($i <= 10000) { + let $insert = INSERT INTO t2 VALUES($i, $i, $i, $i); + inc $i; + eval $insert; +} +--enable_query_log + +# BF used for large cond, not used for short cond +select variable_value into @u from information_schema.global_status where variable_name='rocksdb_bloom_filter_prefix_useful'; +select count(*) from t2 WHERE id1=100 and id2 IN ('00000000000000000000', '100'); +select case when variable_value-@u > 0 then 'true' else 'false' end from information_schema.global_status where variable_name='rocksdb_bloom_filter_prefix_useful'; + +select variable_value into @u from information_schema.global_status where variable_name='rocksdb_bloom_filter_prefix_useful'; +select count(*) from t2 WHERE id1=200 and id2 IN ('00000000000000000000', '200'); +select case when variable_value-@u > 0 then 'true' else 'false' end from information_schema.global_status where variable_name='rocksdb_bloom_filter_prefix_useful'; + +# BF not used because cond length is too small in all cases +select variable_value into @u from 
information_schema.global_status where variable_name='rocksdb_bloom_filter_prefix_useful'; +select count(*) from t2 WHERE id1=200 and id2 IN ('3', '200'); +select case when variable_value-@u = 0 then 'true' else 'false' end from information_schema.global_status where variable_name='rocksdb_bloom_filter_prefix_useful'; + +DROP TABLE t2; + + +## Test 3: Eq cond len is longer than prefix bloom len +CREATE TABLE t3 (id1 BIGINT, id2 BIGINT, id3 BIGINT, id4 BIGINT, PRIMARY KEY (id1, id2, id3, id4)) ENGINE=rocksdb collate latin1_bin; +--disable_query_log +let $i = 1; +while ($i <= 10000) { + if ($i != 5000) { + let $insert = INSERT INTO t3 VALUES(1, $i, $i, $i); + eval $insert; + } + inc $i; +} +--enable_query_log + +# Full BF works with Get(), Block based does not. +select variable_value into @u from information_schema.global_status where variable_name='rocksdb_bloom_filter_useful'; +SELECT COUNT(*) FROM t3 WHERE id1=1 AND id2=5000 AND id3=1 AND id4=1; +select case when variable_value-@u > 0 then 'true' else 'false' end from information_schema.global_status where variable_name='rocksdb_bloom_filter_useful'; + +# BF used (4+8+8+8) +select variable_value into @u from information_schema.global_status where variable_name='rocksdb_bloom_filter_prefix_useful'; +SELECT COUNT(*) FROM t3 WHERE id1=1 AND id2=1 AND id3=1; +select case when variable_value-@u > 0 then 'true' else 'false' end from information_schema.global_status where variable_name='rocksdb_bloom_filter_prefix_useful'; + +select variable_value into @u from information_schema.global_status where variable_name='rocksdb_bloom_filter_prefix_useful'; +SELECT COUNT(*) FROM t3 WHERE id1=1 AND id2=1 AND id3=1 AND id4 <= 500; +select case when variable_value-@u > 0 then 'true' else 'false' end from information_schema.global_status where variable_name='rocksdb_bloom_filter_prefix_useful'; + +DROP TABLE t3; + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/bloomfilter3-master.opt 
b/storage/rocksdb/mysql-test/rocksdb/t/bloomfilter3-master.opt new file mode 100644 index 0000000000000..ef6d0fd554adb --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/bloomfilter3-master.opt @@ -0,0 +1,3 @@ +--rocksdb_default_cf_options=write_buffer_size=64k;block_based_table_factory={filter_policy=bloomfilter:10:false;whole_key_filtering=0;};prefix_extractor=capped:20 +--rocksdb_debug_optimizer_n_rows=1000 +--rocksdb_table_stats_sampling_pct=100 diff --git a/storage/rocksdb/mysql-test/rocksdb/t/bloomfilter3.test b/storage/rocksdb/mysql-test/rocksdb/t/bloomfilter3.test new file mode 100644 index 0000000000000..a15e2a8969353 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/bloomfilter3.test @@ -0,0 +1,118 @@ +--source include/have_rocksdb.inc + +--source include/restart_mysqld.inc +CREATE TABLE `linktable` ( + `id1` bigint(20) unsigned NOT NULL DEFAULT '0', + `id1_type` int(10) unsigned NOT NULL DEFAULT '0', + `id2` bigint(20) unsigned NOT NULL DEFAULT '0', + `id2_type` int(10) unsigned NOT NULL DEFAULT '0', + `link_type` bigint(20) unsigned NOT NULL DEFAULT '0', + `visibility` tinyint(3) NOT NULL DEFAULT '0', + `data` varchar(255) NOT NULL DEFAULT '', + `time` bigint(20) unsigned NOT NULL DEFAULT '0', + `version` int(11) unsigned NOT NULL DEFAULT '0', + PRIMARY KEY (link_type, `id1`,`id2`) COMMENT 'cf_link_pk', + KEY `id1_type` (`id1`,`link_type`,`visibility`,`time`,`version`,`data`) COMMENT 'rev:cf_link_id1_type', + KEY `id1_type2` (`id1`,`link_type`,`time`,`version`,`data`,`visibility`) COMMENT 'rev:cf_link_id1_type2', + KEY `id1_type3` (`id1`,`visibility`,`time`,`version`,`data`,`link_type`) COMMENT 'rev:cf_link_id1_type3' +) ENGINE=RocksDB DEFAULT COLLATE=latin1_bin; + +--disable_query_log +let $i = 1; +while ($i <= 10000) { + let $insert = INSERT INTO linktable VALUES($i, $i, $i, $i, 1, 1, $i, $i, $i); + eval $insert; + inc $i; +} +--enable_query_log + +## HA_READ_PREFIX_LAST_OR_PREV +# BF len 21 +select variable_value into @c from 
information_schema.global_status where variable_name='rocksdb_bloom_filter_prefix_checked'; +select id1, id2, link_type, visibility, data, time, version from linktable FORCE INDEX(`id1_type`) where id1 = 100 and link_type = 1 and time >= 0 and time <= 9223372036854775807 and visibility = 1 order by time desc; +select case when variable_value-@c > 0 then 'true' else 'false' end from information_schema.global_status where variable_name='rocksdb_bloom_filter_prefix_checked'; + +# BF len 20 +select variable_value into @c from information_schema.global_status where variable_name='rocksdb_bloom_filter_prefix_checked'; +select id1, id2, link_type, visibility, data, time, version from linktable FORCE INDEX(`id1_type2`) where id1 = 100 and link_type = 1 and time >= 0 and time <= 9223372036854775807 order by time desc; +select case when variable_value-@c > 0 then 'true' else 'false' end from information_schema.global_status where variable_name='rocksdb_bloom_filter_prefix_checked'; + +# BF len 13 +select variable_value into @c from information_schema.global_status where variable_name='rocksdb_bloom_filter_prefix_checked'; +select id1, id2, link_type, visibility, data, time, version from linktable FORCE INDEX(`id1_type3`) where id1 = 100 and time >= 0 and time <= 9223372036854775807 and visibility = 1 order by time desc; +select case when variable_value-@c = 0 then 'true' else 'false' end from information_schema.global_status where variable_name='rocksdb_bloom_filter_prefix_checked'; + +## HA_READ_PREFIX_LAST_OR_PREV (no end range) +# BF len 20 +select variable_value into @c from information_schema.global_status where variable_name='rocksdb_bloom_filter_prefix_checked'; +select id1, id2, link_type, visibility, data, time, version from linktable FORCE INDEX(`id1_type`) where id1 = 100 and link_type = 1 and visibility = 1 and time >= 0 order by time desc; +select case when variable_value-@c > 0 then 'true' else 'false' end from information_schema.global_status where 
variable_name='rocksdb_bloom_filter_prefix_checked'; + +# BF len 19 +select variable_value into @c from information_schema.global_status where variable_name='rocksdb_bloom_filter_prefix_checked'; +select id1, id2, link_type, visibility, data, time, version from linktable FORCE INDEX(`id1_type2`) where id1 = 100 and link_type = 1 and time >= 0 order by time desc; +select case when variable_value-@c = 0 then 'true' else 'false' end from information_schema.global_status where variable_name='rocksdb_bloom_filter_prefix_checked'; + +--echo ## HA_READ_PREFIX_LAST +--echo # BF len 20 +select variable_value into @c from information_schema.global_status where variable_name='rocksdb_bloom_filter_prefix_checked'; +select id1, id2, link_type, visibility, data, time, version from linktable FORCE INDEX(`id1_type`) where id1 = 100 and link_type = 1 and visibility = 1 order by time desc; +select case when variable_value-@c > 0 then 'true' else 'false' end from information_schema.global_status where variable_name='rocksdb_bloom_filter_prefix_checked'; + +--echo # BF len 19 +select variable_value into @c from information_schema.global_status where variable_name='rocksdb_bloom_filter_prefix_checked'; +select id1, id2, link_type, visibility, data, time, version from linktable FORCE INDEX(`id1_type2`) where id1 = 100 and link_type = 1 order by time desc; +select case when variable_value-@c = 0 then 'true' else 'false' end from information_schema.global_status where variable_name='rocksdb_bloom_filter_prefix_checked'; + +--echo # BF len 12 +select variable_value into @c from information_schema.global_status where variable_name='rocksdb_bloom_filter_prefix_checked'; +select id1, id2, link_type, visibility, data, time, version from linktable FORCE INDEX(`id1_type3`) where id1 = 100 and visibility = 1 order by time desc; +select case when variable_value-@c = 0 then 'true' else 'false' end from information_schema.global_status where variable_name='rocksdb_bloom_filter_prefix_checked'; + + 
+DROP TABLE linktable; +--source include/restart_mysqld.inc + +--echo # +--echo # bloom filter prefix is 20 byte +--echo # Create a key which is longer than that, so that we see that +--echo # eq_cond_len= slice.size() - 1; +--echo # doesnt work. +--echo # +--echo # indexnr 4 +--echo # kp0 + 4 = 8 +--echo # kp1 + 8 = 16 +--echo # kp2 + 8 = 24 24>20 byte length prefix +--echo # kp3 + 8 = 28 + +create table t1 ( + pk int primary key, + kp0 int not null, + kp1 bigint not null, + kp2 bigint not null, + kp3 bigint not null, + key kp12(kp0, kp1, kp2, kp3) comment 'rev:x1' +) engine=rocksdb; + +insert into t1 values (1, 1,1, 1,1); +insert into t1 values (10,1,1,0x12FFFFFFFFFF,1); +insert into t1 values (11,1,1,0x12FFFFFFFFFF,1); +insert into t1 values (20,2,2,0x12FFFFFFFFFF,1); +insert into t1 values (21,2,2,0x12FFFFFFFFFF,1); + +--source include/restart_mysqld.inc + +--replace_column 9 # +explain +select * from t1 where kp0=1 and kp1=1 and kp2=0x12FFFFFFFFFF order by kp3 desc; +show status like '%rocksdb_bloom_filter_prefix%'; + +select variable_value into @c from information_schema.global_status where variable_name='rocksdb_bloom_filter_prefix_checked'; +select * from t1 where kp0=1 and kp1=1 and kp2=0x12FFFFFFFFFF order by kp3 desc; +show status like '%rocksdb_bloom_filter_prefix%'; +--echo # The following MUST show TRUE: +select case when variable_value-@c = 0 then 'true' else 'false' end from information_schema.global_status where variable_name='rocksdb_bloom_filter_prefix_checked'; + +drop table t1; +# Key length is 4 + 8 + 8 = 20 + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/bloomfilter4-master.opt b/storage/rocksdb/mysql-test/rocksdb/t/bloomfilter4-master.opt new file mode 100644 index 0000000000000..0a32575796212 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/bloomfilter4-master.opt @@ -0,0 +1 @@ 
+--rocksdb_default_cf_options=write_buffer_size=16k;block_based_table_factory={filter_policy=bloomfilter:10:false;whole_key_filtering=0;};prefix_extractor=capped:12 diff --git a/storage/rocksdb/mysql-test/rocksdb/t/bloomfilter4.test b/storage/rocksdb/mysql-test/rocksdb/t/bloomfilter4.test new file mode 100644 index 0000000000000..76ec6ca101f3c --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/bloomfilter4.test @@ -0,0 +1,52 @@ +--source include/have_rocksdb.inc + +# Fixing issue#230 -- Prefix bloom filter + reverse column family misses some rows +# This test inserts 20,000 rows into t1, then selecting one by one from stored procedure. +# If the select does not return any row, it is wrong. + +CREATE TABLE t1 ( + `id1` int unsigned NOT NULL DEFAULT '0', + `id2` int unsigned NOT NULL DEFAULT '0', + `link_type` int unsigned NOT NULL DEFAULT '0', + `visibility` tinyint NOT NULL DEFAULT '0', + `data` varchar(255) NOT NULL DEFAULT '', + `time` int unsigned NOT NULL DEFAULT '0', + `version` int unsigned NOT NULL DEFAULT '0', + PRIMARY KEY (id1, link_type, visibility, id2) COMMENT 'rev:cf_link_pk' +) ENGINE=RocksDB DEFAULT COLLATE=latin1_bin; + +DELIMITER //; +CREATE PROCEDURE select_test() +BEGIN + DECLARE id1_cond INT; + SET id1_cond = 1; + WHILE id1_cond <= 20000 DO + SELECT count(*) AS cnt FROM (SELECT id1 FROM t1 FORCE INDEX (PRIMARY) WHERE id1 = id1_cond AND link_type = 1 AND visibility = 1 ORDER BY id2 DESC) AS t INTO @cnt; + IF @cnt < 1 THEN + SELECT id1_cond, @cnt; + END IF; + SET id1_cond = id1_cond + 1; + END WHILE; +END// +DELIMITER ;// + +--disable_query_log +let $i = 1; +while ($i <= 20000) { + let $insert = INSERT INTO t1 VALUES($i, $i, 1, 1, $i, $i, $i); + eval $insert; + inc $i; +} +--enable_query_log + +--echo "Skipping bloom filter" +SET session rocksdb_skip_bloom_filter_on_read=1; +CALL select_test(); + +--echo "Using bloom filter" +SET session rocksdb_skip_bloom_filter_on_read=0; +CALL select_test(); + +DROP PROCEDURE select_test; +drop table t1; 
+ diff --git a/storage/rocksdb/mysql-test/rocksdb/t/bloomfilter_load_select.inc b/storage/rocksdb/mysql-test/rocksdb/t/bloomfilter_load_select.inc new file mode 100644 index 0000000000000..5c122d6bd19ab --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/bloomfilter_load_select.inc @@ -0,0 +1,189 @@ +source include/have_sequence.inc; + +# loading some data (larger than write buf size) to cause compaction +insert t1 + select (seq+9) div 10, (seq+4) div 5, (seq+4) div 5, seq, seq, 1000, "aaabbbccc" + from seq_1_to_10000; +insert t2 select * from t1; + +# BF conditions (prefix short(4B)|medium(20B)|long(240B)) +#0 no eq condition (o, x, x) +## cond length 4, key length > 4 +call bloom_start(); +select count(*) from t1; +call bloom_end(); +call bloom_start(); +select count(*) from t2; +call bloom_end(); +call bloom_start(); +select count(*) from t1 force index(PRIMARY) where id1 >= 1; +call bloom_end(); +call bloom_start(); +select count(*) from t1 force index(id2_id1) where id2 >= 1; +call bloom_end(); +call bloom_start(); +select count(*) from t2 force index(id3_id4) where id3 >= '1'; +call bloom_end(); + +#1 cond length == prefix length (o, o, x) +## cond length 4+8+8=20, key length > 20 +call bloom_start(); +select count(*) from t1 force index(id2_id1) where id2=2 and id1=1; +call bloom_end(); +call bloom_start(); +select count(*) from t1 force index(id2_id1) where id2=24 and id1=12; +call bloom_end(); +call bloom_start(); +select count(*) from t1 force index(id2_id1) where id2=88 and id1=44; +call bloom_end(); +call bloom_start(); +select count(*) from t1 force index(id2_id1) where id2=100 and id1=50; +call bloom_end(); +call bloom_start(); +select count(*) from t1 force index(id2_id1) where id2=428 and id1=214; +call bloom_end(); +## (cond_length == extended_key_length(4+8+4+4=20) == prefix_length) +call bloom_start(); +select count(*) from t2 force index (id2_id4_id5) where id2=1 and id4=1 and id5=1; +call bloom_end(); +call bloom_start(); +select count(*) 
from t2 force index (id2_id4_id5) where id2=23 and id4=115 and id5=115; +call bloom_end(); +call bloom_start(); +select count(*) from t2 force index (id2_id4_id5) where id2=500 and id4=2500 and id5=2500; +call bloom_end(); +call bloom_start(); +select count(*) from t2 force index (id2_id4_id5) where id2=601 and id4=3005 and id5=3005; +call bloom_end(); + +#2 cond length < actual key length and cond_length < prefix length (o, x, x) +## for long prefix key, most cases falling into this category, unless all key colums are used. +## cond length 4+8=12, key length > 12 +call bloom_start(); +select count(*) from t2 force index (id2_id3) where id2=1; +call bloom_end(); +call bloom_start(); +select count(*) from t2 force index (id2_id3) where id2=23; +call bloom_end(); +call bloom_start(); +select count(*) from t2 force index (id2_id3) where id2=345; +call bloom_end(); +call bloom_start(); +select count(*) from t2 force index (id2_id3) where id2=456; +call bloom_end(); +call bloom_start(); +select count(*) from t2 force index (id2_id4) where id2=1; +call bloom_end(); +call bloom_start(); +select count(*) from t2 force index (id2_id4) where id2=23; +call bloom_end(); +call bloom_start(); +select count(*) from t2 force index (id2_id4) where id2=345; +call bloom_end(); +call bloom_start(); +select count(*) from t2 force index (id2_id4) where id2=456; +call bloom_end(); + +#3 both actual key length and cond length >= prefix length (o, o, o/x) +## cond length 4+8+9+8+4=33 +call bloom_start(); +select count(*) from t1 force index (id2_id3_id1_id4) where id2=1 and id3='1' and id1=1 order by id4; +call bloom_end(); +call bloom_start(); +select count(*) from t1 force index (id2_id3_id1_id4) where id2=36 and id3='36' and id1=18 order by id4; +call bloom_end(); +call bloom_start(); +select count(*) from t1 force index (id2_id3_id1_id4) where id2=124 and id3='124' and id1=62 order by id4; +call bloom_end(); +call bloom_start(); +select count(*) from t1 force index (id2_id3_id1_id4) 
where id2=888 and id3='888' and id1=444 order by id4; +call bloom_end(); +call bloom_start(); +select count(*) from t1 force index (id2_id3_id1_id4) where id2=124 and id3='124'; +call bloom_end(); +call bloom_start(); +select count(*) from t2 force index (id2_id3) where id2=1 and id3='1' and id4=1; +call bloom_end(); +call bloom_start(); +select count(*) from t2 force index (id2_id3) where id2=12 and id3='12' and id4=60; +call bloom_end(); +## 4+8+9=25 +call bloom_start(); +select count(*) from t1 force index (id2_id3) where id2=1 and id3='1'; +call bloom_end(); +call bloom_start(); +select count(*) from t1 force index (id2_id3) where id2=23 and id3='23'; +call bloom_end(); +call bloom_start(); +select count(*) from t1 force index (id3_id2) where id2=1 and id3='1'; +call bloom_end(); +call bloom_start(); +select count(*) from t1 force index (id3_id2) where id2=23 and id3='23'; +call bloom_end(); + +#4 actual key length > prefix length and cond length < prefix length (o, x, x) +## cond length 4+8=12 +call bloom_start(); +select count(*) from t1 force index (PRIMARY) where id1=1; +call bloom_end(); +call bloom_start(); +select count(*) from t1 force index (PRIMARY) where id1=12; +call bloom_end(); +call bloom_start(); +select count(*) from t1 force index (PRIMARY) where id1=23; +call bloom_end(); +call bloom_start(); +select count(*) from t1 force index (PRIMARY) where id1=100; +call bloom_end(); +call bloom_start(); +select count(*) from t1 force index (PRIMARY) where id1=234; +call bloom_end(); +call bloom_start(); +select count(*) from t1 force index (id2_id3_id1_id4) where id2=36; +call bloom_end(); +call bloom_start(); +select count(*) from t1 force index (id2_id3_id1_id4) where id2=234; +call bloom_end(); + +#5 cond length == extended key length < prefix length (o, o, o) +call bloom_start(); +select count(*) from t2 force index (id2) where id2=1 and id4=1; +call bloom_end(); +call bloom_start(); +select count(*) from t2 force index (id2) where id2=23 and 
id4=115; +call bloom_end(); +call bloom_start(); +select count(*) from t2 force index (id2) where id2=500 and id4=2500; +call bloom_end(); +call bloom_start(); +select count(*) from t2 force index (id2) where id2=601 and id4=3005; +call bloom_end(); +## 4+9+4=17 +call bloom_start(); +select count(*) from t2 force index (id3_id4) where id3='1' and id4=1; +call bloom_end(); +call bloom_start(); +select count(*) from t2 force index (id3_id4) where id3='12' and id4=60; +call bloom_end(); + +#6 cond length == non-extended key length < prefix length, actual key length > prefix length (o, x, x) +call bloom_start(); +select count(*) from t1 force index (id2) where id2=1; +call bloom_end(); +call bloom_start(); +select count(*) from t1 force index (id2) where id2=23; +call bloom_end(); +call bloom_start(); +select count(*) from t1 force index (id2) where id2=345; +call bloom_end(); +call bloom_start(); +select count(*) from t1 force index (id2) where id2=456; +call bloom_end(); +## 4+9+4=17 +call bloom_start(); +select count(*) from t2 force index (id3_id5) where id3='100' and id5=500; +call bloom_end(); +call bloom_start(); +select count(*) from t2 force index (id3_id5) where id3='240' and id5=1200; +call bloom_end(); + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/bloomfilter_skip-master.opt b/storage/rocksdb/mysql-test/rocksdb/t/bloomfilter_skip-master.opt new file mode 100644 index 0000000000000..5c62c7cf986b8 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/bloomfilter_skip-master.opt @@ -0,0 +1,3 @@ +--rocksdb_default_cf_options=write_buffer_size=256k;block_based_table_factory={filter_policy=bloomfilter:10:false;whole_key_filtering=0;};prefix_extractor=capped:20 +--rocksdb_override_cf_options=cf_short_prefix={prefix_extractor=capped:4};cf_long_prefix={prefix_extractor=capped:240} +--rocksdb_skip_bloom_filter_on_read=1 diff --git a/storage/rocksdb/mysql-test/rocksdb/t/bloomfilter_skip.test b/storage/rocksdb/mysql-test/rocksdb/t/bloomfilter_skip.test new 
file mode 100644 index 0000000000000..efcf9ee1f73cd --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/bloomfilter_skip.test @@ -0,0 +1 @@ +--source bloomfilter.inc diff --git a/storage/rocksdb/mysql-test/rocksdb/t/bloomfilter_table_def.inc b/storage/rocksdb/mysql-test/rocksdb/t/bloomfilter_table_def.inc new file mode 100644 index 0000000000000..2bc9bb64d5eb0 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/bloomfilter_table_def.inc @@ -0,0 +1,33 @@ +eval create or replace table t1 ( + id1 bigint not null, + id2 bigint not null, + id3 varchar(100) not null, + id4 int not null, + id5 int not null, + value bigint, + value2 varchar(100), + primary key (id1, id2, id3, id4) $CF, + index id2 (id2) $CF, + index id2_id1 (id2, id1) $CF, + index id2_id3 (id2, id3) $CF, + index id2_id4 (id2, id4) $CF, + index id2_id3_id1_id4 (id2, id3, id1, id4) $CF, + index id3_id2 (id3, id2) $CF +) engine=ROCKSDB; + +eval create or replace table t2 ( + id1 bigint not null, + id2 bigint not null, + id3 varchar(100) not null, + id4 int not null, + id5 int not null, + value bigint, + value2 varchar(100), + primary key (id4) $CF, + index id2 (id2) $CF, + index id2_id3 (id2, id3) $CF, + index id2_id4 (id2, id4) $CF, + index id2_id4_id5 (id2, id4, id5) $CF, + index id3_id4 (id3, id4) $CF, + index id3_id5 (id3, id5) $CF +) engine=ROCKSDB; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/bulk_load.test b/storage/rocksdb/mysql-test/rocksdb/t/bulk_load.test new file mode 100644 index 0000000000000..283cedcb6bb0e --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/bulk_load.test @@ -0,0 +1,124 @@ +--source include/have_rocksdb.inc +--source include/have_partition.inc + +# This test requires ~2.3G of disk space +--source include/big_test.inc + +--disable_warnings +DROP TABLE IF EXISTS t1, t2, t3; +--enable_warnings + +# Create a table with a primary key and one secondary key as well as one +# more column +CREATE TABLE t1(pk CHAR(5) PRIMARY KEY, a char(30), b char(30), key(a)) 
COLLATE 'latin1_bin'; + +# Create a second identical table to validate that bulk loading different +# tables in the same session works +CREATE TABLE t2(pk CHAR(5) PRIMARY KEY, a char(30), b char(30), key(a)) COLLATE 'latin1_bin'; + +# Create a third table using partitions to validate that bulk loading works +# across a partitioned table +CREATE TABLE t3(pk CHAR(5) PRIMARY KEY, a char(30), b char(30), key(a)) COLLATE 'latin1_bin' + PARTITION BY KEY() PARTITIONS 4; + +--let $file = `SELECT CONCAT(@@datadir, "test_loadfile.txt")` + +# Create a text file with data to import into the table. +# The primary key is in sorted order and the secondary keys are randomly generated +--let ROCKSDB_INFILE = $file +perl; +my $fn = $ENV{'ROCKSDB_INFILE'}; +open(my $fh, '>>', $fn) || die "perl open($fn): $!"; +my $max = 5000000; +my @chars = ("A".."Z", "a".."z", "0".."9"); +my @lowerchars = ("a".."z"); +my @powers_of_26 = (26 * 26 * 26 * 26, 26 * 26 * 26, 26 * 26, 26, 1); +for (my $ii = 0; $ii < $max; $ii++) +{ + my $pk; + my $tmp = $ii; + foreach (@powers_of_26) + { + $pk .= $lowerchars[$tmp / $_]; + $tmp = $tmp % $_; + } + + my $num = int(rand(25)) + 6; + my $a; + $a .= $chars[rand(@chars)] for 1..$num; + + $num = int(rand(25)) + 6; + my $b; + $b .= $chars[rand(@chars)] for 1..$num; + print $fh "$pk\t$a\t$b\n"; +} +close($fh); +EOF + +--file_exists $file + +# Make sure a snapshot held by another user doesn't block the bulk load +connect (other,localhost,root,,); +set session transaction isolation level repeatable read; +select * from information_schema.rocksdb_dbstats where stat_type='DB_NUM_SNAPSHOTS'; +start transaction with consistent snapshot; +select * from information_schema.rocksdb_dbstats where stat_type='DB_NUM_SNAPSHOTS'; + +connection default; +set rocksdb_bulk_load=1; +set rocksdb_bulk_load_size=100000; +--disable_query_log +--echo LOAD DATA INFILE INTO TABLE t1; +eval LOAD DATA INFILE '$file' INTO TABLE t1; +--echo LOAD DATA INFILE INTO TABLE t2; +eval LOAD DATA INFILE 
'$file' INTO TABLE t2; +--echo LOAD DATA INFILE INTO TABLE t3; +eval LOAD DATA INFILE '$file' INTO TABLE t3; +--enable_query_log +set rocksdb_bulk_load=0; + +# Make sure row count index stats are correct +--replace_column 6 # 7 # 8 # 9 # +SHOW TABLE STATUS WHERE name LIKE 't%'; + +ANALYZE TABLE t1, t2, t3; + +--replace_column 6 # 7 # 8 # 9 # +SHOW TABLE STATUS WHERE name LIKE 't%'; + +# Make sure all the data is there. +select count(pk) from t1; +select count(a) from t1; +select count(b) from t1; +select count(pk) from t2; +select count(a) from t2; +select count(b) from t2; +select count(pk) from t3; +select count(a) from t3; +select count(b) from t3; + +# Create a dummy file with a bulk load extesion. It should be removed when +# the server starts +--let $tmpext = .bulk_load.tmp +--let $MYSQLD_DATADIR= `SELECT @@datadir` +--let $datadir = $MYSQLD_DATADIR/.rocksdb +--write_file $datadir/test$tmpext +dummy data +EOF +--write_file $datadir/longfilenamethatvalidatesthatthiswillgetdeleted$tmpext +dummy data +EOF + +# Show the files exists +--list_files $datadir *$tmpext + +# Now restart the server and make sure it automatically removes this test file +--source include/restart_mysqld.inc + +# Show the files do not exist +--list_files $datadir *$tmpext + +# Cleanup +disconnect other; +DROP TABLE t1, t2, t3; +--remove_file $file diff --git a/storage/rocksdb/mysql-test/rocksdb/t/cardinality-master.opt b/storage/rocksdb/mysql-test/rocksdb/t/cardinality-master.opt new file mode 100644 index 0000000000000..2cd3c8051f87f --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/cardinality-master.opt @@ -0,0 +1,3 @@ +--skip-rocksdb_debug_optimizer_no_zero_cardinality +--rocksdb_compaction_sequential_deletes=0 +--rocksdb_table_stats_sampling_pct=100 diff --git a/storage/rocksdb/mysql-test/rocksdb/t/cardinality.test b/storage/rocksdb/mysql-test/rocksdb/t/cardinality.test new file mode 100644 index 0000000000000..0bc0ae4e90028 --- /dev/null +++ 
b/storage/rocksdb/mysql-test/rocksdb/t/cardinality.test @@ -0,0 +1,43 @@ +--source include/have_rocksdb.inc + +--source include/restart_mysqld.inc + +--disable_warnings +DROP TABLE IF EXISTS t1; +--enable_warnings + +create table t1( + id bigint not null primary key, + i1 bigint, #unique + i2 bigint, #repeating + c1 varchar(20), #unique + c2 varchar(20), #repeating + index t1_1(id, i1), + index t1_2(i1, i2), + index t1_3(i2, i1), + index t1_4(c1, c2), + index t1_5(c2, c1) +) engine=rocksdb; +--disable_query_log +let $i=0; +while ($i<100000) +{ + inc $i; + eval insert t1(id, i1, i2, c1, c2) values($i, $i, $i div 10, $i, $i div 10); +} +--enable_query_log + +# Flush memtable out to SST and display index cardinalities +optimize table t1; +show index in t1; +SELECT table_name, table_rows FROM information_schema.tables WHERE table_schema = DATABASE(); + +--echo restarting... +--source include/restart_mysqld.inc + +# display index cardinalities after the restart +show index in t1; +SELECT table_name, table_rows FROM information_schema.tables WHERE table_schema = DATABASE(); + +drop table t1; + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/check_log_for_xa.py b/storage/rocksdb/mysql-test/rocksdb/t/check_log_for_xa.py new file mode 100644 index 0000000000000..a3d50f305a4f5 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/check_log_for_xa.py @@ -0,0 +1,31 @@ +import sys +import re + +""" +Example usage: + python check_log_for_xa.py path/to/log/mysqld.2.err rollback,commit,prepare +""" + +log_path = sys.argv[1] +desired_filters = sys.argv[2] + +all_filters = [ + ('rollback', re.compile('(\[Note\] rollback xid .+)')), + ('commit', re.compile('(\[Note\] commit xid .+)')), + ('prepare', + re.compile('(\[Note\] Found \d+ prepared transaction\(s\) in \w+)')), +] + +active_filters = filter(lambda f: f[0] in desired_filters, all_filters) + +results = set() +with open(log_path) as log: + for line in log: + line = line.strip() + for f in active_filters: + match = 
f[1].search(line) + if match: + results.add("**found '%s' log entry**" % f[0]) + +for res in results: + print res diff --git a/storage/rocksdb/mysql-test/rocksdb/t/check_table.inc b/storage/rocksdb/mysql-test/rocksdb/t/check_table.inc new file mode 100644 index 0000000000000..c108a97362db0 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/check_table.inc @@ -0,0 +1,54 @@ +# +# CHECK TABLE statements +# +# Note: the output is likely to be different for the engine under test, +# in which case rdiff will be needed. Or, the output might say that +# the storage engine does not support CHECK. +# + +--disable_warnings +DROP TABLE IF EXISTS t1,t2; +--enable_warnings + +CREATE TABLE t1 (a INT, b CHAR(8), pk INT AUTO_INCREMENT PRIMARY KEY) ENGINE=rocksdb; +INSERT INTO t1 (a,b) VALUES (1,'a'),(2,'b'); + +CREATE TABLE t2 (a INT, b CHAR(8), pk INT AUTO_INCREMENT PRIMARY KEY) ENGINE=rocksdb; + +CHECK TABLE t1; +INSERT INTO t1 (a,b) VALUES (3,'c'); +INSERT INTO t2 (a,b) VALUES (4,'d'); +CHECK TABLE t1, t2 FOR UPGRADE; +INSERT INTO t2 (a,b) VALUES (5,'e'); +CHECK TABLE t2 QUICK; +INSERT INTO t1 (a,b) VALUES (6,'f'); +CHECK TABLE t1 FAST; +INSERT INTO t1 (a,b) VALUES (7,'g'); +INSERT INTO t2 (a,b) VALUES (8,'h'); +CHECK TABLE t2, t1 MEDIUM; +INSERT INTO t1 (a,b) VALUES (9,'i'); +INSERT INTO t2 (a,b) VALUES (10,'j'); +CHECK TABLE t1, t2 EXTENDED; +INSERT INTO t1 (a,b) VALUES (11,'k'); +CHECK TABLE t1 CHANGED; + +DROP TABLE t1, t2; + + +CREATE TABLE t1 (a INT, b CHAR(8), pk INT AUTO_INCREMENT PRIMARY KEY, KEY(a)) ENGINE=rocksdb; +INSERT INTO t1 (a) VALUES (1),(2),(5); +CHECK TABLE t1; +INSERT INTO t1 (a) VALUES (6),(8),(12); +CHECK TABLE t1 FOR UPGRADE; +INSERT INTO t1 (a) VALUES (13),(15),(16); +CHECK TABLE t1 QUICK; +INSERT INTO t1 (a) VALUES (17),(120),(132); +CHECK TABLE t1 FAST; +INSERT INTO t1 (a) VALUES (801),(900),(7714); +CHECK TABLE t1 MEDIUM; +INSERT INTO t1 (a) VALUES (8760),(10023),(12000); +CHECK TABLE t1 EXTENDED; +INSERT INTO t1 (a) VALUES 
(13345),(24456),(78302),(143028); +CHECK TABLE t1 CHANGED; +DROP TABLE t1; + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/check_table.test b/storage/rocksdb/mysql-test/rocksdb/t/check_table.test new file mode 100644 index 0000000000000..4d349f7a167ba --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/check_table.test @@ -0,0 +1,12 @@ +--source include/have_rocksdb.inc + +# +# CHECK TABLE statements +# +# Note: the output is likely to be different for the engine under test, +# in which case rdiff will be needed. Or, the output might say that +# the storage engine does not support CHECK. +# + +--source check_table.inc + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/checkpoint.test b/storage/rocksdb/mysql-test/rocksdb/t/checkpoint.test new file mode 100644 index 0000000000000..e5de6246f6099 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/checkpoint.test @@ -0,0 +1,107 @@ +--source include/have_rocksdb.inc + +--disable_warnings +DROP TABLE IF EXISTS t1; +DROP TABLE IF EXISTS t2; +DROP TABLE IF EXISTS t3; +DROP TABLE IF EXISTS t4; +DROP TABLE IF EXISTS t5; +--enable_warnings + +# Start from clean slate +#--source include/restart_mysqld.inc + +CREATE TABLE t1 ( + a int not null, + b int not null, + primary key (a,b) comment 'cf1', + key (b) comment 'rev:cf2' +) ENGINE=RocksDB; + +CREATE TABLE t2 ( + a int not null, + b int not null, + primary key (a,b) comment 'cf1', + key (b) comment 'rev:cf2' +) ENGINE=RocksDB; + +CREATE TABLE t3 ( + a int not null, + b int not null, + primary key (a,b) comment 'cf1', + key (b) comment 'rev:cf2' +) ENGINE=RocksDB; + +CREATE TABLE t4 ( + a int not null, + b int not null, + primary key (a,b) comment 'cf1', + key (b) comment 'rev:cf2' +) ENGINE=RocksDB; + +# Populate tables +let $max = 1000; +let $table = t1; +--source drop_table_repopulate_table.inc +let $table = t2; +--source drop_table_repopulate_table.inc +let $table = t3; +--source drop_table_repopulate_table.inc +let $table = t4; +--source 
drop_table_repopulate_table.inc + +# Make sure new table gets unique indices +CREATE TABLE t5 ( + a int not null, + b int not null, + primary key (a,b) comment 'cf1', + key (b) comment 'rev:cf2' +) ENGINE=RocksDB; + +let $max = 1000; +let $table = t5; +--source drop_table_repopulate_table.inc + +# Create checkpoint without trailing '/' +let $checkpoint = $MYSQL_TMP_DIR/checkpoint; +let $succeeds = 1; +--source set_checkpoint.inc + +# Create checkpoint with a trailing '/' +let $checkpoint = $MYSQL_TMP_DIR/checkpoint/; +let $succeeds = 1; +--source set_checkpoint.inc + +# Set checkpoint dir as empty string, which fails +let $checkpoint = ; +let $succeeds = 0; +--source set_checkpoint.inc + +# Set checkpoint as a directory that does not exist, which fails +let $checkpoint = /does/not/exist; +let $succeeds = 0; +--source set_checkpoint.inc + +# Set checkpoint as a directory that already exists, which fails +let $checkpoint = $MYSQL_TMP_DIR/already-existing-directory; +--mkdir $checkpoint +let $succeeds = 0; +--source set_checkpoint.inc +--exec rm -rf $checkpoint + +--disable_result_log +truncate table t1; +optimize table t1; +truncate table t2; +optimize table t2; +truncate table t3; +optimize table t3; +truncate table t4; +optimize table t4; +truncate table t5; +optimize table t5; +drop table if exists t1; +drop table if exists t2; +drop table if exists t3; +drop table if exists t4; +drop table if exists t5; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/checksum_table.test b/storage/rocksdb/mysql-test/rocksdb/t/checksum_table.test new file mode 100644 index 0000000000000..51c639a85dd49 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/checksum_table.test @@ -0,0 +1,84 @@ +--source include/have_rocksdb.inc + +# +# CHECKSUM TABLE statements for standard CHECKSUM properties. 
+# Live checksums are covered in checksum_table_live.test +# + +--disable_warnings +DROP TABLE IF EXISTS t1,t2; +--enable_warnings + +CREATE TABLE t1 (a INT PRIMARY KEY, b CHAR(8)) ENGINE=rocksdb CHECKSUM=0; +INSERT INTO t1 (a,b) VALUES (1,'a'),(2,'b'); + +CREATE TABLE t2 (a INT PRIMARY KEY, b CHAR(8)) ENGINE=rocksdb CHECKSUM=0; + +CHECKSUM TABLE t1; +CHECKSUM TABLE t2, t1; +CHECKSUM TABLE t1, t2 QUICK; +CHECKSUM TABLE t1, t2 EXTENDED; + +DROP TABLE t1, t2; + +--echo # +--echo # Issue #110: SQL command checksum returns inconsistent result +--echo # +create table t1 (pk int primary key, col1 varchar(10)) engine=rocksdb; +insert into t1 values (2,'fooo'); +insert into t1 values (1,NULL); +checksum table t1; +checksum table t1; +select * from t1 where pk=2; +checksum table t1; +checksum table t1; +flush tables; +checksum table t1; +checksum table t1; + +drop table t1; + +--echo # +--echo # The following test is about making sure MyRocks CHECKSUM TABLE +--echo # values are the same as with InnoDB. +--echo # If you see checksum values changed, make sure their counterparts +--echo # in suite/innodb/r/checksum-matches-myrocks.result match. 
+--echo # + +create table t1 (pk int primary key, col1 varchar(10)) engine=rocksdb; +insert into t1 values (2,'fooo'); +insert into t1 values (1,NULL); +checksum table t1; +drop table t1; + +create table t1 ( + pk bigint unsigned primary key, + col1 varchar(10), + col2 tinyint, + col3 double +) engine=rocksdb; + +--echo # MariaDB has changed the checksumming algorithm +--echo # Enable the old algorithm: +set @tmp_old=@@old; +set old=1; + + +checksum table t1; + +insert into t1 values (1, NULL, NULL, NULL); +insert into t1 values (2, 'foo', NULL, NULL); +checksum table t1; + +insert into t1 values (3, NULL, 123, NULL); +insert into t1 values (4, NULL, NULL, 2.78); +checksum table t1; + +insert into t1 values (5, 'xxxYYYzzzT', NULL, 2.78); +insert into t1 values (6, '', NULL, 2.78); +checksum table t1; + +set old=@tmp_old; + +drop table t1; + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/checksum_table_live.test b/storage/rocksdb/mysql-test/rocksdb/t/checksum_table_live.test new file mode 100644 index 0000000000000..da278ed7f9b8c --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/checksum_table_live.test @@ -0,0 +1,24 @@ +--source include/have_rocksdb.inc + +# +# CHECKSUM TABLE statements for live CHECKSUM. 
+# + +--disable_warnings +DROP TABLE IF EXISTS t1,t2; +--enable_warnings + +# For most engines CHECKSUM=1 option will be ignored, +# and the results will be different + +CREATE TABLE t1 (a INT PRIMARY KEY, b CHAR(8)) ENGINE=rocksdb CHECKSUM=1; +INSERT INTO t1 (a,b) VALUES (1,'a'),(2,'b'); +CREATE TABLE t2 (a INT PRIMARY KEY, b CHAR(8)) ENGINE=rocksdb CHECKSUM=1; + +CHECKSUM TABLE t1; +CHECKSUM TABLE t2, t1; +CHECKSUM TABLE t1, t2 QUICK; +CHECKSUM TABLE t1, t2 EXTENDED; + +DROP TABLE t1, t2; + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/col_not_null.inc b/storage/rocksdb/mysql-test/rocksdb/t/col_not_null.inc new file mode 100644 index 0000000000000..2d3c9292441a8 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/col_not_null.inc @@ -0,0 +1,55 @@ +# +# NOT NULL attribute in columns +# +# Usage: +# let $col_type = ; +# let $col_default = ; +# --source col_not_null.inc +# +# We will add NOT NULL to the column options; +# + +--disable_warnings +DROP TABLE IF EXISTS t1; +--enable_warnings + +--echo #---------------------------------- +--echo # $col_type NOT NULL columns without a default +--echo #---------------------------------- + +eval CREATE TABLE t1 (pk INT AUTO_INCREMENT PRIMARY KEY, c $col_type NOT NULL) ENGINE=rocksdb; +SHOW COLUMNS IN t1; + +--error ER_BAD_NULL_ERROR +INSERT INTO t1 (c) VALUES (NULL); +eval INSERT INTO t1 (c) VALUES ($col_default); +SELECT HEX(c) FROM t1; + +DROP TABLE t1; + +--echo #---------------------------------- +--echo # $col_type NOT NULL columns with a default +--echo #---------------------------------- + +eval CREATE TABLE t1 ( + pk INT AUTO_INCREMENT PRIMARY KEY, + c $col_type NOT NULL DEFAULT $col_default +) ENGINE=rocksdb; + +SHOW COLUMNS IN t1; + +--error ER_INVALID_DEFAULT +eval ALTER TABLE t1 ADD COLUMN err $col_type NOT NULL DEFAULT NULL; + +--error ER_BAD_NULL_ERROR +INSERT INTO t1 (c) VALUES (NULL); + +eval INSERT INTO t1 (c) VALUES ($col_default); +eval INSERT INTO t1 () VALUES (); + +# HEX should be universal 
for all column types +SELECT pk, HEX(c) FROM t1 ORDER BY pk; + +DROP TABLE t1; + + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/col_not_null_timestamp.inc b/storage/rocksdb/mysql-test/rocksdb/t/col_not_null_timestamp.inc new file mode 100644 index 0000000000000..812ada6f48625 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/col_not_null_timestamp.inc @@ -0,0 +1,70 @@ +# +# NOT NULL attribute in TIMESTAMP columns +# +# This is a copy of col_not_null.inc, except that +# instead of getting an error on inserting NULL into a non-NULL column, +# we are getting the current timestamp (see MySQL:68472). +# If the bug is ever fixed, this include file won't be needed anymore. + + +--disable_warnings +DROP TABLE IF EXISTS t1; +--enable_warnings + +--echo #---------------------------------- +--echo # $col_type NOT NULL column without a default +--echo #---------------------------------- + +eval CREATE TABLE t1 (pk INT AUTO_INCREMENT PRIMARY KEY, c $col_type NOT NULL) ENGINE=rocksdb; +SHOW COLUMNS IN t1; + +# Here where the non-standard behavior strikes: +# instead of an error we are getting the current timestamp + +# As of mysql-5.6.11, this no longer works, and we get an error: +# (MariaDB doesn't have this patch, so it doesnt produce an error): +# --error ER_BAD_NULL_ERROR +INSERT INTO t1 (c) VALUES (NULL); +eval INSERT INTO t1 (c) VALUES ($col_default); +SELECT HEX(c) FROM t1; + +DROP TABLE t1; + +--echo #---------------------------------- +--echo # $col_type NOT NULL columns with a default +--echo #---------------------------------- + +eval CREATE TABLE t1 ( + pk INT AUTO_INCREMENT PRIMARY KEY, + c $col_type NOT NULL DEFAULT $col_default +) ENGINE=rocksdb; + +SHOW COLUMNS IN t1; + +--error ER_INVALID_DEFAULT +eval ALTER TABLE t1 ADD COLUMN err $col_type NOT NULL DEFAULT NULL; + +# Here where the non-standard behavior strikes: +# instead of an error we are getting the current timestamp + +# As of mysql-5.6.11, this no longer works, and we get an error: +# 
(MariaDB doesn't have this patch, so it doesnt produce an error): +# --error ER_BAD_NULL_ERROR + +# Since we don't produce an error, the row will get inserted. Make it +# deterministic: +set @save_ts=@@timestamp; +set timestamp=1478923914; + +INSERT INTO t1 (c) VALUES (NULL); +set timestamp=@save_ts; + +eval INSERT INTO t1 (c) VALUES ($col_default); +eval INSERT INTO t1 () VALUES (); + +# HEX should be universal for all column types +SELECT pk, HEX(c) FROM t1 ORDER BY pk; + +DROP TABLE t1; + + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/col_null.inc b/storage/rocksdb/mysql-test/rocksdb/t/col_null.inc new file mode 100644 index 0000000000000..7ebfee0b1144e --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/col_null.inc @@ -0,0 +1,34 @@ +# +# NULL attribute and DEFAULT NULL in columns +# +# Usage: +# let $col_type = ; +# let $col_default = ; +# --source col_null.inc +# +# We will add NULL attribute to the column options. +# + + +--disable_warnings +DROP TABLE IF EXISTS t1; +--enable_warnings + +eval CREATE TABLE t1 ( + c $col_type NULL, + c1 $col_type NULL DEFAULT NULL, + c2 $col_type NULL DEFAULT $col_default, + pk INT AUTO_INCREMENT PRIMARY KEY +) ENGINE=rocksdb; + +SHOW COLUMNS IN t1; + +INSERT INTO t1 (c,c1,c2) VALUES (NULL,NULL,NULL); +eval INSERT INTO t1 (c,c1,c2) VALUES ($col_default,$col_default,$col_default); +INSERT INTO t1 () VALUES (); + +SELECT pk, HEX(c), HEX(c1), HEX(c2) FROM t1 ORDER BY pk; +SELECT pk, HEX(c2) FROM t1 ORDER BY pk; + +DROP TABLE t1; + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/col_opt_default.test b/storage/rocksdb/mysql-test/rocksdb/t/col_opt_default.test new file mode 100644 index 0000000000000..6f91ee7ca9a9f --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/col_opt_default.test @@ -0,0 +1,27 @@ +--source include/have_rocksdb.inc + +# +# Check whether DEFAULT column attribute +# is supported in CREATE and ALTER TABLE. 
+# If the attribute is supported at all, it will be covered +# in more details in col_option_null and col_option_not_null tests. +# + +--disable_warnings +DROP TABLE IF EXISTS t1; +--enable_warnings + +CREATE TABLE t1 (a INT PRIMARY KEY DEFAULT '0') ENGINE=rocksdb; +SHOW COLUMNS IN t1; + +INSERT INTO t1 (a) VALUES (1); +SELECT a FROM t1; + +ALTER TABLE t1 ADD COLUMN b CHAR(8) DEFAULT ''; +SHOW COLUMNS IN t1; + +INSERT INTO t1 (b) VALUES ('a'); +SELECT a,b FROM t1 ORDER BY a,b; + +DROP TABLE t1; + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/col_opt_not_null.test b/storage/rocksdb/mysql-test/rocksdb/t/col_opt_not_null.test new file mode 100644 index 0000000000000..1de4ccee0f777 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/col_opt_not_null.test @@ -0,0 +1,229 @@ +--source include/have_rocksdb.inc + +# +# NOT NULL column attribute +# + +######################################### +# TODO: +# Currently the test produces incorrect (?) result +# due to bug MySQL:68472. If the bug is ever fixed, +# the test and result files will need to be updated. 
+######################################### + + +let $extra_col_opts = NOT NULL; + +--echo ######################## +--echo # BINARY columns +--echo ######################## + +--source type_binary.inc +--let $col_type = BINARY +--let $col_default = 0 +--source col_not_null.inc + +--echo ######################## +--echo # VARBINARY columns +--echo ######################## + +--source type_varbinary.inc +--let $col_type = VARBINARY(64) +--let $col_default = 'test' +--source col_not_null.inc + +--echo ######################## +--echo # BIT columns +--echo ######################## + +--source type_bit.inc +--let $col_type = BIT +--let $col_default = 1 +--source col_not_null.inc + +--echo ######################## +--echo # BLOB columns +--echo ######################## + +--source type_blob.inc + +--let $col_default = '' + +--let $col_type = BLOB +--source col_not_null.inc + +--let $col_type = TINYBLOB +--source col_not_null.inc + +--let $col_type = MEDIUMBLOB +--source col_not_null.inc + +--let $col_type = LONGBLOB +--source col_not_null.inc + +--echo ######################## +--echo # BOOL columns +--echo ######################## + +--source type_bool.inc +--let $col_type = BOOL +--let $col_default = '0' +--source col_not_null.inc + +--echo ######################## +--echo # CHAR columns +--echo ######################## + +--source type_char.inc +--let $col_type = CHAR +--let $col_default = '_' +--source col_not_null.inc + +--echo ######################## +--echo # VARCHAR columns +--echo ######################## + +--source type_varchar.inc +--let $col_type = VARCHAR(64) +--let $col_default = 'test default' +--source col_not_null.inc + +--echo ######################## +--echo # date and time columns +--echo ######################## + +set @col_opt_not_nullsave_time_zone=@@time_zone; +set time_zone='UTC'; + +--source type_date_time.inc + +SET TIMESTAMP=UNIX_TIMESTAMP('2013-12-12 12:12:12'); + +--let $col_type = DATE +--let $col_default = '2012-12-21' +--source 
col_not_null.inc + +--let $col_type = DATETIME +--let $col_default = '2012-12-21 12:21:12' +--source col_not_null.inc + +# Even with explicit-defaults-for-timestamps, we still can't use +# the standard include file, due to bug MySQL:68472 + +--let $col_type = TIMESTAMP +--let $col_default = '2012-12-21 12:21:12' +--source col_not_null_timestamp.inc + +--let $col_type = TIME +--let $col_default = '12:21:12' +--source col_not_null.inc + +--let $col_type = YEAR +--let $col_default = '2012' +--source col_not_null.inc + +--let $col_type = YEAR(2) +--let $col_default = '12' +--source col_not_null.inc + +set time_zone= @col_opt_not_nullsave_time_zone; + +--echo ######################## +--echo # ENUM columns +--echo ######################## + +--source type_enum.inc + +--let $col_type = ENUM('test1','test2','test3') +--let $col_default = 'test2' +--source col_not_null.inc + +--echo ######################## +--echo # Fixed point columns (NUMERIC, DECIMAL) +--echo ######################## + +--source type_fixed.inc + +--let $col_type = DECIMAL +--let $col_default = 1.1 +--source col_not_null.inc + +--let $col_type = NUMERIC +--let $col_default = 0 +--source col_not_null.inc + +--echo ######################## +--echo # Floating point columns (FLOAT, DOUBLE) +--echo ######################## + +--source type_float.inc + +--let $col_type = FLOAT +--let $col_default = 1.1 +--source col_not_null.inc + +--let $col_type = DOUBLE +--let $col_default = 0 +--source col_not_null.inc + +--echo ######################## +--echo # INT columns +--echo ######################## + +--source type_int.inc + +--let $col_type = INT +--let $col_default = 2147483647 +--source col_not_null.inc + +--let $col_type = TINYINT +--let $col_default = 127 +--source col_not_null.inc + +--let $col_type = SMALLINT +--let $col_default = 0 +--source col_not_null.inc + +--let $col_type = MEDIUMINT +--let $col_default = 1 +--source col_not_null.inc + +--let $col_type = BIGINT +--let $col_default = 
9223372036854775807 +--source col_not_null.inc + +--echo ######################## +--echo # SET columns +--echo ######################## + +--source type_set.inc +--let $col_type = SET('test1','test2','test3') +--let $col_default = 'test2,test3' +--source col_not_null.inc + +--echo ######################## +--echo # TEXT columns +--echo ######################## + +--source type_text.inc + +--let $col_default = '' + +--let $col_type = TEXT +--source col_not_null.inc + +--let $col_type = TINYTEXT +--source col_not_null.inc + +--let $col_type = MEDIUMTEXT +--source col_not_null.inc + +--let $col_type = LONGTEXT +--source col_not_null.inc + + +--let $col_type = +--let $col_default = +--let $extra_col_opts = + + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/col_opt_null.test b/storage/rocksdb/mysql-test/rocksdb/t/col_opt_null.test new file mode 100644 index 0000000000000..c41abb786426f --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/col_opt_null.test @@ -0,0 +1,220 @@ +--source include/have_rocksdb.inc + +# +# NULL column attribute +# + +let $extra_col_opts = NULL; + + +--echo ######################## +--echo # BINARY columns +--echo ######################## + +--source type_binary.inc +--let $col_type = BINARY +--let $col_default = 0 +--source col_null.inc + +--echo ######################## +--echo # VARBINARY columns +--echo ######################## + +--source type_varbinary.inc +--let $col_type = VARBINARY(64) +--let $col_default = 'test' +--source col_null.inc + +--echo ######################## +--echo # BIT columns +--echo ######################## + +--source type_bit.inc +--let $col_type = BIT +--let $col_default = 1 +--source col_null.inc + +--echo ######################## +--echo # BLOB columns +--echo ######################## + +--source type_blob.inc + +--let $col_default = '' + +--let $col_type = BLOB +--source col_null.inc + +--let $col_type = TINYBLOB +--source col_null.inc + +--let $col_type = MEDIUMBLOB +--source col_null.inc + +--let 
$col_type = LONGBLOB +--source col_null.inc + +--echo ######################## +--echo # BOOL columns +--echo ######################## + +--source type_bool.inc +--let $col_type = BOOL +--let $col_default = '0' +--source col_null.inc + + +--echo ######################## +--echo # CHAR columns +--echo ######################## + +--source type_char.inc +--let $col_type = CHAR +--let $col_default = '_' +--source col_null.inc + +--echo ######################## +--echo # VARCHAR columns +--echo ######################## + + +--source type_varchar.inc +--let $col_type = VARCHAR(64) +--let $col_default = 'test default' +--source col_null.inc + + +--echo ######################## +--echo # date and time columns +--echo ######################## + +set @col_opt_nullsave_time_zone=@@time_zone; +set time_zone='UTC'; + +--source type_date_time.inc + +--let $col_type = DATE +--let $col_default = '2012-12-21' +--source col_null.inc + +--let $col_type = DATETIME +--let $col_default = '2012-12-21 12:21:12' +--source col_null.inc + +--let $col_type = TIMESTAMP +--let $col_default = '2012-12-21 12:21:12' +--source col_null.inc + +--let $col_type = TIME +--let $col_default = '12:21:12' +--source col_null.inc + +--let $col_type = YEAR +--let $col_default = '2012' +--source col_null.inc + +--let $col_type = YEAR(2) +--let $col_default = '12' +--source col_null.inc + +set time_zone=@col_opt_nullsave_time_zone; + +--echo ######################## +--echo # ENUM columns +--echo ######################## + +--source type_enum.inc +--let $col_type = ENUM('test1','test2','test3') +--let $col_default = 'test2' +--source col_null.inc + +--echo ######################## +--echo # Fixed point columns (NUMERIC, DECIMAL) +--echo ######################## + +--source type_fixed.inc + +--let $col_type = DECIMAL +--let $col_default = 1.1 +--source col_null.inc + +--let $col_type = NUMERIC +--let $col_default = 0 +--source col_null.inc + +--echo ######################## +--echo # Floating point columns 
(FLOAT, DOUBLE) +--echo ######################## + +--source type_float.inc + +--let $col_type = FLOAT +--let $col_default = 1.1 +--source col_null.inc + +--let $col_type = DOUBLE +--let $col_default = 0 +--source col_null.inc + +--echo ######################## +--echo # INT columns +--echo ######################## + +--source type_int.inc + +--let $col_type = INT +--let $col_default = 2147483647 +--source col_null.inc + +--let $col_type = TINYINT +--let $col_default = 127 +--source col_null.inc + +--let $col_type = SMALLINT +--let $col_default = 0 +--source col_null.inc + +--let $col_type = MEDIUMINT +--let $col_default = 1 +--source col_null.inc + +--let $col_type = BIGINT +--let $col_default = 9223372036854775807 +--source col_null.inc + +--echo ######################## +--echo # SET columns +--echo ######################## + +--source type_set.inc +--let $col_type = SET('test1','test2','test3') +--let $col_default = 'test2,test3' +--source col_null.inc + + +--echo ######################## +--echo # TEXT columns +--echo ######################## + +--source type_text.inc + +--let $col_default = '' + +--let $col_type = TEXT +--source col_null.inc + +--let $col_type = TINYTEXT +--source col_null.inc + +--let $col_type = MEDIUMTEXT +--source col_null.inc + +--let $col_type = LONGTEXT +--source col_null.inc + + +--let $col_type = +--let $col_default = +--let $extra_col_opts = + + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/col_opt_unsigned.test b/storage/rocksdb/mysql-test/rocksdb/t/col_opt_unsigned.test new file mode 100644 index 0000000000000..25cda84ce2e5f --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/col_opt_unsigned.test @@ -0,0 +1,74 @@ +--source include/have_rocksdb.inc + +# +# UNSIGNED column attribute +# + +--let $extra_col_opts = UNSIGNED + +--echo ######################## +--echo # Fixed point columns (NUMERIC, DECIMAL) +--echo ######################## + +--source type_fixed.inc + +CREATE TABLE t1 ( + a DECIMAL UNSIGNED, + b NUMERIC 
UNSIGNED, + PRIMARY KEY (a) +) ENGINE=rocksdb; + +SHOW COLUMNS IN t1; + +INSERT INTO t1 (a,b) VALUES (1.0,-1.0); +INSERT INTO t1 (a,b) VALUES (-100,100); +--sorted_result +SELECT a,b FROM t1; +DROP TABLE t1; + +--echo ######################## +--echo # Floating point columns (FLOAT, DOUBLE) +--echo ######################## + +--source type_float.inc + +CREATE TABLE t1 ( + a DOUBLE UNSIGNED, + b FLOAT UNSIGNED, + PRIMARY KEY (b) +) ENGINE=rocksdb; + +SHOW COLUMNS IN t1; + +INSERT INTO t1 (a,b) VALUES (1.0,-1.0); +INSERT INTO t1 (a,b) VALUES (-100,100); +--sorted_result +SELECT a,b FROM t1; +DROP TABLE t1; + +--echo ######################## +--echo # INT columns +--echo ######################## + +--source type_int.inc + +CREATE TABLE t1 ( + t TINYINT UNSIGNED, + s SMALLINT UNSIGNED, + m MEDIUMINT UNSIGNED, + i INT UNSIGNED, + b BIGINT UNSIGNED, + PRIMARY KEY (b) +) ENGINE=rocksdb; + +SHOW COLUMNS IN t1; + +INSERT INTO t1 (t,s,m,i,b) VALUES (255,65535,16777215,4294967295,18446744073709551615); +INSERT INTO t1 (t,s,m,i,b) VALUES (-1,-1,-1,-1,-1); +--sorted_result +SELECT t,s,m,i,b FROM t1; + +DROP TABLE t1; + +--let $extra_col_opts = + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/col_opt_zerofill.test b/storage/rocksdb/mysql-test/rocksdb/t/col_opt_zerofill.test new file mode 100644 index 0000000000000..37982ae096409 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/col_opt_zerofill.test @@ -0,0 +1,67 @@ +--source include/have_rocksdb.inc + +# +# ZEROFILL column attribute +# + +let $extra_col_opts = ZEROFILL; + +--echo ######################## +--echo # Fixed point columns (NUMERIC, DECIMAL) +--echo ######################## + +--source type_fixed.inc + +CREATE TABLE t1 ( + a DECIMAL ZEROFILL, + b NUMERIC ZEROFILL, + PRIMARY KEY (a) +) ENGINE=rocksdb; + +SHOW COLUMNS IN t1; + +INSERT INTO t1 (a,b) VALUES (1.1,1234); +SELECT a,b FROM t1; +DROP TABLE t1; + +--echo ######################## +--echo # Floating point columns (FLOAT, DOUBLE) +--echo 
######################## + +--source type_float.inc + +CREATE TABLE t1 ( + a DOUBLE ZEROFILL, + b FLOAT ZEROFILL, + PRIMARY KEY (b) +) ENGINE=rocksdb; + +SHOW COLUMNS IN t1; + +INSERT INTO t1 (a,b) VALUES (1,1234.5); +SELECT a,b FROM t1; +DROP TABLE t1; + +--echo ######################## +--echo # INT columns +--echo ######################## + +--source type_int.inc + +CREATE TABLE t1 ( + t TINYINT ZEROFILL, + s SMALLINT ZEROFILL, + m MEDIUMINT ZEROFILL, + i INT ZEROFILL, + b BIGINT ZEROFILL, + PRIMARY KEY (b) +) ENGINE=rocksdb; + +SHOW COLUMNS IN t1; + +INSERT INTO t1 (t,s,m,i,b) VALUES (1,10,100,1000,0); +SELECT t,s,m,i,b FROM t1; +DROP TABLE t1; + +--let $extra_col_opts = + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/collation-master.opt b/storage/rocksdb/mysql-test/rocksdb/t/collation-master.opt new file mode 100644 index 0000000000000..79e591636fde8 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/collation-master.opt @@ -0,0 +1 @@ +--rocksdb_strict_collation_check=ON diff --git a/storage/rocksdb/mysql-test/rocksdb/t/collation.test b/storage/rocksdb/mysql-test/rocksdb/t/collation.test new file mode 100644 index 0000000000000..29c2c2886b521 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/collation.test @@ -0,0 +1,188 @@ +--source include/have_rocksdb.inc +# MariaDB doesn't have server variables to check for GCC version, so the +# following check is commented out: +# --source include/have_fullregex.inc + +SET @start_global_value = @@global.ROCKSDB_STRICT_COLLATION_EXCEPTIONS; + +--disable_warnings +DROP TABLE IF EXISTS t1; +--enable_warnings + +# ci non-indexed column is allowed +CREATE TABLE t1 (id INT primary key, value varchar(50), value2 varbinary(50), value3 text) engine=rocksdb charset utf8; +DROP TABLE t1; + +# ci indexed column is not allowed +--error ER_UNKNOWN_ERROR +CREATE TABLE t1 (id INT primary key, value varchar(50), value2 varbinary(50), value3 text, index(value)) engine=rocksdb charset utf8; +--error ER_UNKNOWN_ERROR 
+CREATE TABLE t1 (id INT primary key, value varchar(50), value2 varbinary(50), value3 text, index(value3(50))) engine=rocksdb charset utf8; +# ci indexed column with rocksdb_strict_collation_check=OFF is allowed. +SET GLOBAL rocksdb_strict_collation_check=0; +CREATE TABLE t1 (id INT primary key, value varchar(50), value2 varbinary(50), value3 text, index(value3(50))) engine=rocksdb charset utf8; +DROP TABLE t1; +SET GLOBAL rocksdb_strict_collation_check=1; + +# cs indexed column is allowed +CREATE TABLE t1 (id INT primary key, value varchar(50), value2 varbinary(50), value3 text, index(value2)) engine=rocksdb charset utf8; +DROP TABLE t1; + +# cs latin1_bin is allowed +CREATE TABLE t1 (id varchar(20), value varchar(50), value2 varchar(50), value3 text, primary key (id), index(value, value2)) engine=rocksdb charset latin1 collate latin1_bin; +DROP TABLE t1; + +# cs utf8_bin is allowed +CREATE TABLE t1 (id varchar(20), value varchar(50), value2 varchar(50), value3 text, primary key (id), index(value, value2)) engine=rocksdb charset utf8 collate utf8_bin; +DROP TABLE t1; + +# cs mixed latin1_bin and utf8_bin is allowed +CREATE TABLE t1 (id varchar(20) collate latin1_bin, value varchar(50) collate utf8_bin, value2 varchar(50) collate latin1_bin, value3 text, primary key (id), index(value, value2)) engine=rocksdb; +DROP TABLE t1; + +# ci indexed column is not allowed unless table name is in exception list +SET GLOBAL rocksdb_strict_collation_exceptions=t1; +CREATE TABLE t1 (id INT primary key, value varchar(50), index(value)) engine=rocksdb charset utf8; +DROP TABLE t1; +--error ER_UNKNOWN_ERROR +CREATE TABLE t2 (id INT primary key, value varchar(50), index(value)) engine=rocksdb charset utf8; + +# test regex for exception list +SET GLOBAL rocksdb_strict_collation_exceptions="t.*"; +CREATE TABLE t123 (id INT primary key, value varchar(50), index(value)) engine=rocksdb charset utf8; +DROP TABLE t123; +--error ER_UNKNOWN_ERROR +CREATE TABLE s123 (id INT primary key, value 
varchar(50), index(value)) engine=rocksdb charset utf8; + +SET GLOBAL rocksdb_strict_collation_exceptions=".t.*"; +CREATE TABLE xt123 (id INT primary key, value varchar(50), index(value)) engine=rocksdb charset utf8; +DROP TABLE xt123; +--error ER_UNKNOWN_ERROR +CREATE TABLE t123 (id INT primary key, value varchar(50), index(value)) engine=rocksdb charset utf8; + +# test multiple entries in the list with commas +SET GLOBAL rocksdb_strict_collation_exceptions="s.*,t.*"; +CREATE TABLE s1 (id INT primary key, value varchar(50), index(value)) engine=rocksdb charset utf8; +DROP TABLE s1; +CREATE TABLE t1 (id INT primary key, value varchar(50), index(value)) engine=rocksdb charset utf8; +DROP TABLE t1; +--error ER_UNKNOWN_ERROR +CREATE TABLE u1 (id INT primary key, value varchar(50), index(value)) engine=rocksdb charset utf8; + +# test multiple entries in the list with vertical bar +SET GLOBAL rocksdb_strict_collation_exceptions="s.*|t.*"; +CREATE TABLE s1 (id INT primary key, value varchar(50), index(value)) engine=rocksdb charset utf8; +DROP TABLE s1; +CREATE TABLE t1 (id INT primary key, value varchar(50), index(value)) engine=rocksdb charset utf8; +DROP TABLE t1; +--error ER_UNKNOWN_ERROR +CREATE TABLE u1 (id INT primary key, value varchar(50), index(value)) engine=rocksdb charset utf8; + +# test multiple entries in the list and extra comma at the front +SET GLOBAL rocksdb_strict_collation_exceptions=",s.*,t.*"; +CREATE TABLE s1 (id INT primary key, value varchar(50), index(value)) engine=rocksdb charset utf8; +DROP TABLE s1; +CREATE TABLE t1 (id INT primary key, value varchar(50), index(value)) engine=rocksdb charset utf8; +DROP TABLE t1; +--error ER_UNKNOWN_ERROR +CREATE TABLE u1 (id INT primary key, value varchar(50), index(value)) engine=rocksdb charset utf8; + +# test multiple entries in the list and extra vertical bar at the front +SET GLOBAL rocksdb_strict_collation_exceptions="|s.*|t.*"; +CREATE TABLE s1 (id INT primary key, value varchar(50), index(value)) 
engine=rocksdb charset utf8; +DROP TABLE s1; +CREATE TABLE t1 (id INT primary key, value varchar(50), index(value)) engine=rocksdb charset utf8; +DROP TABLE t1; +--error ER_UNKNOWN_ERROR +CREATE TABLE u1 (id INT primary key, value varchar(50), index(value)) engine=rocksdb charset utf8; + +# test multiple entries in the list and extra comma in the middle +SET GLOBAL rocksdb_strict_collation_exceptions="s.*,,t.*"; +CREATE TABLE s1 (id INT primary key, value varchar(50), index(value)) engine=rocksdb charset utf8; +DROP TABLE s1; +CREATE TABLE t1 (id INT primary key, value varchar(50), index(value)) engine=rocksdb charset utf8; +DROP TABLE t1; +--error ER_UNKNOWN_ERROR +CREATE TABLE u1 (id INT primary key, value varchar(50), index(value)) engine=rocksdb charset utf8; + +# test multiple entries in the list and extra vertical bar in the middle +SET GLOBAL rocksdb_strict_collation_exceptions="s.*||t.*"; +CREATE TABLE s1 (id INT primary key, value varchar(50), index(value)) engine=rocksdb charset utf8; +DROP TABLE s1; +CREATE TABLE t1 (id INT primary key, value varchar(50), index(value)) engine=rocksdb charset utf8; +DROP TABLE t1; +--error ER_UNKNOWN_ERROR +CREATE TABLE u1 (id INT primary key, value varchar(50), index(value)) engine=rocksdb charset utf8; + +# test multiple entries in the list and extra comma at the end +SET GLOBAL rocksdb_strict_collation_exceptions="s.*,t.*,"; +CREATE TABLE s1 (id INT primary key, value varchar(50), index(value)) engine=rocksdb charset utf8; +DROP TABLE s1; +CREATE TABLE t1 (id INT primary key, value varchar(50), index(value)) engine=rocksdb charset utf8; +DROP TABLE t1; +--error ER_UNKNOWN_ERROR +CREATE TABLE u1 (id INT primary key, value varchar(50), index(value)) engine=rocksdb charset utf8; + +# test multiple entries in the list and extra vertical bar at the end +SET GLOBAL rocksdb_strict_collation_exceptions="s.*|t.*|"; +CREATE TABLE s1 (id INT primary key, value varchar(50), index(value)) engine=rocksdb charset utf8; +DROP TABLE 
s1; +CREATE TABLE t1 (id INT primary key, value varchar(50), index(value)) engine=rocksdb charset utf8; +DROP TABLE t1; +--error ER_UNKNOWN_ERROR +CREATE TABLE u1 (id INT primary key, value varchar(50), index(value)) engine=rocksdb charset utf8; + +# test multiple entries in the list and tons of commas and vertical bars just for the fun of it +SET GLOBAL rocksdb_strict_collation_exceptions="||||,,,,s.*,,|,,||,t.*,,|||,,,"; +CREATE TABLE s1 (id INT primary key, value varchar(50), index(value)) engine=rocksdb charset utf8; +DROP TABLE s1; +CREATE TABLE t1 (id INT primary key, value varchar(50), index(value)) engine=rocksdb charset utf8; +DROP TABLE t1; +--error ER_UNKNOWN_ERROR +CREATE TABLE u1 (id INT primary key, value varchar(50), index(value)) engine=rocksdb charset utf8; + +# test allowing alters to create temporary tables +SET GLOBAL rocksdb_strict_collation_exceptions='t1'; +CREATE TABLE t1 (id INT primary key, value varchar(50), index(value)) engine=rocksdb; +ALTER TABLE t1 AUTO_INCREMENT=1; +DROP TABLE t1; +--error ER_UNKNOWN_ERROR +CREATE TABLE t2 (id INT primary key, value varchar(50), index(value)) engine=rocksdb; +CREATE TABLE t2 (id INT primary key, value varchar(50)) engine=rocksdb; +--error ER_UNKNOWN_ERROR +ALTER TABLE t2 ADD INDEX(value); +DROP TABLE t2; + + +# test invalid regex (missing end bracket) +SET GLOBAL rocksdb_strict_collation_exceptions="[a-b"; +let SEARCH_FILE=$MYSQLTEST_VARDIR/log/mysqld.1.err; +let SEARCH_PATTERN=Invalid pattern in strict_collation_exceptions: \[a-b; +source include/search_pattern_in_file.inc; +--error ER_UNKNOWN_ERROR +CREATE TABLE a (id INT PRIMARY KEY, value varchar(50), index(value)) engine=rocksdb charset utf8; +SET GLOBAL rocksdb_strict_collation_exceptions="[a-b]"; +CREATE TABLE a (id INT PRIMARY KEY, value varchar(50), index(value)) engine=rocksdb charset utf8; +CREATE TABLE b (id INT PRIMARY KEY, value varchar(50), index(value)) engine=rocksdb charset utf8; +--error ER_UNKNOWN_ERROR +CREATE TABLE c (id INT 
PRIMARY KEY, value varchar(50), index(value)) engine=rocksdb charset utf8; +DROP TABLE a, b; + +call mtr.add_suppression("Invalid pattern in strict_collation_exceptions:"); +# test invalid regex (trailing escape) +SET GLOBAL rocksdb_strict_collation_exceptions="abc\\"; +let SEARCH_PATTERN=Invalid pattern in strict_collation_exceptions: abc; +source include/search_pattern_in_file.inc; +--error ER_UNKNOWN_ERROR +CREATE TABLE abc (id INT PRIMARY KEY, value varchar(50), index(value)) engine=rocksdb charset utf8; +SET GLOBAL rocksdb_strict_collation_exceptions="abc"; +CREATE TABLE abc (id INT PRIMARY KEY, value varchar(50), index(value)) engine=rocksdb charset utf8; +--error ER_UNKNOWN_ERROR +CREATE TABLE abcd (id INT PRIMARY KEY, value varchar(50), index(value)) engine=rocksdb charset utf8; +DROP TABLE abc; + +# test bad regex (null caused a crash) - Issue 493 +SET GLOBAL rocksdb_strict_collation_exceptions=null; + +# cleanup +SET GLOBAL rocksdb_strict_collation_exceptions=@start_global_value; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/collation_exception-master.opt b/storage/rocksdb/mysql-test/rocksdb/t/collation_exception-master.opt new file mode 100644 index 0000000000000..13563edb43985 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/collation_exception-master.opt @@ -0,0 +1,2 @@ +--rocksdb_strict_collation_check=ON +--rocksdb_strict_collation_exceptions='r1.lol' diff --git a/storage/rocksdb/mysql-test/rocksdb/t/collation_exception.test b/storage/rocksdb/mysql-test/rocksdb/t/collation_exception.test new file mode 100644 index 0000000000000..7f741e286b1f9 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/collation_exception.test @@ -0,0 +1,27 @@ +CREATE TABLE `r1.lol` ( + `c1` int(10) NOT NULL DEFAULT '0', + `c2` int(11) NOT NULL DEFAULT '0', + `c3` int(1) NOT NULL DEFAULT '0', + `c4` int(11) NOT NULL DEFAULT '0', + `c5` int(11) NOT NULL DEFAULT '0', + `c6` varchar(100) NOT NULL DEFAULT '', + `c7` varchar(100) NOT NULL DEFAULT '', + `c8` 
varchar(255) NOT NULL DEFAULT '', + `c9` int(10) NOT NULL DEFAULT '125', + `c10` int(10) NOT NULL DEFAULT '125', + `c11` text NOT NULL, + `c12` int(11) NOT NULL DEFAULT '0', + `c13` int(10) NOT NULL DEFAULT '0', + `c14` text NOT NULL, + `c15` blob NOT NULL, + `c16` int(11) NOT NULL DEFAULT '0', + `c17` int(11) NOT NULL DEFAULT '0', + `c18` int(11) NOT NULL DEFAULT '0', + PRIMARY KEY (`c1`), + KEY i1 (`c4`), + KEY i2 (`c7`), + KEY i3 (`c2`)) ENGINE=RocksDB DEFAULT CHARSET=latin1; + +DROP INDEX i1 ON `r1.lol`; +DROP TABLE `r1.lol`; + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/commit_in_the_middle_ddl.test b/storage/rocksdb/mysql-test/rocksdb/t/commit_in_the_middle_ddl.test new file mode 100644 index 0000000000000..02f45779b9ad4 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/commit_in_the_middle_ddl.test @@ -0,0 +1,27 @@ +--source include/have_rocksdb.inc + +--disable_warnings +DROP TABLE IF EXISTS a; +--enable_warnings + +create table a (id int, value int, primary key (id) comment 'cf_a') engine=rocksdb; + +--disable_query_log +let $i = 1; +while ($i <= 100000) { + let $insert = INSERT INTO a VALUES($i, $i); + inc $i; + eval $insert; +} +--enable_query_log + +set rocksdb_bulk_load=1; +set rocksdb_commit_in_the_middle=1; +alter table a add index v (value) COMMENT 'cf_a'; +set rocksdb_bulk_load=0; +set rocksdb_commit_in_the_middle=0; +select count(*) from a force index(primary); +select count(*) from a force index(v); + +DROP TABLE a; + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/compact_deletes-master.opt b/storage/rocksdb/mysql-test/rocksdb/t/compact_deletes-master.opt new file mode 100644 index 0000000000000..3b4871f864a82 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/compact_deletes-master.opt @@ -0,0 +1,3 @@ +--rocksdb_debug_optimizer_n_rows=1000 +--rocksdb_records_in_range=50 +--rocksdb_compaction_sequential_deletes_count_sd=1 diff --git a/storage/rocksdb/mysql-test/rocksdb/t/compact_deletes.test 
b/storage/rocksdb/mysql-test/rocksdb/t/compact_deletes.test new file mode 100644 index 0000000000000..b61da676b48b7 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/compact_deletes.test @@ -0,0 +1,88 @@ +--source include/not_windows.inc +--source include/have_rocksdb.inc + +--disable_warnings +DROP TABLE IF EXISTS r1; +--enable_warnings + +create table r1 ( + id1 int, + id2 int, + type int, + value varchar(100), + value2 int, + value3 int, + primary key (type, id1, id2), + index id1_type (id1, type, value2, value, id2) +) engine=rocksdb collate latin1_bin; + +select 'loading data'; + +--disable_query_log +let $i=0; +while ($i<1000) +{ + inc $i; + eval insert r1(id1, id2, type, value, value2, value3) + values($i,$i,$i, 'xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx',$i,$i); +} +--enable_query_log + +set global rocksdb_force_flush_memtable_now=1; +optimize table r1; + +--echo Test 1: Do a bunch of updates without setting the compaction sysvar +--echo Expect: no compaction +let $window = 0; +let $deletes = 0; +let $file_size = 0; +let $secondary_only = 0; +let $primary = 1; +let $no_more_deletes = 0; +--source compact_deletes_test.inc + +--echo Test 2: Do a bunch of updates and set the compaction sysvar +--echo Expect: compaction +let $window = 1000; +let $deletes = 990; +let $file_size = 0; +let $secondary_only = 0; +let $primary = 1; +let $no_more_deletes = 1; +--source compact_deletes_test.inc + +--echo Test 3: Do a bunch of updates and set the compaction sysvar and a file size to something large +--echo Expect: no compaction +let $window = 1000; +let $deletes = 1000; +let $file_size = 1000000; +let $secondary_only = 0; +let $primary = 1; +let $no_more_deletes = 0; +--source compact_deletes_test.inc + +--echo Test 4: Do a bunch of secondary key updates and set the compaction sysvar +--echo Expect: compaction +let $window = 1000; +let $deletes = 50; +let $file_size = 0; +let $secondary_only = 1; +let $primary = 0; +let $no_more_deletes = 1; +--source 
compact_deletes_test.inc + +--echo Test 5: Do a bunch of secondary key updates and set the compaction sysvar, +--echo and rocksdb_compaction_sequential_deletes_count_sd turned on +--echo Expect: compaction +let $window = 1000; +let $deletes = 50; +let $file_size = 0; +let $secondary_only = 1; +let $primary = 0; +let $no_more_deletes = 1; +SET @save_rocksdb_compaction_sequential_deletes_count_sd = @@global.rocksdb_compaction_sequential_deletes_count_sd; +SET GLOBAL rocksdb_compaction_sequential_deletes_count_sd= ON; +--source compact_deletes_test.inc +SET GLOBAL rocksdb_compaction_sequential_deletes_count_sd= @save_rocksdb_compaction_sequential_deletes_count_sd; + +drop table r1; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/compact_deletes_test.inc b/storage/rocksdb/mysql-test/rocksdb/t/compact_deletes_test.inc new file mode 100644 index 0000000000000..19a16fbe3a798 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/compact_deletes_test.inc @@ -0,0 +1,72 @@ +# Usage: +# let $window = ; +# let $deletes = ; +# let $file_size = ; +# --source compact_deletes_test.inc +# + +let $save_rocksdb_compaction_sequential_deletes_window = `SELECT @@rocksdb_compaction_sequential_deletes_window`; +eval set global rocksdb_compaction_sequential_deletes_window=$window; +let $save_rocksdb_compaction_sequential_deletes = `SELECT @@rocksdb_compaction_sequential_deletes`; +eval set global rocksdb_compaction_sequential_deletes= $deletes; +let $save_rocksdb_compaction_sequential_deletes_file_size = `SELECT @@rocksdb_compaction_sequential_deletes_file_size`; +eval set global rocksdb_compaction_sequential_deletes_file_size=$file_size; +--disable_query_log +let $i=0; +while ($i<1000) +{ + inc $i; + if ($secondary_only) + { + eval update r1 set value2=value2+1 where id1=$i; + } + if ($primary) + { + eval update r1 set id2=id2+10000 where id1=500; + } +} +--enable_query_log +set global rocksdb_force_flush_memtable_now=1; +--sleep 1 + +--disable_query_log +let $wait_timeout= 300; # 
Override default 30 seconds with 300. +let $wait_condition = select count(*) = 0 + as c from information_schema.rocksdb_global_info + where TYPE = 'DDL_DROP_INDEX_ONGOING'; +--source include/wait_condition.inc +--enable_query_log + +let NO_MORE_DELETES=$no_more_deletes; +perl; + $num_retries=240; + $retry=0; + print "wait_for_delete: $ENV{no_more_deletes}\n"; + while ($retry++ < $num_retries) { + $total_d=$total_e=0; + for $f (<$ENV{MYSQLTEST_VARDIR}/mysqld.1/data/.rocksdb/*.sst>) { + # excluding system cf + $filename= "$ENV{MARIAROCKS_SST_DUMP} --command=scan --output_hex --file=$f"; + open(D, '-|', $filename) || die("Can't open file $filename: $!"); + while () { + next unless /'(\d{8})/ and $1 >= 8; + $total_d++ if /: [07]/; + $total_e++ if /: 1/; + } + close D; + } + last if $total_e and not ($total_d and $ENV{no_more_deletes}); + sleep 1; + } + + unless ($total_e) { + print "No records in the database\n"; + exit; + } + + print $total_d ? "There are deletes left\n" : "No more deletes left\n"; +EOF + +eval SET GLOBAL rocksdb_compaction_sequential_deletes= $save_rocksdb_compaction_sequential_deletes; +eval SET GLOBAL rocksdb_compaction_sequential_deletes_file_size= $save_rocksdb_compaction_sequential_deletes_file_size; +eval SET GLOBAL rocksdb_compaction_sequential_deletes_window= $save_rocksdb_compaction_sequential_deletes_window; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/compression_zstd.test b/storage/rocksdb/mysql-test/rocksdb/t/compression_zstd.test new file mode 100644 index 0000000000000..c2216f768d04f --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/compression_zstd.test @@ -0,0 +1,14 @@ +--source include/have_rocksdb.inc + +let $no_zstd=`select @@rocksdb_supported_compression_types NOT LIKE '%ZSTD%'`; + +if ($no_zstd) +{ + -- Skip Requires RocksDB to be built with ZStandard Compression support +} + +--let $restart_parameters=--rocksdb_default_cf_options=compression_per_level=kZSTDNotFinalCompression;compression_opts=-14:4:0; +--source 
include/restart_mysqld.inc + +create table t (id int primary key) engine=rocksdb; +drop table t; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/concurrent_alter.test b/storage/rocksdb/mysql-test/rocksdb/t/concurrent_alter.test new file mode 100644 index 0000000000000..9ee58aa5217af --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/concurrent_alter.test @@ -0,0 +1,37 @@ +--source include/have_rocksdb.inc + +# Bash +--source include/not_windows.inc + +# +# Generate concurrent requests to alter a table using mysqlslap +# + +--disable_warnings +DROP DATABASE IF EXISTS mysqlslap; +--enable_warnings + +CREATE DATABASE mysqlslap; + +use mysqlslap; + +CREATE TABLE a1 (a int, b int) ENGINE=ROCKSDB; +INSERT INTO a1 VALUES (1, 1); + +--write_file $MYSQL_TMP_DIR/concurrent_alter.sh +$MYSQL_SLAP --silent --delimiter=";" --query="alter table a1 add index bx(b); alter table a1 drop index bx" --concurrency=1 --iterations=25 & +$MYSQL_SLAP --silent --delimiter=";" --query="alter table a1 add index ax(a); alter table a1 drop index ax" --concurrency=1 --iterations=25 & +sleep 2 +$MYSQL_SLAP --silent --delimiter=";" --query="select * from a1 where a=1" --concurrency=16 --iterations=1000 & +$MYSQL_SLAP --silent --delimiter=";" --query="select * from a1 where b=1" --concurrency=16 --iterations=1000 +sleep 2 +$MYSQL_SLAP --silent --delimiter=";" --query="select * from a1 where a=1" --concurrency=16 --iterations=1000 & +$MYSQL_SLAP --silent --delimiter=";" --query="select * from a1 where b=1" --concurrency=16 --iterations=1000 +wait +EOF + +--exec bash $MYSQL_TMP_DIR/concurrent_alter.sh + +SHOW CREATE TABLE a1; + +DROP DATABASE mysqlslap; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/cons_snapshot_read_committed.test b/storage/rocksdb/mysql-test/rocksdb/t/cons_snapshot_read_committed.test new file mode 100644 index 0000000000000..4dfa5abbbbbb0 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/cons_snapshot_read_committed.test @@ -0,0 +1,6 @@ +--source 
include/have_rocksdb.inc + +let $trx_isolation = READ COMMITTED; + +--source consistent_snapshot.inc + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/cons_snapshot_repeatable_read.test b/storage/rocksdb/mysql-test/rocksdb/t/cons_snapshot_repeatable_read.test new file mode 100644 index 0000000000000..c9f28dbcbe416 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/cons_snapshot_repeatable_read.test @@ -0,0 +1,6 @@ +--source include/have_rocksdb.inc + +let $trx_isolation = REPEATABLE READ; + +--source consistent_snapshot.inc + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/cons_snapshot_serializable.test b/storage/rocksdb/mysql-test/rocksdb/t/cons_snapshot_serializable.test new file mode 100644 index 0000000000000..57b45050fea86 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/cons_snapshot_serializable.test @@ -0,0 +1,6 @@ +--source include/have_rocksdb.inc + +let $trx_isolation = SERIALIZABLE; + +--source consistent_snapshot.inc + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/consistent_snapshot.inc b/storage/rocksdb/mysql-test/rocksdb/t/consistent_snapshot.inc new file mode 100644 index 0000000000000..be01338cb850a --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/consistent_snapshot.inc @@ -0,0 +1,136 @@ +# +# TRANSACTION WITH CONSISTENT SNAPSHOT +# + +--enable_connect_log + +# Save the initial number of concurrent sessions +--source include/count_sessions.inc + +--disable_warnings +DROP TABLE IF EXISTS t1; +--enable_warnings + +connect (con1,localhost,root,,); +connect (con2,localhost,root,,); + +connection con1; + +CREATE TABLE t1 (a INT, pk INT AUTO_INCREMENT PRIMARY KEY) ENGINE=ROCKSDB; +eval SET SESSION TRANSACTION ISOLATION LEVEL $trx_isolation; + +# While a consistent snapshot transaction is executed, +# no external inserts should be visible to the transaction. 
+# But it should only work this way for REPEATABLE-READ and SERIALIZABLE + +--error 0,ER_UNKNOWN_ERROR +START TRANSACTION WITH CONSISTENT SNAPSHOT; +--echo ERROR: $mysql_errno + +connection con2; +select * from information_schema.rocksdb_dbstats where stat_type='DB_NUM_SNAPSHOTS'; + +connection con1; +COMMIT; + +# verifying snapshot is released after finishing transaction +connection con2; +select * from information_schema.rocksdb_dbstats where stat_type='DB_NUM_SNAPSHOTS'; + +connection con1; +--error 0,ER_UNKNOWN_ERROR +START TRANSACTION WITH CONSISTENT SNAPSHOT; +--echo ERROR: $mysql_errno + +connection con2; +INSERT INTO t1 (a) VALUES (1); + +connection con1; +--echo # If consistent read works on this isolation level ($trx_isolation), the following SELECT should not return the value we inserted (1) +SELECT a FROM t1; +COMMIT; + +connection default; +disconnect con1; +disconnect con2; +DROP TABLE t1; + +connect (con1,localhost,root,,); +connect (con2,localhost,root,,); + +connection con1; +CREATE TABLE r1 (id int primary key, value int, value2 int) engine=ROCKSDB; +eval SET SESSION TRANSACTION ISOLATION LEVEL $trx_isolation; +insert into r1 values (1,1,1),(2,2,2),(3,3,3),(4,4,4); + +BEGIN; + +connection con2; +INSERT INTO r1 values (5,5,5); + +connection con1; +SELECT * FROM r1; # 5 + +connection con2; +INSERT INTO r1 values (6,6,6); + +connection con1; +SELECT * FROM r1; # 5 +COMMIT; +SELECT * FROM r1; # 6 + +--error 0,ER_UNKNOWN_ERROR +START TRANSACTION WITH CONSISTENT SNAPSHOT; +--echo ERROR: $mysql_errno + +connection con2; +INSERT INTO r1 values (7,7,7); + +connection con1; +SELECT * FROM r1; # 6 + +connection con2; +INSERT INTO r1 values (8,8,8); + +connection con1; +SELECT * FROM r1; # 6 +COMMIT; +SELECT * FROM r1; # 8 + +--error 0,ER_UNKNOWN_ERROR +START TRANSACTION WITH CONSISTENT SNAPSHOT; +--echo ERROR: $mysql_errno + +connection con2; +INSERT INTO r1 values (9,9,9); + +connection con1; +--error 0,ER_UNKNOWN_ERROR +START TRANSACTION WITH CONSISTENT 
SNAPSHOT; +--echo ERROR: $mysql_errno + +connection con2; +INSERT INTO r1 values (10,10,10); + +connection con1; +SELECT * FROM r1; # 9 + +--error 0,ER_UNKNOWN_ERROR +START TRANSACTION WITH CONSISTENT SNAPSHOT; +--echo ERROR: $mysql_errno +# Succeeds with Read Committed, Fails with Repeatable Read +--error 0,ER_UNKNOWN_ERROR +INSERT INTO r1 values (11,11,11); +--echo ERROR: $mysql_errno +SELECT * FROM r1; # self changes should be visible + + +drop table r1; + +connection default; +disconnect con1; +disconnect con2; + + +--source include/wait_until_count_sessions.inc + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/corrupted_data_reads_debug.test b/storage/rocksdb/mysql-test/rocksdb/t/corrupted_data_reads_debug.test new file mode 100644 index 0000000000000..e1f38dd70672e --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/corrupted_data_reads_debug.test @@ -0,0 +1,80 @@ +--source include/have_rocksdb.inc + +--echo # +--echo # Test how MyRocks handles reading corrupted data from disk. +--echo # Data corruption is simulated at source-code level. 
+--echo # + +--source include/have_debug.inc + + +--echo # +--echo # A test for case when data in the table *record* is longer +--echo # than table DDL expects it to be +--echo # +create table t1 ( + pk int not null primary key, + col1 varchar(10) +) engine=rocksdb; + +insert into t1 values (1,1),(2,2),(3,3); + +select * from t1; + +set @tmp1=@@rocksdb_verify_row_debug_checksums; +set rocksdb_verify_row_debug_checksums=1; +set session debug_dbug= "+d,myrocks_simulate_bad_row_read1"; +--error ER_GET_ERRNO +select * from t1 where pk=1; +set session debug_dbug= "-d,myrocks_simulate_bad_row_read1"; +set rocksdb_verify_row_debug_checksums=@tmp1; + +select * from t1 where pk=1; + +set session debug_dbug= "+d,myrocks_simulate_bad_row_read2"; +--error ER_GET_ERRNO +select * from t1 where pk=1; +set session debug_dbug= "-d,myrocks_simulate_bad_row_read2"; + +set session debug_dbug= "+d,myrocks_simulate_bad_row_read3"; +--error ER_GET_ERRNO +select * from t1 where pk=1; +set session debug_dbug= "-d,myrocks_simulate_bad_row_read3"; + +insert into t1 values(4,'0123456789'); +select * from t1; +drop table t1; + +--echo # +--echo # A test for case when index data is longer than table DDL +--echo # expects it to be +--echo # + +create table t2 ( + pk varchar(4) not null primary key, + col1 int not null +) engine=rocksdb collate latin1_bin; + +insert into t2 values ('ABCD',1); +select * from t2; +set session debug_dbug= "+d,myrocks_simulate_bad_pk_read1"; +--error ER_GET_ERRNO +select * from t2; +set session debug_dbug= "-d,myrocks_simulate_bad_pk_read1"; + +drop table t2; + +create table t2 ( + pk varchar(4) not null primary key, + col1 int not null +) engine=rocksdb; + +insert into t2 values ('ABCD',1); + +select * from t2; +set session debug_dbug= "+d,myrocks_simulate_bad_pk_read1"; +--error ER_GET_ERRNO +select * from t2; +set session debug_dbug= "-d,myrocks_simulate_bad_pk_read1"; + +drop table t2; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/create_table.test 
b/storage/rocksdb/mysql-test/rocksdb/t/create_table.test new file mode 100644 index 0000000000000..4fffe7497c482 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/create_table.test @@ -0,0 +1,192 @@ +--source include/have_rocksdb.inc + +# +# Basic CREATE TABLE statements +# + +############################################# +# TODO: +# A part of the test is currently disabled +# because temporary tables are not supported +############################################# + +--disable_warnings +DROP TABLE IF EXISTS t1,t2; +--enable_warnings + +# Simple create table with minimal table options +# which are defined in have_engine.inc +# (default empty) plus ENGINE= + +CREATE TABLE t1 (a INT PRIMARY KEY) ENGINE=rocksdb; +SHOW CREATE TABLE t1; + +# IF NOT EXISTS +CREATE TABLE IF NOT EXISTS t1 (a INT PRIMARY KEY) ENGINE=rocksdb; + +# CREATE .. LIKE + +CREATE TABLE t2 LIKE t1; +SHOW CREATE TABLE t2; + +--error ER_ILLEGAL_HA_CREATE_OPTION +CREATE TEMPORARY TABLE t2 (a INT PRIMARY KEY) ENGINE=rocksdb; + +--disable_parsing + +DROP TABLE t2; + +CREATE TEMPORARY TABLE t2 LIKE t1; + +SHOW CREATE TABLE t2; +DROP TEMPORARY TABLE t2; + +--enable_parsing + +DROP TABLE t2; + +DROP TABLE IF EXISTS t1; + +# CREATE .. 
AS SELECT + +# Use the engine as default + +SET default_storage_engine = rocksdb; + +CREATE TABLE t1 (a INT PRIMARY KEY); +SHOW CREATE TABLE t1; +DROP TABLE t1; + +CREATE TABLE t1 (a INT PRIMARY KEY) AS SELECT 1 AS a UNION SELECT 2 AS a; +SHOW CREATE TABLE t1; +--sorted_result +SELECT * FROM t1; + +# Just to add FLUSH LOGS into the mix while we are in the most common test +FLUSH LOGS; + +DROP TABLE IF EXISTS t1; + +# CREATE TABLE with MAX_INDEXES (64) keys and no primary key +# MyRocks adds a hidden primary key, so make sure we don't break anything +CREATE TABLE t1(c1 INT,c2 INT,c3 INT,c4 INT,c5 INT,c6 INT,c7 INT,c8 INT,c9 INT, + c10 INT,c11 INT,c12 INT,c13 INT,c14 INT,c15 INT,c16 INT,c17 INT, + c18 INT,c19 INT,c20 INT,c21 INT,c22 INT,c23 INT,c24 INT,c25 INT, + c26 INT,c27 INT,c28 INT,c29 INT,c30 INT,c31 INT,c32 INT,c33 INT, + c34 INT,c35 INT,c36 INT,c37 INT,c38 INT,c39 INT,c40 INT,c41 INT, + c42 INT,c43 INT,c44 INT,c45 INT,c46 INT,c47 INT,c48 INT,c49 INT, + c50 INT,c51 INT,c52 INT,c53 INT,c54 INT,c55 INT,c56 INT,c57 INT, + c58 INT,c59 INT,c60 INT,c61 INT,c62 INT,c63 INT,c64 INT,c65 INT, + c66 INT,c67 INT,c68 INT,c69 INT,c70 INT,c71 INT,c72 INT,c73 INT, + c74 INT,c75 INT,c76 INT,c77 INT,c78 INT,c79 INT,c80 INT,c81 INT, + c82 INT,c83 INT,c84 INT,c85 INT,c86 INT,c87 INT,c88 INT,c89 INT, + c90 INT,c91 INT,c92 INT,c93 INT,c94 INT,c95 INT,c96 INT,c97 INT, + c98 INT,c99 INT,c100 INT,c101 INT,c102 INT,c103 INT,c104 INT, + c105 INT,c106 INT,c107 INT,c108 INT,c109 INT,c110 INT,c111 INT, + c112 INT,c113 INT,c114 INT,c115 INT,c116 INT,c117 INT,c118 INT, + c119 INT,c120 INT,c121 INT,c122 INT,c123 INT,c124 INT,c125 INT, + c126 INT,c127 INT,c128 INT,c129 INT,c130 INT,c131 INT,c132 INT, + c133 INT,c134 INT,c135 INT,c136 INT,c137 INT,c138 INT,c139 INT, + c140 INT,c141 INT,c142 INT,c143 INT,c144 INT,c145 INT,c146 INT, + c147 INT,c148 INT,c149 INT,c150 INT,c151 INT,c152 INT,c153 INT, + c154 INT,c155 INT,c156 INT,c157 INT,c158 INT,c159 INT,c160 INT, + c161 INT,c162 INT,c163 INT,c164 
INT,c165 INT,c166 INT,c167 INT, + c168 INT,c169 INT,c170 INT,c171 INT,c172 INT,c173 INT,c174 INT, + c175 INT,c176 INT,c177 INT,c178 INT,c179 INT,c180 INT,c181 INT, + c182 INT,c183 INT,c184 INT,c185 INT,c186 INT,c187 INT,c188 INT, + c189 INT,c190 INT,c191 INT,c192 INT,c193 INT,c194 INT,c195 INT, + c196 INT,c197 INT,c198 INT,c199 INT,c200 INT,c201 INT,c202 INT, + c203 INT,c204 INT,c205 INT,c206 INT,c207 INT,c208 INT,c209 INT, + c210 INT,c211 INT,c212 INT,c213 INT,c214 INT,c215 INT,c216 INT, + c217 INT,c218 INT,c219 INT,c220 INT,c221 INT,c222 INT,c223 INT, + c224 INT,c225 INT,c226 INT,c227 INT,c228 INT,c229 INT,c230 INT, + c231 INT,c232 INT,c233 INT,c234 INT,c235 INT,c236 INT,c237 INT, + c238 INT,c239 INT,c240 INT,c241 INT,c242 INT,c243 INT,c244 INT, + c245 INT,c246 INT,c247 INT,c248 INT,c249 INT,c250 INT,c251 INT, + c252 INT,c253 INT,c254 INT,c255 INT,c256 INT,c257 INT,c258 INT, + c259 INT,c260 INT,c261 INT,c262 INT,c263 INT,c264 INT,c265 INT, + c266 INT,c267 INT,c268 INT,c269 INT,c270 INT,c271 INT,c272 INT, + c273 INT,c274 INT,c275 INT,c276 INT,c277 INT,c278 INT,c279 INT, + c280 INT,c281 INT,c282 INT,c283 INT,c284 INT,c285 INT,c286 INT, + c287 INT,c288 INT,c289 INT,c290 INT,c291 INT,c292 INT,c293 INT, + c294 INT,c295 INT,c296 INT,c297 INT,c298 INT,c299 INT,c300 INT, + c301 INT,c302 INT,c303 INT,c304 INT,c305 INT,c306 INT,c307 INT, + c308 INT,c309 INT,c310 INT,c311 INT,c312 INT,c313 INT,c314 INT, + c315 INT,c316 INT,c317 INT,c318 INT,c319 INT,c320 INT,c321 INT, + c322 INT,c323 INT,c324 INT,c325 INT,c326 INT,c327 INT,c328 INT, + c329 INT,c330 INT,c331 INT,c332 INT,c333 INT,c334 INT,c335 INT, + c336 INT,c337 INT,c338 INT,c339 INT,c340 INT,c341 INT,c342 INT, + c343 INT,c344 INT,c345 INT,c346 INT,c347 INT,c348 INT,c349 INT, + c350 INT,c351 INT,c352 INT,c353 INT,c354 INT,c355 INT,c356 INT, + c357 INT,c358 INT,c359 INT,c360 INT,c361 INT,c362 INT,c363 INT, + c364 INT,c365 INT,c366 INT,c367 INT,c368 INT,c369 INT,c370 INT, + c371 INT,c372 INT,c373 INT,c374 INT,c375 INT,c376 
INT,c377 INT, + c378 INT,c379 INT,c380 INT,c381 INT,c382 INT,c383 INT,c384 INT, + c385 INT,c386 INT,c387 INT,c388 INT,c389 INT,c390 INT,c391 INT, + c392 INT,c393 INT,c394 INT,c395 INT,c396 INT,c397 INT,c398 INT, + c399 INT,c400 INT,c401 INT,c402 INT,c403 INT,c404 INT,c405 INT, + c406 INT,c407 INT,c408 INT,c409 INT,c410 INT,c411 INT,c412 INT, + c413 INT,c414 INT,c415 INT,c416 INT,c417 INT,c418 INT,c419 INT, + c420 INT,c421 INT,c422 INT,c423 INT,c424 INT,c425 INT,c426 INT, + c427 INT,c428 INT,c429 INT,c430 INT,c431 INT,c432 INT,c433 INT, + c434 INT,c435 INT,c436 INT,c437 INT,c438 INT,c439 INT,c440 INT, + c441 INT,c442 INT,c443 INT,c444 INT,c445 INT,c446 INT,c447 INT, + c448 INT, + KEY (c1,c2,c3,c4,c5,c6,c7),KEY (c8,c9,c10,c11,c12,c13,c14), + KEY (c15,c16,c17,c18,c19,c20,c21),KEY (c22,c23,c24,c25,c26,c27,c28), + KEY (c29,c30,c31,c32,c33,c34,c35),KEY (c36,c37,c38,c39,c40,c41,c42), + KEY (c43,c44,c45,c46,c47,c48,c49),KEY (c50,c51,c52,c53,c54,c55,c56), + KEY (c57,c58,c59,c60,c61,c62,c63),KEY (c64,c65,c66,c67,c68,c69,c70), + KEY (c71,c72,c73,c74,c75,c76,c77),KEY (c78,c79,c80,c81,c82,c83,c84), + KEY (c85,c86,c87,c88,c89,c90,c91),KEY (c92,c93,c94,c95,c96,c97,c98), + KEY (c99,c100,c101,c102,c103,c104,c105), + KEY (c106,c107,c108,c109,c110,c111,c112), + KEY (c113,c114,c115,c116,c117,c118,c119), + KEY (c120,c121,c122,c123,c124,c125,c126), + KEY (c127,c128,c129,c130,c131,c132,c133), + KEY (c134,c135,c136,c137,c138,c139,c140), + KEY (c141,c142,c143,c144,c145,c146,c147), + KEY (c148,c149,c150,c151,c152,c153,c154), + KEY (c155,c156,c157,c158,c159,c160,c161), + KEY (c162,c163,c164,c165,c166,c167,c168), + KEY (c169,c170,c171,c172,c173,c174,c175), + KEY (c176,c177,c178,c179,c180,c181,c182), + KEY (c183,c184,c185,c186,c187,c188,c189), + KEY (c190,c191,c192,c193,c194,c195,c196), + KEY (c197,c198,c199,c200,c201,c202,c203), + KEY (c204,c205,c206,c207,c208,c209,c210), + KEY (c211,c212,c213,c214,c215,c216,c217), + KEY (c218,c219,c220,c221,c222,c223,c224), + KEY 
(c225,c226,c227,c228,c229,c230,c231), + KEY (c232,c233,c234,c235,c236,c237,c238), + KEY (c239,c240,c241,c242,c243,c244,c245), + KEY (c246,c247,c248,c249,c250,c251,c252), + KEY (c253,c254,c255,c256,c257,c258,c259), + KEY (c260,c261,c262,c263,c264,c265,c266), + KEY (c267,c268,c269,c270,c271,c272,c273), + KEY (c274,c275,c276,c277,c278,c279,c280), + KEY (c281,c282,c283,c284,c285,c286,c287), + KEY (c288,c289,c290,c291,c292,c293,c294), + KEY (c295,c296,c297,c298,c299,c300,c301), + KEY (c302,c303,c304,c305,c306,c307,c308), + KEY (c309,c310,c311,c312,c313,c314,c315), + KEY (c316,c317,c318,c319,c320,c321,c322), + KEY (c323,c324,c325,c326,c327,c328,c329), + KEY (c330,c331,c332,c333,c334,c335,c336), + KEY (c337,c338,c339,c340,c341,c342,c343), + KEY (c344,c345,c346,c347,c348,c349,c350), + KEY (c351,c352,c353,c354,c355,c356,c357), + KEY (c358,c359,c360,c361,c362,c363,c364), + KEY (c365,c366,c367,c368,c369,c370,c371), + KEY (c372,c373,c374,c375,c376,c377,c378), + KEY (c379,c380,c381,c382,c383,c384,c385), + KEY (c386,c387,c388,c389,c390,c391,c392), + KEY (c393,c394,c395,c396,c397,c398,c399), + KEY (c400,c401,c402,c403,c404,c405,c406), + KEY (c407,c408,c409,c410,c411,c412,c413), + KEY (c414,c415,c416,c417,c418,c419,c420), + KEY (c421,c422,c423,c424,c425,c426,c427), + KEY (c428,c429,c430,c431,c432,c433,c434), + KEY (c435,c436,c437,c438,c439,c440,c441), + KEY (c442,c443,c444,c445,c446,c447,c448)); +DROP TABLE t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/deadlock.test b/storage/rocksdb/mysql-test/rocksdb/t/deadlock.test new file mode 100644 index 0000000000000..3be7fda9952a7 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/deadlock.test @@ -0,0 +1,43 @@ +--echo # +--echo # Validate that deadlock errors don't occur with a high level of concurrency +--echo # + +--source include/have_rocksdb.inc + +--echo # Disable for valgrind because this takes too long +--source include/not_valgrind.inc + +--disable_warnings +DROP DATABASE IF EXISTS mysqlslap; +--enable_warnings + 
+CREATE DATABASE mysqlslap; +USE mysqlslap; +CREATE TABLE t1(id1 BIGINT, id2 BIGINT, count INT, PRIMARY KEY(id1, id2), KEY(id2)) ENGINE=rocksdb; +CREATE TABLE t1rev(id1 BIGINT, id2 BIGINT, count INT, PRIMARY KEY(id1, id2) COMMENT "rev:cf2", KEY(id2) COMMENT "rev:cf2") ENGINE=rocksdb; + +SET @save = @@global.rocksdb_lock_wait_timeout; +SET GLOBAL rocksdb_lock_wait_timeout = 60; + +--exec $MYSQL_SLAP --silent --concurrency=50 --number-of-queries=50000 --query="INSERT INTO t1 VALUES(1, 1, 1) ON DUPLICATE KEY UPDATE count=count+1" +SELECT count from t1; +--exec $MYSQL_SLAP --silent --concurrency=50 --number-of-queries=50000 --query="UPDATE t1 SET count=count+1 WHERE id1=1 AND id2=1" +SELECT count from t1; +--exec $MYSQL_SLAP --silent --concurrency=50 --number-of-queries=50000 --query="UPDATE t1 SET count=count+1 WHERE id2=1" +SELECT count from t1; +--exec $MYSQL_SLAP --silent --concurrency=50 --number-of-queries=50000 --query="UPDATE t1 SET count=count+1" +SELECT count from t1; + +# Same tests on a table with reverse orderings +--exec $MYSQL_SLAP --silent --concurrency=50 --number-of-queries=50000 --query="INSERT INTO t1rev VALUES(1, 1, 1) ON DUPLICATE KEY UPDATE count=count+1" +SELECT count from t1rev; +--exec $MYSQL_SLAP --silent --concurrency=50 --number-of-queries=50000 --query="UPDATE t1rev SET count=count+1 WHERE id1=1 AND id2=1" +SELECT count from t1rev; +--exec $MYSQL_SLAP --silent --concurrency=50 --number-of-queries=50000 --query="UPDATE t1rev SET count=count+1 WHERE id2=1" +SELECT count from t1rev; +--exec $MYSQL_SLAP --silent --concurrency=50 --number-of-queries=50000 --query="UPDATE t1rev SET count=count+1" +SELECT count from t1rev; + +SET GLOBAL rocksdb_lock_wait_timeout = @save; + +DROP DATABASE mysqlslap; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/delete.test b/storage/rocksdb/mysql-test/rocksdb/t/delete.test new file mode 100644 index 0000000000000..b1654e606a5c5 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/delete.test @@ -0,0 +1,101 
@@ +--source include/have_rocksdb.inc + +# +# Basic DELETE statements. +# DELETE LOW_PRIORITY is covered in delete_low_prio test +# DELETE QUICK is covered in delete_quick test (syntax only) +# DELETE IGNORE is covered in delete_ignore test +# + +--disable_warnings +DROP TABLE IF EXISTS t1,t2; +--enable_warnings + +CREATE TABLE t1 (a INT, b CHAR(8), pk INT AUTO_INCREMENT PRIMARY KEY) ENGINE=rocksdb; +INSERT INTO t1 (a,b) VALUES (10000,'foobar'),(1,'a'),(2,'b'),(3,'c'),(4,'d'),(5,'e'); +INSERT INTO t1 (a,b) SELECT a, b FROM t1; + +# Single-table DELETE + +DELETE FROM t1 WHERE b IN ('c'); +--sorted_result +SELECT a,b FROM t1; + +DELETE FROM t1 WHERE a < 0 OR b = 'a'; +--sorted_result +SELECT a,b FROM t1; + +# ORDER BY and LIMIT +DELETE FROM t1 WHERE a <= 4 ORDER BY b DESC LIMIT 1; +--sorted_result +SELECT a,b FROM t1; + +# Multi-table DELETE + +CREATE TABLE t2 (c CHAR(8), d INT, pk INT AUTO_INCREMENT PRIMARY KEY) ENGINE=rocksdb; +INSERT INTO t2 (c,d) SELECT b, a FROM t1; +--sorted_result +SELECT c,d FROM t2; + +DELETE t2.* FROM t1, t2 WHERE c < b AND a + d != 1; +--sorted_result +SELECT a,b FROM t1; +--sorted_result +SELECT c,d FROM t2; + +DELETE FROM t2, t1.* USING t2, t1 WHERE c = 'foobar' and b = c; +--sorted_result +SELECT a,b FROM t1; +--sorted_result +SELECT c,d FROM t2; + +DELETE FROM t1; +--sorted_result +SELECT a,b FROM t1; + +DROP TABLE t1, t2; + + +# +# Transactional DELETE +# + +CREATE TABLE t1 (a INT, b CHAR(8), pk INT AUTO_INCREMENT PRIMARY KEY) ENGINE=rocksdb; +INSERT INTO t1 (a,b) VALUES (1,'a'),(2,'b'),(3,'c'),(4,'d'),(5,'e'),(6,'f'),(7,'g'),(8,'h'),(10000,'foobar'); +INSERT INTO t1 (a,b) SELECT a, b FROM t1; + +BEGIN; +DELETE FROM t1 WHERE b IN ('c'); +--sorted_result +SELECT a,b FROM t1; +DELETE FROM t1 WHERE a < 0 OR b = 'a'; +COMMIT; +--sorted_result +SELECT a,b FROM t1; + +# Savepoints + +BEGIN; +DELETE FROM t1 WHERE a <= 4 ORDER BY b DESC LIMIT 1; +SAVEPOINT spt1; + +DELETE FROM t1; +RELEASE SAVEPOINT spt1; +ROLLBACK; +--sorted_result +SELECT 
a,b FROM t1; + +BEGIN; +DELETE FROM t1 WHERE a <= 4 ORDER BY b DESC LIMIT 1; +SAVEPOINT spt1; +DELETE FROM t1; +INSERT INTO t1 (a,b) VALUES (1,'a'); +--error ER_UNKNOWN_ERROR +ROLLBACK TO SAVEPOINT spt1; +--error ER_UNKNOWN_ERROR +COMMIT; +--sorted_result +SELECT a,b FROM t1; + +DROP TABLE t1; + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/delete_before_lock.test b/storage/rocksdb/mysql-test/rocksdb/t/delete_before_lock.test new file mode 100644 index 0000000000000..93a9d1adaf940 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/delete_before_lock.test @@ -0,0 +1,36 @@ +--source include/have_rocksdb.inc +--source include/have_debug_sync.inc + +# This is a test case to reproduce https://github.com/facebook/mysql-5.6/issues/162 +# Expected output of the last select for update was (1,2,100) and (1,3,100), but +# currently it returns (1,2,1) and (1,3,1), which must be fixed. + +connect (con, localhost, root,,); +connection default; + +--disable_warnings +set debug_sync='RESET'; +drop table if exists t1; +--enable_warnings + +create table t1 (id1 int, id2 int, value int, primary key (id1, id2)) engine=rocksdb; +insert into t1 values (1, 1, 1),(1, 2, 1),(1, 3, 1), (2, 2, 2); + +connection con; +set debug_sync='rocksdb.get_row_by_rowid SIGNAL parked WAIT_FOR go'; +send update t1 set value=100 where id1=1; + +connection default; +set debug_sync='now WAIT_FOR parked'; +delete from t1 where id1=1 and id2=1; +set debug_sync='now SIGNAL go'; + +connection con; +reap; +select * from t1 where id1=1 for update; + +# Cleanup +connection default; +disconnect con; +set debug_sync='RESET'; +drop table t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/delete_ignore.test b/storage/rocksdb/mysql-test/rocksdb/t/delete_ignore.test new file mode 100644 index 0000000000000..d087d80f4acf1 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/delete_ignore.test @@ -0,0 +1,37 @@ +--source include/have_rocksdb.inc + +# +# DELETE IGNORE +# + +--disable_warnings +DROP TABLE IF 
EXISTS t1,t2; +--enable_warnings + +CREATE TABLE t1 (a INT, b CHAR(8), pk INT AUTO_INCREMENT PRIMARY KEY) ENGINE=rocksdb; + +INSERT INTO t1 (a,b) VALUES (10000,'foobar'),(1,'a'),(2,'b'),(3,'c'),(4,'d'),(5,'e'); +INSERT INTO t1 (a,b) SELECT a, b FROM t1; + +CREATE TABLE t2 (pk INT AUTO_INCREMENT PRIMARY KEY, c CHAR(8), d INT) ENGINE=rocksdb; + +INSERT INTO t2 (c,d) SELECT b, a FROM t1; +--sorted_result +SELECT a,b FROM t1; +--sorted_result +SELECT c,d FROM t2; + +DELETE IGNORE FROM t1 WHERE b IS NOT NULL ORDER BY a LIMIT 1; +--sorted_result +SELECT a,b FROM t1; + +DELETE IGNORE t1.*, t2.* FROM t1, t2 WHERE c < b OR a != ( SELECT 1 UNION SELECT 2 ); +--sorted_result +SELECT a,b FROM t1; +--sorted_result +SELECT c,d FROM t2; + +# Cleanup +DROP TABLE t1, t2; + + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/delete_quick.test b/storage/rocksdb/mysql-test/rocksdb/t/delete_quick.test new file mode 100644 index 0000000000000..127ef47dfaf0a --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/delete_quick.test @@ -0,0 +1,32 @@ +--source include/have_rocksdb.inc + +# +# DELETE QUICK syntax. +# For now we only check that the keyword is accepted, +# without actually checking whether the feature works. 
+# + +--disable_warnings +DROP TABLE IF EXISTS t1,t2; +--enable_warnings + +CREATE TABLE t1 (a INT, b CHAR(8), PRIMARY KEY (a)) ENGINE=rocksdb; +INSERT INTO t1 (a,b) VALUES (1,'a'),(2,'b'),(3,'c'),(4,'d'),(5,'e'); + +DELETE QUICK FROM t1 WHERE a = 1 OR b > 'foo'; +--sorted_result +SELECT a,b FROM t1; + +CREATE TABLE t2 (c CHAR(8), d INT, PRIMARY KEY (c)) ENGINE=rocksdb; +INSERT INTO t2 (c,d) SELECT b, a FROM t1; +--sorted_result +SELECT c,d FROM t2; + +DELETE QUICK FROM t2, t1.* USING t2, t1 WHERE c IS NULL OR a = d; +--sorted_result +SELECT a,b FROM t1; +--sorted_result +SELECT c,d FROM t2; + +DROP TABLE t1, t2; + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/delete_with_keys.test b/storage/rocksdb/mysql-test/rocksdb/t/delete_with_keys.test new file mode 100644 index 0000000000000..f4d890eb1b44a --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/delete_with_keys.test @@ -0,0 +1,39 @@ +--source include/have_rocksdb.inc + +# +# DELETE statements for tables with keys +# + +--disable_warnings +DROP TABLE IF EXISTS t1; +--enable_warnings + +CREATE TABLE t1 (a INT, b CHAR(8), pk INT AUTO_INCREMENT PRIMARY KEY, KEY(b)) ENGINE=rocksdb; +INSERT INTO t1 (a,b) VALUES (1,'a'),(2,'b'),(3,'c'),(4,'d'),(5,'e'),(6,'x'),(7,'y'),(8,'z'); +DELETE FROM t1 WHERE b > 'y'; +DELETE FROM t1 WHERE a=2; + +--sorted_result +SELECT a,b FROM t1; +DELETE FROM t1; + +DROP TABLE t1; + +CREATE TABLE t1 (a INT PRIMARY KEY, b CHAR(8)) ENGINE=rocksdb; +INSERT INTO t1 (a,b) VALUES (1,'a'),(2,'b'),(3,'c'),(4,'d'),(5,'e'),(6,'x'),(7,'y'),(8,'z'); +DELETE FROM t1 WHERE b > 'y'; +DELETE FROM t1 WHERE a=2; + +--sorted_result +SELECT a,b FROM t1; +DELETE FROM t1; +DROP TABLE t1; + +CREATE TABLE t1 (a INT, b INT, c INT, pk INT AUTO_INCREMENT PRIMARY KEY, KEY(a), KEY (b)) ENGINE=rocksdb; + +INSERT INTO t1 (a,b,c) VALUES (1,2,3),(4,5,6),(7,8,9); +DELETE FROM t1 WHERE a = 10 OR b = 20 ORDER BY c LIMIT 1; +--sorted_result +SELECT a,b,c FROM t1; +DROP TABLE t1; + diff --git 
a/storage/rocksdb/mysql-test/rocksdb/t/describe.test b/storage/rocksdb/mysql-test/rocksdb/t/describe.test new file mode 100644 index 0000000000000..9bc5d299a3165 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/describe.test @@ -0,0 +1,24 @@ +--source include/have_rocksdb.inc + +# +# DESCRIBE statement +# + +--disable_warnings +DROP TABLE IF EXISTS t1, t2, t3; +--enable_warnings + +CREATE TABLE t1 (a INT, b CHAR(8), PRIMARY KEY (a)) ENGINE=rocksdb; +INSERT INTO t1 (a,b) VALUES (100,'foo'),(2, 'b'); + +CREATE TABLE t2 (a INT, b CHAR(8), PRIMARY KEY (b)) ENGINE=rocksdb CHARACTER SET utf8; +INSERT INTO t2 (a,b) VALUES (1, 'bar'); + +CREATE TABLE t3 (a INT, b CHAR(8), pk INT AUTO_INCREMENT PRIMARY KEY) ENGINE=rocksdb CHARACTER SET utf8; + +DESCRIBE t1; +DESC t2 a; +DESCRIBE t3 '%'; + +DROP TABLE t1, t2, t3; + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/disabled.def b/storage/rocksdb/mysql-test/rocksdb/t/disabled.def new file mode 100644 index 0000000000000..8d3fc09027398 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/disabled.def @@ -0,0 +1,59 @@ +cons_snapshot_serializable : Consistent read does not work on serializable +level_read_uncommitted : Not supported +level_serializable: Not supported + + +# Tests that are (temporarily) disabled in MariaDB: +slow_query_log: MDEV-11480 +select_for_update_skip_locked_nowait: MDEV-11481 +rpl_read_free: MDEV-10976 + +optimizer_loose_index_scans: MariaDB doesnt support Skip Scan + +# MDEV-11735: MyRocks: Gap Lock detector support +gap_lock_issue254: MDEV-11735: MyRocks: Gap Lock detector support +gap_lock_raise_error: MDEV-11735: MyRocks: Gap Lock detector support + +# +# The idea of including RQG as a submodule and running RQG as part of +# MTR tests doesn't seem to be a good fit in MariaDB atm. +# +# The objection is that MTR tests are deterministic and can be run in +# a constrained environment. 
+# +rqg_examples : Test that use RQG are disabled +rqg_runtime : Test that use RQG are disabled +rqg_transactions : Test that use RQG are disabled + +# +# Temporarily disabled tests +# +information_schema : MariaRocks: requires GTIDs +mysqlbinlog_gtid_skip_empty_trans_rocksdb : MariaRocks: requires GTIDs +read_only_tx : MariaRocks: requires GTIDs +rpl_row_triggers : MariaRocks: requires GTIDs + +trx_info_rpl : MariaRocks: @@rpl_skip_tx_api doesn't work, yet. +2pc_group_commit : MariaRocks: Group Commit is not functional yet + +mysqldump : MariaRocks: MariaDB's mysqldump doesn't support --print-ordering-key +mysqldump2 : MariaRocks: MariaDB's mysqldump doesn't support --print-ordering-key + +show_engine : MariaRocks: MariaDB doesnt support SHOW ENGINE rocksdb TRANSACTION STATUS + +rpl_row_not_found : MariaDB doesnt support slave_exec_mode='SEMI_STRICT' + +blind_delete_without_tx_api: MDEV-12286: rocksdb.blind_delete_without_tx_api test fails + +persistent_cache: Upstream RocksDB bug https://github.com/facebook/mysql-5.6/issues/579 + +collation: Fails on gcc 4.8 and before, MDEV-12433 +rocksdb : Intermittent failures in BB +unique_sec : Intermittent failures in BB + +# See also storage/rocksdb/mysql-test/rocksdb/suite.pm +# Running tests under valgrind is disabled there. 
+ +allow_no_pk_concurrent_insert: stress test +rocksdb_deadlock_stress_rc: stress test +rocksdb_deadlock_stress_rr: stress test diff --git a/storage/rocksdb/mysql-test/rocksdb/t/drop_database.test b/storage/rocksdb/mysql-test/rocksdb/t/drop_database.test new file mode 100644 index 0000000000000..174a4bbf28694 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/drop_database.test @@ -0,0 +1,11 @@ +--source include/have_rocksdb.inc + +--disable_warnings +DROP DATABASE IF EXISTS test_drop_database; +--enable_warnings + +CREATE DATABASE test_drop_database; +CREATE TABLE t1 (a int, b int, c int, primary key (a), unique key (b)) ENGINE=ROCKSDB; +ALTER TABLE t1 DROP PRIMARY KEY, ADD PRIMARY KEY (a); +DROP TABLE t1; +DROP DATABASE test_drop_database; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/drop_index_inplace.test b/storage/rocksdb/mysql-test/rocksdb/t/drop_index_inplace.test new file mode 100644 index 0000000000000..57d7cdf57c2e2 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/drop_index_inplace.test @@ -0,0 +1,116 @@ +--source include/have_rocksdb.inc + +--disable_warnings +drop table if exists t1; +--enable_warnings + +## +## test dropping index inplace +## + +CREATE TABLE t1 (a INT, b INT AUTO_INCREMENT, KEY ka(a), KEY kb(a,b), PRIMARY KEY(b)) ENGINE=rocksdb; +SHOW CREATE TABLE t1; +INSERT INTO t1 (a) VALUES (1); +INSERT INTO t1 (a) VALUES (3); +INSERT INTO t1 (a) VALUES (5); + +ALTER TABLE t1 DROP INDEX ka, ALGORITHM=INPLACE; +SHOW CREATE TABLE t1; + +# Key ka does not exist in table t1 +--error 1176 +SELECT * FROM t1 FORCE INDEX(ka) where a > 1; + +--sorted_result +SELECT * FROM t1 FORCE INDEX(kb) where a > 1; +--sorted_result +SELECT * FROM t1 where b > 1; + +DROP TABLE t1; + +## +## test dropping multiple indexes at once and multi-part indexes +## + +CREATE TABLE t1 (a INT AUTO_INCREMENT, b INT, c INT, KEY kb(b), KEY kbc(b,c), KEY kc(c), PRIMARY KEY(a)) ENGINE=rocksdb; +SHOW CREATE TABLE t1; +INSERT INTO t1 (b,c) VALUES (1,2); +INSERT INTO t1 
(b,c) VALUES (3,4); +INSERT INTO t1 (b,c) VALUES (5,6); +ALTER TABLE t1 DROP INDEX kb, DROP INDEX kbc, ALGORITHM=INPLACE; +SHOW CREATE TABLE t1; + + +# test restarting to make sure everything is still ok and persisted properly +--source include/restart_mysqld.inc + +SHOW CREATE TABLE t1; + +INSERT INTO t1 (b,c) VALUES (1,2); +INSERT INTO t1 (b,c) VALUES (3,4); +INSERT INTO t1 (b,c) VALUES (5,6); + +--sorted_result +SELECT * FROM t1 FORCE INDEX(kc) where c > 3; +--sorted_result +SELECT * FROM t1 where b > 3; + +DROP TABLE t1; + +# test dropping pk to see if thats still ok +CREATE TABLE t1 (a INT, b INT, c INT, KEY kb(b), KEY kbc(b,c), KEY kc(c), PRIMARY KEY(a)) ENGINE=rocksdb; +SHOW INDEX IN t1; +ALTER TABLE t1 DROP INDEX kb, DROP INDEX kbc, ALGORITHM=INPLACE; +SHOW INDEX IN t1; + +ALTER TABLE t1 DROP PRIMARY KEY; +SHOW INDEX IN t1; +# test dropping index on tables with no pk +ALTER TABLE t1 DROP INDEX kc, ALGORITHM=INPLACE; +SHOW INDEX IN t1; + +DROP TABLE t1; + +# test dropping unique keys +CREATE TABLE t1 (a INT AUTO_INCREMENT, b INT, c INT, PRIMARY KEY(a)) ENGINE=rocksdb; +ALTER TABLE t1 ADD UNIQUE INDEX kb(b); +ALTER TABLE t1 ADD UNIQUE INDEX kbc(b,c); +ALTER TABLE t1 ADD UNIQUE INDEX kc(c); +SHOW INDEX IN t1; + +ALTER TABLE t1 DROP INDEX kb, DROP INDEX kbc; +SHOW INDEX IN t1; + +# test restarting to make sure everything is still ok and persisted properly +--source include/restart_mysqld.inc + +--sorted_result +INSERT INTO t1 (b,c) VALUES (1,2); +INSERT INTO t1 (b,c) VALUES (3,4); +INSERT INTO t1 (b,c) VALUES (5,6); +SELECT * FROM t1 FORCE INDEX(kc) where c > 3; + +# test dropping index on tables with no pk +ALTER TABLE t1 DROP INDEX kc, ALGORITHM=INPLACE; +SHOW CREATE TABLE t1; + +DROP TABLE t1; + +# case where dropping column, where column is the key, we dont want to use +# inplace in this scenario +CREATE TABLE IF NOT EXISTS t1 (col1 INT, col2 INT, col3 INT); +INSERT INTO t1 (col1,col2,col3) VALUES (1,2,3); +ALTER TABLE t1 ADD KEY idx ( col1, col2 ); 
+ANALYZE TABLE t1; +ALTER TABLE t1 DROP COLUMN col2; +ALTER TABLE t1 DROP COLUMN col3; +DROP TABLE t1; + +# case drop and add at same time, should not use inplace algorithm yet +CREATE TABLE IF NOT EXISTS t1 (col1 INT, col2 INT, col3 INT); +INSERT INTO t1 (col1,col2,col3) VALUES (1,2,3); +ALTER TABLE t1 ADD KEY idx ( col1, col2 ); +ANALYZE TABLE t1; +ALTER TABLE t1 DROP COLUMN col2; +ALTER TABLE t1 DROP COLUMN col3; +DROP TABLE t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/drop_stats_procedure.inc b/storage/rocksdb/mysql-test/rocksdb/t/drop_stats_procedure.inc new file mode 100644 index 0000000000000..b40004402c919 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/drop_stats_procedure.inc @@ -0,0 +1,3 @@ +drop procedure save_read_stats; +drop procedure get_read_stats; + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/drop_table-master.opt b/storage/rocksdb/mysql-test/rocksdb/t/drop_table-master.opt new file mode 100644 index 0000000000000..f53a6050e8940 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/drop_table-master.opt @@ -0,0 +1,3 @@ +--rocksdb_max_background_compactions=8 +--rocksdb_max_subcompactions=1 +--rocksdb_default_cf_options=write_buffer_size=512k;target_file_size_base=512k;level0_file_num_compaction_trigger=2;level0_slowdown_writes_trigger=-1;level0_stop_writes_trigger=1000;max_bytes_for_level_base=1m diff --git a/storage/rocksdb/mysql-test/rocksdb/t/drop_table.test b/storage/rocksdb/mysql-test/rocksdb/t/drop_table.test new file mode 100644 index 0000000000000..0d48ae461ca08 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/drop_table.test @@ -0,0 +1,154 @@ +--source include/have_rocksdb.inc +call mtr.add_suppression("Column family 'cf1' not found"); +call mtr.add_suppression("Column family 'rev:cf2' not found"); + +--disable_warnings +DROP TABLE IF EXISTS t1; +DROP TABLE IF EXISTS t2; +DROP TABLE IF EXISTS t3; +DROP TABLE IF EXISTS t4; +DROP TABLE IF EXISTS t5; +--enable_warnings + +# Start from clean slate +set 
global rocksdb_compact_cf = 'cf1'; +set global rocksdb_compact_cf = 'rev:cf2'; +set global rocksdb_signal_drop_index_thread = 1; +--source include/restart_mysqld.inc + +CREATE TABLE t1 ( + a int not null, + b int not null, + primary key (a,b) comment 'cf1', + key (b) comment 'rev:cf2' +) ENGINE=RocksDB; + +CREATE TABLE t2 ( + a int not null, + b int not null, + primary key (a,b) comment 'cf1', + key (b) comment 'rev:cf2' +) ENGINE=RocksDB; + +CREATE TABLE t3 ( + a int not null, + b int not null, + primary key (a,b) comment 'cf1', + key (b) comment 'rev:cf2' +) ENGINE=RocksDB; + +CREATE TABLE t4 ( + a int not null, + b int not null, + primary key (a,b) comment 'cf1', + key (b) comment 'rev:cf2' +) ENGINE=RocksDB; + +# Populate tables +let $max = 1000; +let $table = t1; +--source drop_table_repopulate_table.inc +let $table = t2; +--source drop_table_repopulate_table.inc +let $table = t3; +--source drop_table_repopulate_table.inc +let $table = t4; +--source drop_table_repopulate_table.inc + +drop table t2; + +# Restart the server before t2's indices are deleted +--source include/restart_mysqld.inc + +let $table = t1; +--source drop_table_repopulate_table.inc +let $table = t4; +--source drop_table_repopulate_table.inc + +drop table t3; + +# Insert enough data to trigger compactions that eliminate t2 and t3 +let $max = 50000; +let $table = t1; +--source drop_table_repopulate_table.inc +let $table = t4; +--source drop_table_repopulate_table.inc + +drop table t4; + +# Restart the server before t4's indices are deleted +--source include/restart_mysqld.inc + +# Make sure new table gets unique indices +CREATE TABLE t5 ( + a int not null, + b int not null, + primary key (a,b) comment 'cf1', + key (b) comment 'rev:cf2' +) ENGINE=RocksDB; + +let $max = 1000; +let $table = t5; +--source drop_table_repopulate_table.inc + +drop table t5; + +# Manually compact column families, cleaning up all lingering data +set global rocksdb_compact_cf = 'cf1'; +set global rocksdb_compact_cf = 
'rev:cf2'; + +# Signal thread to check for dropped indices +set global rocksdb_signal_drop_index_thread = 1; + +let $show_rpl_debug_info= 1; # to force post-failure printout +let $wait_timeout= 300; # Override default 30 seconds with 300. +let $wait_condition = select count(*) = 0 + as c from information_schema.rocksdb_global_info + where TYPE = 'DDL_DROP_INDEX_ONGOING'; +--source include/wait_condition.inc + +# Get list of all indices needing to be dropped +# Check total compacted-away rows for all indices +# Check that all indices have been successfully dropped +perl; + +sub print_array { + $str = shift; + $prev= $_[0]; + foreach (@_) { + $dummy_idx = $_ - $prev; + $prev= $_; + print "$str $dummy_idx\n"; + } +} + +$filename= "$ENV{MYSQLTEST_VARDIR}/log/mysqld.1.err"; +open(F, '<', $filename) || die("Can't open file $filename: $!"); +while () { + %a = @b = @c = () if /CURRENT_TEST/; + if (/Compacting away elements from dropped index \(\d+,(\d+)\): (\d+)/) { + $a{$1} += $2; + } + if (/Begin filtering dropped index \(\d+,(\d+)\)/) { + push @b, $1; + } + if (/Finished filtering dropped index \(\d+,(\d+)\)/) { + push @c, $1; + } +} + +$prev= 0; +foreach (sort {$a <=> $b} keys %a){ + if ($prev) { + $dummy_idx= $_ - $prev; + }else { + $dummy_idx= 0; + } + $prev= $_; +} +print_array("Begin filtering dropped index+", sort {$a <=> $b} @b); +print_array("Finished filtering dropped index+", sort {$a <=> $b} @c); +EOF + +# Cleanup +drop table t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/drop_table2.test b/storage/rocksdb/mysql-test/rocksdb/t/drop_table2.test new file mode 100644 index 0000000000000..1b5f6c14ee1fb --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/drop_table2.test @@ -0,0 +1,120 @@ +--source include/have_rocksdb.inc + +call mtr.add_suppression("Column family 'cf1' not found"); +call mtr.add_suppression("Column family 'rev:cf2' not found"); + +--disable_warnings +DROP TABLE IF EXISTS t1; +DROP TABLE IF EXISTS t2; +DROP TABLE IF EXISTS t3; +DROP 
TABLE IF EXISTS t4; +DROP TABLE IF EXISTS t5; +--enable_warnings + +# Start from clean slate +set global rocksdb_compact_cf = 'cf1'; +set global rocksdb_compact_cf = 'rev:cf2'; +set global rocksdb_signal_drop_index_thread = 1; +--source include/restart_mysqld.inc + +CREATE TABLE t1 ( + a int not null, + b int not null, + primary key (a,b) comment 'cf1', + key (b) comment 'rev:cf2' +) ENGINE=RocksDB; + +CREATE TABLE t2 ( + a int not null, + b int not null, + primary key (a,b) comment 'cf1', + key (b) comment 'rev:cf2' +) ENGINE=RocksDB; + +CREATE TABLE t3 ( + a int not null, + b int not null, + primary key (a,b) comment 'cf1', + key (b) comment 'rev:cf2' +) ENGINE=RocksDB; + +CREATE TABLE t4 ( + a int not null, + b int not null, + primary key (a,b) comment 'cf1', + key (b) comment 'rev:cf2' +) ENGINE=RocksDB; + +# Populate tables +let $max = 1000; +let $table = t1; +--source drop_table_repopulate_table.inc +let $table = t2; +--source drop_table_repopulate_table.inc +let $table = t3; +--source drop_table_repopulate_table.inc +let $table = t4; +--source drop_table_repopulate_table.inc + + +# Restart the server before t2's indices are deleted +--source include/restart_mysqld.inc + +let $table = t1; +--source drop_table_repopulate_table.inc +let $table = t4; +--source drop_table_repopulate_table.inc + + +# Insert enough data to trigger compactions that eliminate t2 and t3 +let $max = 50000; +let $table = t1; +--source drop_table_repopulate_table.inc +let $table = t4; +--source drop_table_repopulate_table.inc + + +# Restart the server before t4's indices are deleted +--source include/restart_mysqld.inc + +# Make sure new table gets unique indices +CREATE TABLE t5 ( + a int not null, + b int not null, + primary key (a,b) comment 'cf1', + key (b) comment 'rev:cf2' +) ENGINE=RocksDB; + +let $max = 1000; +let $table = t5; +--source drop_table_repopulate_table.inc + +perl; +$size+=-s $_ for (<$ENV{MYSQLTEST_VARDIR}/mysqld.1/data/.rocksdb/*.sst>); +$filename= 
"$ENV{MYSQLTEST_VARDIR}/tmp/size_output"; +open(F, '>', $filename) || die("Can't open file $filename: $!"); +print F $size; +EOF +drop table t1; +drop table t2; +drop table t3; +drop table t4; +drop table t5; + +let $show_rpl_debug_info= 1; # to force post-failure printout +let $wait_timeout= 300; # Override default 30 seconds with 300. +let $wait_condition = select count(*) = 0 + as c from information_schema.rocksdb_global_info + where TYPE = 'DDL_DROP_INDEX_ONGOING'; +--source include/wait_condition.inc + +# Check that space is reclaimed +perl; +$size+=-s $_ for (<$ENV{MYSQLTEST_VARDIR}/mysqld.1/data/.rocksdb/*.sst>); +$filename= "$ENV{MYSQLTEST_VARDIR}/tmp/size_output"; +open(F, '<', $filename) || die("Can't open file $filename: $!"); +$old=; +print "Compacted\n" if $old > $size * 2; +EOF + +# Cleanup diff --git a/storage/rocksdb/mysql-test/rocksdb/t/drop_table3-master.opt b/storage/rocksdb/mysql-test/rocksdb/t/drop_table3-master.opt new file mode 100644 index 0000000000000..a9ebc4ec20b96 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/drop_table3-master.opt @@ -0,0 +1,2 @@ +--rocksdb_max_subcompactions=1 +--rocksdb_default_cf_options=write_buffer_size=16k;target_file_size_base=16k;level0_slowdown_writes_trigger=-1;level0_stop_writes_trigger=1000;compression_per_level=kNoCompression; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/drop_table3.inc b/storage/rocksdb/mysql-test/rocksdb/t/drop_table3.inc new file mode 100644 index 0000000000000..7a643d9a72057 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/drop_table3.inc @@ -0,0 +1,49 @@ +--source include/have_rocksdb.inc + +call mtr.add_suppression("Column family 'cf1' not found"); +call mtr.add_suppression("Column family 'rev:cf2' not found"); + +--disable_warnings +DROP TABLE IF EXISTS t1; +--enable_warnings + +# Start from clean slate +set global rocksdb_compact_cf = 'cf1'; +set global rocksdb_compact_cf = 'rev:cf2'; +set global rocksdb_signal_drop_index_thread = 1; +--source 
include/restart_mysqld.inc + +CREATE TABLE t1 ( + a int not null, + b int not null, + c varchar(500) not null, + primary key (a,b) comment 'cf1', + key (b) comment 'rev:cf2' +) ENGINE=RocksDB; + +# Populate tables +let $max = 50000; +let $table = t1; +--source drop_table3_repopulate_table.inc + +select variable_value into @a from information_schema.global_status where variable_name='rocksdb_compact_read_bytes'; +if ($truncate_table) +{ + truncate table t1; +} +if ($drop_table) +{ + drop table t1; +} + +let $show_rpl_debug_info= 1; # to force post-failure printout +let $wait_timeout= 300; # Override default 30 seconds with 300. +let $wait_condition = select count(*) = 0 + as c from information_schema.rocksdb_global_info + where TYPE = 'DDL_DROP_INDEX_ONGOING'; +--source include/wait_condition.inc + +select case when variable_value-@a < 500000 then 'true' else 'false' end from information_schema.global_status where variable_name='rocksdb_compact_read_bytes'; + +# Cleanup +DROP TABLE IF EXISTS t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/drop_table3.test b/storage/rocksdb/mysql-test/rocksdb/t/drop_table3.test new file mode 100644 index 0000000000000..b3a6bf9958e5d --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/drop_table3.test @@ -0,0 +1,5 @@ +--source include/have_rocksdb.inc + +-- let $truncate_table = 0 +-- let $drop_table = 1 +-- source drop_table3.inc diff --git a/storage/rocksdb/mysql-test/rocksdb/t/drop_table3_repopulate_table.inc b/storage/rocksdb/mysql-test/rocksdb/t/drop_table3_repopulate_table.inc new file mode 100644 index 0000000000000..c34af07204f44 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/drop_table3_repopulate_table.inc @@ -0,0 +1,15 @@ +# Usage: +# let $max = ; +# let $table = ; +# --source drop_table_repopulate_table.inc +# +eval DELETE FROM $table; + +--disable_query_log +let $i = 1; +while ($i <= $max) { + let $insert = INSERT INTO $table VALUES ($i, $i, rpad('a', 499, 'b')); + inc $i; + eval $insert; +} 
+--enable_query_log diff --git a/storage/rocksdb/mysql-test/rocksdb/t/drop_table_repopulate_table.inc b/storage/rocksdb/mysql-test/rocksdb/t/drop_table_repopulate_table.inc new file mode 100644 index 0000000000000..6faf41ef7b159 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/drop_table_repopulate_table.inc @@ -0,0 +1,15 @@ +# Usage: +# let $max = ; +# let $table =
; +# --source drop_table_repopulate_table.inc +# +eval DELETE FROM $table; + +--disable_query_log +let $i = 1; +while ($i <= $max) { + let $insert = INSERT INTO $table VALUES ($i, $i); + inc $i; + eval $insert; +} +--enable_query_log diff --git a/storage/rocksdb/mysql-test/rocksdb/t/drop_table_sync.inc b/storage/rocksdb/mysql-test/rocksdb/t/drop_table_sync.inc new file mode 100644 index 0000000000000..c6a3ccde7a63f --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/drop_table_sync.inc @@ -0,0 +1,6 @@ +let $show_rpl_debug_info= 1; # to force post-failure printout +let $wait_timeout= 300; # Override default 30 seconds with 300. +let $wait_condition = select count(*) = 0 + as c from information_schema.rocksdb_global_info + where TYPE = 'DDL_DROP_INDEX_ONGOING'; +--source include/wait_condition.inc diff --git a/storage/rocksdb/mysql-test/rocksdb/t/dup_key_update.test b/storage/rocksdb/mysql-test/rocksdb/t/dup_key_update.test new file mode 100644 index 0000000000000..a9a09d44e0128 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/dup_key_update.test @@ -0,0 +1,41 @@ +--source include/have_rocksdb.inc + +# Test insert ... 
on duplicate key update statements + +--disable_warnings +DROP TABLE IF EXISTS t1; +DROP TABLE IF EXISTS t2; +--enable_warnings + +CREATE TABLE t1 (id1 INT, id2 INT, id3 INT, + PRIMARY KEY (id1, id2, id3), + UNIQUE KEY (id3, id1)) ENGINE=ROCKSDB; + +CREATE TABLE t2 (id1 INT, id2 INT, id3 INT, + PRIMARY KEY (id1, id2, id3), + UNIQUE KEY (id3, id1) COMMENT 'rev:cf') ENGINE=ROCKSDB; + + +--source include/dup_key_update.inc + +# Cleanup +DROP TABLE t1; +DROP TABLE t2; + +CREATE TABLE t1 (id1 varchar(128) CHARACTER SET latin1 COLLATE latin1_bin, + id2 varchar(256) CHARACTER SET utf8 COLLATE utf8_bin, + id3 varchar(200) CHARACTER SET latin1 COLLATE latin1_swedish_ci, + PRIMARY KEY (id1, id2, id3), + UNIQUE KEY (id3, id1)) ENGINE=ROCKSDB; + +CREATE TABLE t2 (id1 varchar(128) CHARACTER SET latin1 COLLATE latin1_bin, + id2 varchar(256) CHARACTER SET utf8 COLLATE utf8_bin, + id3 varchar(200) CHARACTER SET latin1 COLLATE latin1_swedish_ci, + PRIMARY KEY (id1, id2, id3), + UNIQUE KEY (id3, id1) COMMENT 'rev:cf') ENGINE=ROCKSDB; + +--source include/dup_key_update.inc + +# Cleanup +DROP TABLE t1; +DROP TABLE t2; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/duplicate_table.test b/storage/rocksdb/mysql-test/rocksdb/t/duplicate_table.test new file mode 100644 index 0000000000000..9ac89a128c90d --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/duplicate_table.test @@ -0,0 +1,16 @@ +--source include/have_rocksdb.inc +--disable_warnings +DROP TABLE IF EXISTS t; +--enable_warnings +CREATE TABLE t(id int primary key) engine=rocksdb; +INSERT INTO t values (1), (2), (3); +--error ER_TABLE_EXISTS_ERROR +CREATE TABLE t(id int primary key) engine=rocksdb; +FLUSH TABLES; +move_file $MYSQLTEST_VARDIR/mysqld.1/data/test/t.frm $MYSQLTEST_VARDIR/mysqld.1/data/test/t.frm.tmp; +--error ER_UNKNOWN_ERROR +CREATE TABLE t(id int primary key) engine=rocksdb; +move_file $MYSQLTEST_VARDIR/mysqld.1/data/test/t.frm.tmp $MYSQLTEST_VARDIR/mysqld.1/data/test/t.frm; +FLUSH TABLES; +SELECT * FROM t; 
+DROP TABLE t; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/fail_system_cf.test b/storage/rocksdb/mysql-test/rocksdb/t/fail_system_cf.test new file mode 100644 index 0000000000000..255819704a828 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/fail_system_cf.test @@ -0,0 +1,17 @@ +--source include/have_rocksdb.inc + +# +# Any create table using the system column family should fail + +--disable_warnings +DROP TABLE IF EXISTS t1; +--enable_warnings + +--error ER_WRONG_ARGUMENTS +CREATE TABLE t1 (i INT, PRIMARY KEY (i) COMMENT '__system__') ENGINE = ROCKSDB; + +#cleanup +--disable_warnings +DROP TABLE IF EXISTS t1; +--enable_warnings + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/foreign_key.test b/storage/rocksdb/mysql-test/rocksdb/t/foreign_key.test new file mode 100644 index 0000000000000..bd8071b1b5ea4 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/foreign_key.test @@ -0,0 +1,45 @@ +--disable_warnings +DROP TABLE IF EXISTS t1, t2; +--enable_warnings + +CREATE TABLE t1 (b INT PRIMARY KEY); + +# Try simple foreign key - should fail +--error ER_NOT_SUPPORTED_YET +CREATE TABLE t2 (a INT NOT NULL, b INT NOT NULL, FOREIGN KEY (b) REFERENCES t1(b)); + +# Try simple valid syntax with 'foreign' as part - should succeed +CREATE TABLE t2 (a INT NOT NULL, bforeign INT NOT NULL); +DROP TABLE t2; + +# Try simple valid syntax with 'foreign' and 'key' as part (with no space) - should succeed +CREATE TABLE t2 (a INT NOT NULL, foreignkey INT NOT NULL); +DROP TABLE t2; + +# Try with valid id containing 'foreign' and then a foreign key - should fail +--error ER_NOT_SUPPORTED_YET +CREATE TABLE t2 (a INT NOT NULL, bforeign INT not null, FOREIGN KEY (bforeign) REFERENCES t1(b)); + +CREATE TABLE t2 (a INT NOT NULL, b INT NOT NULL); +# Alter with foreign key - should fail +--error ER_NOT_SUPPORTED_YET +ALTER TABLE t2 ADD FOREIGN KEY (b) REFERENCES t1(b); +DROP TABLE t2; + +# Alter with valid syntax that contains 'foreign' - should succeed +CREATE TABLE t2 (a 
INT NOT NULL); +ALTER TABLE t2 ADD bforeign INT NOT NULL; +DROP TABLE t2; + +# Alter with valid syntax that contains 'foreign' and 'key' (no space) - should succeed +CREATE TABLE t2 (a INT NOT NULL); +ALTER TABLE t2 ADD foreignkey INT NOT NULL; +DROP TABLE t2; + +# Alter with valid syntax that contains 'foreign' and then foreign key - should fail +CREATE TABLE t2 (a INT NOT NULL); +--error ER_NOT_SUPPORTED_YET +ALTER TABLE t2 ADD bforeign INT NOT NULL, ADD FOREIGN KEY (bforeign) REFERENCES t1(b); +DROP TABLE t2; + +DROP TABLE t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/gap_lock_issue254-master.opt b/storage/rocksdb/mysql-test/rocksdb/t/gap_lock_issue254-master.opt new file mode 100644 index 0000000000000..f0b7f4b5ce512 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/gap_lock_issue254-master.opt @@ -0,0 +1 @@ +--binlog-format=row --binlog-row-image=full --gap-lock-raise-error=1 diff --git a/storage/rocksdb/mysql-test/rocksdb/t/gap_lock_issue254.test b/storage/rocksdb/mysql-test/rocksdb/t/gap_lock_issue254.test new file mode 100644 index 0000000000000..af7c9b1ab4f02 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/gap_lock_issue254.test @@ -0,0 +1,14 @@ +-- source include/have_binlog_format_row.inc +-- source include/have_rocksdb.inc + +# For issue#254 +create table t (id int primary key, value int); +begin; +update t set value=100 where id in (1, 2); +commit; +begin; +--error ER_UNKNOWN_ERROR +select * from t for update; +commit; +drop table t; + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/gap_lock_raise_error.test b/storage/rocksdb/mysql-test/rocksdb/t/gap_lock_raise_error.test new file mode 100644 index 0000000000000..59fe7e6f80a3f --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/gap_lock_raise_error.test @@ -0,0 +1,37 @@ +--source include/have_rocksdb.inc + +let $engine=rocksdb; +--source include/gap_lock_raise_error_all.inc + +SET @save_gap_lock_exceptions = @@global.gap_lock_exceptions; + +SET GLOBAL 
gap_lock_exceptions="t.*"; +--source include/gap_lock_raise_error_init.inc + +set session autocommit=0; +--error ER_UNKNOWN_ERROR +select * from gap1 limit 1 for update; +--error ER_UNKNOWN_ERROR +select * from gap1 where value != 100 limit 1 for update; + +--source include/gap_lock_raise_error_cleanup.inc + +SET GLOBAL gap_lock_exceptions="gap.*"; +--source include/gap_lock_raise_error_init.inc + +set session autocommit=0; +select * from gap1 limit 1 for update; +select * from gap1 where value != 100 limit 1 for update; + +--source include/gap_lock_raise_error_cleanup.inc + +# This test has been temporarily removed because it fails when the server +# is compiled using GCC 4.8 as full regular expression handling was added +# in GCC 4.9. We need to add the ability to detect if full regex is +# available before re-enabling this test. +## Make sure we handle invalid regex expressions and generate a warning +#--exec echo "" >$MYSQLTEST_VARDIR/log/mysqld.1.err +#SET GLOBAL gap_lock_exceptions="[a-b,abc\\"; +#--exec grep -A 2 "Invalid pattern" $MYSQLTEST_VARDIR/log/mysqld.1.err | cut -d] -f2 + +SET GLOBAL gap_lock_exceptions=@save_gap_lock_exceptions; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/get_error_message.test b/storage/rocksdb/mysql-test/rocksdb/t/get_error_message.test new file mode 100644 index 0000000000000..b4c9e2dae3ac7 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/get_error_message.test @@ -0,0 +1,27 @@ +--source include/have_rocksdb.inc +--source include/have_partition.inc +--source include/not_windows.inc # Data directory option not available + +# +# Create tables with partitions and try to generate an error while creating +# partitions.
+# +--disable_warnings + +DROP TABLE IF EXISTS t1; + +--enable_warnings + +CREATE TABLE t1(a INT,b INT,KEY (a)) PARTITION BY HASH (a) PARTITIONS 3; + +SHOW TABLES; + +--disable_query_log +call mtr.add_suppression("Failed to execute action for entry.*"); +--enable_query_log + +--error ER_WRONG_TABLE_NAME +ALTER TABLE t1 ADD PARTITION(PARTITION p3 DATA DIRECTORY='G:/mysqltest/p3Data' INDEX DIRECTORY='H:/mysqltest/p3Index'); + +DROP TABLE t1; + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/handler_basic.test b/storage/rocksdb/mysql-test/rocksdb/t/handler_basic.test new file mode 100644 index 0000000000000..7b1652c759beb --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/handler_basic.test @@ -0,0 +1,52 @@ +--source include/have_rocksdb.inc + +# +# Basic HANDLER counts +# + +--disable_warnings +DROP TABLE IF EXISTS t1; +--enable_warnings + +FLUSH STATUS; +CREATE TABLE t1 (id INT PRIMARY KEY, a VARCHAR(100), b INT, +INDEX b(b)) ENGINE=rocksdb; +INSERT INTO t1 (id,a,b) VALUES (1,'foobar',100),(2,'z',0),(3,'bar',50); +SHOW SESSION STATUS LIKE 'Handler_write%'; + +UPDATE t1 SET b=1000 WHERE id=1; +SHOW SESSION STATUS LIKE 'Handler_update%'; + +DELETE FROM t1 WHERE id=2; +SHOW SESSION STATUS LIKE 'Handler_delete%'; + +INSERT INTO t1 (id,b) VALUES(4,4),(5,5),(6,6),(7,7),(8,8),(9,9),(10,10); +SHOW SESSION STATUS LIKE 'Handler_write%'; + +FLUSH STATUS; +SELECT * FROM t1 WHERE id=8; +SHOW SESSION STATUS LIKE 'Handler_read%'; + +FLUSH STATUS; +SELECT * FROM t1 WHERE b=6; +SHOW SESSION STATUS LIKE 'Handler_read%'; + +FLUSH STATUS; +--sorted_result +SELECT * FROM t1; +SHOW SESSION STATUS LIKE 'Handler_read%'; + +FLUSH STATUS; +SELECT * FROM t1 FORCE INDEX(b) WHERE b <=5 ORDER BY b; +SHOW SESSION STATUS LIKE 'Handler_read%'; + +FLUSH STATUS; +SELECT * FROM t1 WHERE id >=8 ORDER BY id; +SHOW SESSION STATUS LIKE 'Handler_read%'; + +FLUSH STATUS; +SELECT * FROM t1 WHERE id < 8 ORDER BY id; +SHOW SESSION STATUS LIKE 'Handler_read%'; + +# Cleanup +DROP TABLE t1; diff --git 
a/storage/rocksdb/mysql-test/rocksdb/t/hermitage.inc b/storage/rocksdb/mysql-test/rocksdb/t/hermitage.inc new file mode 100644 index 0000000000000..17baf5b6c5769 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/hermitage.inc @@ -0,0 +1,257 @@ +--enable_connect_log + +# Save the initial number of concurrent sessions +--source include/count_sessions.inc + +### See full test cases here: +### https://github.com/ept/hermitage/blob/master/mysql.md + +--disable_warnings +DROP TABLE IF EXISTS test; +--enable_warnings + +connect (con1,localhost,root,,); +eval SET SESSION TRANSACTION ISOLATION LEVEL $trx_isolation; +connect (con2,localhost,root,,); +eval SET SESSION TRANSACTION ISOLATION LEVEL $trx_isolation; +connect (con3,localhost,root,,); +eval SET SESSION TRANSACTION ISOLATION LEVEL $trx_isolation; + +connection con1; + +create table test (id int primary key, value int) engine=rocksdb; + +### Prevents Aborted Reads (G1a) +--source hermitage_init.inc + +connection con1; +select * from test; +update test set value = 101 where id = 1; +connection con2; +select * from test; # Still shows 1 => 10 +connection con1; +rollback; +connection con2; # Still shows 1 => 10 +select * from test; +commit; + + +### Prevents Intermediate Reads (G1b) +--source hermitage_init.inc + +connection con1; +update test set value = 101 where id = 1; +connection con2; +select * from test; # Still shows 1 => 10 +connection con1; +update test set value = 11 where id = 1; +commit; +connection con2; +select * from test; # Now shows 1 => 11 +commit; + + +### Prevents Circular Information Flow (G1c) +--source hermitage_init.inc + +connection con1; +update test set value = 11 where id = 1; +connection con2; +update test set value = 22 where id = 2; +connection con1; +select * from test where id = 2; # Still shows 2 => 20 +connection con2; +select * from test where id = 1; # Still shows 1 => 10 +connection con1; +commit; +connection con2; +commit; + + +### prevents Observed Transaction Vanishes 
(OTV) +--source hermitage_init.inc + +connection con1; +update test set value = 11 where id = 1; +update test set value = 19 where id = 2; +connection con2; +send update test set value = 12 where id = 1; +connection con1; +commit; +connection con2; +reap; +connection con3; +select * from test; # Shows 1 => 11, 2 => 19 +connection con2; +update test set value = 18 where id = 2; +connection con3; +select * from test; # Shows 1 => 11, 2 => 19 +connection con2; +commit; +connection con3; +select * from test; # Shows 1 => 12, 2 => 18 +commit; + + +### Predicate-Many-Preceders (PMP) -- RC does not prevent, RR prevents +--source hermitage_init.inc + +connection con1; +select * from test where value = 30; +connection con2; +insert into test (id, value) values(3, 30); +commit; +connection con1; +# RC: Returns the newly inserted row +# RR: Still returns nothing +select * from test where value % 3 = 0; +commit; + +--source hermitage_init.inc +connection con1; +update test set value = value + 10; +connection con2; +select variable_value into @a from information_schema.global_status where variable_name='rocksdb_snapshot_conflict_errors'; +select * from test; +send delete from test where value = 20; +connection con1; +commit; +connection con2; +if ($trx_isolation == "READ COMMITTED") +{ + reap; + # RC: Returns 2 => 30 + select * from test; +} +if ($trx_isolation == "REPEATABLE READ") +{ + --error ER_LOCK_DEADLOCK + reap; + select variable_value-@a from information_schema.global_status where variable_name='rocksdb_snapshot_conflict_errors'; + +} +commit; + + +### Lost Update (P4) -- RC does not prevent, RR prevents +--source hermitage_init.inc + +connection con1; +select * from test where id = 1; +connection con2; +select * from test where id = 1; +connection con1; +update test set value = 11 where id = 1; +connection con2; +send update test set value = 12 where id = 1; +connection con1; +commit; +connection con2; +if ($trx_isolation == "READ COMMITTED") +{ + reap; + # RC: 
Returns 1 => 12 + select * from test; +} +if ($trx_isolation == "REPEATABLE READ") +{ + --error ER_LOCK_DEADLOCK + reap; +} +commit; + + +### Read Skew (G-single) -- RC does not prevent, RR prevents +--source hermitage_init.inc + +connection con1; +select * from test where id = 1; +connection con2; +select * from test where id = 1; +select * from test where id = 2; +update test set value = 12 where id = 1; +update test set value = 18 where id = 2; +commit; +connection con1; +select * from test where id = 2; # RC shows 18, RR shows 20 +commit; + +# test using predicate dependencies +--source hermitage_init.inc + +connection con1; +select * from test where value % 5 = 0; +connection con2; +update test set value = 12 where value = 10; +commit; +connection con1; +# RC: returns 1 => 12, RR: returns nothing +select * from test where value % 3 = 0; +commit; + +# on a write predicate +--source hermitage_init.inc + +connection con1; +select * from test where id = 1; +connection con2; +select * from test; +update test set value = 12 where id = 1; +update test set value = 18 where id = 2; +commit; +connection con1; +if ($trx_isolation == "READ COMMITTED") +{ + delete from test where value = 20; # doesn't delete anything + select * from test where id = 2; # shows 2 => 18 +} +if ($trx_isolation == "REPEATABLE READ") +{ + --error ER_LOCK_DEADLOCK + delete from test where value = 20; +} +commit; + + +### Write Skew (G2-item) -- Neither RC/RR prevents +--source hermitage_init.inc + +connection con1; +select * from test where id in (1,2); +connection con2; +select * from test where id in (1,2); +connection con1; +update test set value = 11 where id = 1; +connection con2; +update test set value = 21 where id = 2; +connection con1; +commit; +connection con2; +commit; + +### Anti-Dependency Cycles (G2) -- Neither RC/RR prevents +--source hermitage_init.inc + +connection con1; +select * from test where value % 3 = 0; +connection con2; +select * from test where value % 3 = 0; 
+connection con1; +insert into test (id, value) values(3, 30); +connection con2; +insert into test (id, value) values(4, 42); +connection con1; +commit; +connection con2; +commit; +select * from test where value % 3 = 0; # Either. Returns 3 => 30, 4 => 42 +connection con1; +select * from test where value % 3 = 0; + + +connection default; +drop table test; + +disconnect con1; +disconnect con2; +disconnect con3; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/hermitage.test b/storage/rocksdb/mysql-test/rocksdb/t/hermitage.test new file mode 100644 index 0000000000000..e4138e8d89fea --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/hermitage.test @@ -0,0 +1,10 @@ +--source include/have_rocksdb.inc + +# Hermitage is an attempt to test transaction isolation levels. +# https://github.com/ept/hermitage + +let $trx_isolation = READ COMMITTED; +--source hermitage.inc + +let $trx_isolation = REPEATABLE READ; +--source hermitage.inc diff --git a/storage/rocksdb/mysql-test/rocksdb/t/hermitage_init.inc b/storage/rocksdb/mysql-test/rocksdb/t/hermitage_init.inc new file mode 100644 index 0000000000000..4f3f03efab0b2 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/hermitage_init.inc @@ -0,0 +1,8 @@ +connection con1; +truncate table test; +insert into test (id, value) values (1, 10), (2, 20); +begin; +connection con2; +begin; +connection con3; +begin; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/i_s_ddl.test b/storage/rocksdb/mysql-test/rocksdb/t/i_s_ddl.test new file mode 100644 index 0000000000000..7dc3c207eccd9 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/i_s_ddl.test @@ -0,0 +1,24 @@ +--source include/have_rocksdb.inc + +# +# Information Schema DDL +# + +--disable_warnings +DROP TABLE IF EXISTS is_ddl_t1; +DROP TABLE IF EXISTS is_ddl_t2; +--enable_warnings + +CREATE TABLE is_ddl_t1 (i INT, j INT, k INT, l INT, + PRIMARY KEY (i), KEY (j), KEY (k, l) COMMENT 'kl_cf') + ENGINE = ROCKSDB; + +CREATE TABLE is_ddl_t2 (x INT, y INT, z INT, + PRIMARY 
KEY (z, y) COMMENT 'zy_cf', + KEY (x)) ENGINE = ROCKSDB; +--sorted_result +SELECT TABLE_SCHEMA,TABLE_NAME,PARTITION_NAME,INDEX_NAME,INDEX_TYPE,KV_FORMAT_VERSION,CF FROM INFORMATION_SCHEMA.ROCKSDB_DDL WHERE TABLE_NAME like 'is_ddl_t%'; + +# cleanup +DROP TABLE is_ddl_t1; +DROP TABLE is_ddl_t2; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/index.inc b/storage/rocksdb/mysql-test/rocksdb/t/index.inc new file mode 100644 index 0000000000000..6b4e4ff233bd8 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/index.inc @@ -0,0 +1,121 @@ +# +# Basic syntax related to indexes: +# unique and non-unique keys, +# single- and multi-column keys, +# index option COMMENT. +# +# See other index* tests for operations +# which are less likely to be supported +# +# PRIMARY KEY syntax is covered in index_primary test. +# Index types BTREE|HASH -- in index_type_btree|hash tests. +# SPATIAL -- in type_spatial_indexes test. +# FULLTEXT -- in fulltext_search test. +# KEY_BLOCK_SIZE -- in index_key_block_size test. +# +# Usage to call the test from another test: +# +# A calling test may define $index_type, in which case +# USING clause will be added to the syntax. 
+# + +################################################ +# TODO: +# A part of the test is disabled because unique indexes +# are not currently supported +################################################ + + +let $using_index_type = ; +if ($index_type) +{ + let $using_index_type = USING $index_type; +} + + +eval CREATE TABLE t1 (a INT, + b CHAR(8), + pk INT PRIMARY KEY, + KEY $using_index_type (a) +) ENGINE=rocksdb; + +--replace_column 7 # +SHOW KEYS IN t1; +DROP TABLE t1; + +eval CREATE TABLE t1 (a INT, + b CHAR(8), + pk INT PRIMARY KEY, + KEY a_b $using_index_type (a,b) COMMENT 'a_b index' +) ENGINE=rocksdb; + +--replace_column 7 # +SHOW KEYS IN t1; +DROP TABLE t1; + +eval CREATE TABLE t1 (a INT, + b CHAR(8), + pk INT PRIMARY KEY, + KEY $using_index_type (a), + KEY $using_index_type (b) +) ENGINE=rocksdb; + +--replace_column 7 # +SHOW KEYS IN t1; +DROP TABLE t1; + +--disable_parsing + +eval CREATE TABLE t1 (a INT, + b CHAR(8), + pk INT PRIMARY KEY, + UNIQUE INDEX $using_index_type (a) +) ENGINE=rocksdb; + +--replace_column 7 # +SHOW KEYS IN t1; +INSERT INTO t1 (a,b) VALUES (1,'a'),(2,'b'); +--error ER_DUP_ENTRY,ER_DUP_KEY +INSERT INTO t1 (a,b) VALUES (1,'c'); + +DROP TABLE t1; + +--source drop_table_sync.inc + +--enable_parsing + +# +# ALTER TABLE +# + +CREATE TABLE t1 (a INT, b CHAR(8), pk INT AUTO_INCREMENT PRIMARY KEY) ENGINE=rocksdb; +INSERT INTO t1 (a,b) VALUES (100,'z'); + +eval ALTER TABLE t1 ADD KEY (a) $using_index_type COMMENT 'simple index on a'; +--replace_column 7 # +SHOW INDEX FROM t1; +ALTER TABLE t1 DROP KEY a; +DROP TABLE t1; + +--disable_parsing + +eval CREATE TABLE t1 (a INT, + b CHAR(8), + pk INT AUTO_INCREMENT PRIMARY KEY, + UNIQUE INDEX $using_index_type (a) +) ENGINE=rocksdb; + +--replace_column 7 # +SHOW KEYS IN t1; +INSERT INTO t1 (a,b) VALUES (1,'a'),(2,'b'); +--error ER_DUP_ENTRY,ER_DUP_KEY +INSERT INTO t1 (a,b) VALUES (1,'c'); + +ALTER TABLE t1 DROP INDEX a; +INSERT INTO t1 (a,b) VALUES (1,'c'); +--error ER_DUP_ENTRY +eval ALTER TABLE t1 
ADD UNIQUE INDEX a(a) $using_index_type; +DROP TABLE t1; + +--enable_parsing + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/index.test b/storage/rocksdb/mysql-test/rocksdb/t/index.test new file mode 100644 index 0000000000000..e0c6175b397ca --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/index.test @@ -0,0 +1,44 @@ +--source include/have_rocksdb.inc + +# +# Basic syntax related to indexes: +# unique and non-unique keys, +# single- and multi-column keys, +# index option COMMENT. +# +# See other index* tests for operations +# which are less likely to be supported +# +# PRIMARY KEY syntax is covered in index_primary test. +# Index types BTREE|HASH -- in index_type_btree|hash tests. +# SPATIAL -- in type_spatial_indexes test. +# FULLTEXT -- in fulltext_search test. +# KEY_BLOCK_SIZE -- in index_key_block_size test. +# + +# (Default index type) + +--source index.inc + +--echo # +--echo # Issue #376: MyRocks: ORDER BY optimizer is unable to use the index extension +--echo # +create table t0 (a int); +insert into t0 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9); +create table t1(a int); +insert into t1 select A.a + B.a* 10 + C.a * 100 from t0 A, t0 B, t0 C; +create table t2 ( + pk int not null, + a int not null, + b int not null, + primary key(pk), + key(a) +) engine=rocksdb; +insert into t2 select A.a, FLOOR(A.a/10), A.a from t1 A; + +--echo # This must have type=range, index=a, and must not have 'Using filesort': +--replace_column 9 # +explain select * from t2 force index (a) where a=0 and pk>=3 order by pk; + +drop table t0,t1,t2; + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/index_file_map-master.opt b/storage/rocksdb/mysql-test/rocksdb/t/index_file_map-master.opt new file mode 100644 index 0000000000000..436edf2b40c61 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/index_file_map-master.opt @@ -0,0 +1 @@ +--rocksdb_table_stats_sampling_pct=100 diff --git a/storage/rocksdb/mysql-test/rocksdb/t/index_file_map.test 
b/storage/rocksdb/mysql-test/rocksdb/t/index_file_map.test new file mode 100644 index 0000000000000..1021846c50834 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/index_file_map.test @@ -0,0 +1,51 @@ +--source include/have_rocksdb.inc + +# +# Information Schema index file map +# + +--disable_warnings +DROP TABLE IF EXISTS t1; +DROP TABLE IF EXISTS t2; +--enable_warnings + + +CREATE TABLE t1 (i INT PRIMARY KEY, j INT, INDEX(j)) ENGINE = ROCKSDB; +CREATE TABLE t2 (k INT PRIMARY KEY, l INT REFERENCES t1.i) ENGINE = ROCKSDB; + +INSERT INTO t1 VALUES (1,2), (2,4), (3,6), (4,8), (5,10); +INSERT INTO t2 VALUES (100,1), (200,2), (300,3), (400,4); + +COMMIT; + +# Flush memtable out to SST +SET GLOBAL rocksdb_force_flush_memtable_now = 1; + +############################################################################### +# Test that expected index_file_map data exists +############################################################################### + +# Return the data for the primary key of t1 +--replace_column 1 # 2 # 3 SSTNAME 5 # 6 # 7 # 8 # 9 # +SELECT * FROM INFORMATION_SCHEMA.ROCKSDB_INDEX_FILE_MAP +WHERE INDEX_NUMBER = + (SELECT INDEX_NUMBER FROM INFORMATION_SCHEMA.ROCKSDB_DDL + WHERE TABLE_NAME = 't1' AND INDEX_NAME = "PRIMARY"); + +# Return the data for the secondary index of t1 +--replace_column 1 # 2 # 3 SSTNAME 5 # 6 # 7 # 8 # 9 # +SELECT * FROM INFORMATION_SCHEMA.ROCKSDB_INDEX_FILE_MAP +WHERE INDEX_NUMBER = + (SELECT INDEX_NUMBER FROM INFORMATION_SCHEMA.ROCKSDB_DDL + WHERE TABLE_NAME = 't1' AND INDEX_NAME = "j"); + +# Return the data for the primary index of t2 +--replace_column 1 # 2 # 3 SSTNAME 5 # 6 # 7 # 8 # 9 # +SELECT * FROM INFORMATION_SCHEMA.ROCKSDB_INDEX_FILE_MAP +WHERE INDEX_NUMBER = + (SELECT INDEX_NUMBER FROM INFORMATION_SCHEMA.ROCKSDB_DDL + WHERE TABLE_NAME = 't2' AND INDEX_NAME = "PRIMARY"); + +# cleanup +DROP TABLE t1; +DROP TABLE t2; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/index_key_block_size.test 
b/storage/rocksdb/mysql-test/rocksdb/t/index_key_block_size.test new file mode 100644 index 0000000000000..f156aec0021c5 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/index_key_block_size.test @@ -0,0 +1,70 @@ +--source include/have_rocksdb.inc + +# +# KEY_BLOCK_SIZE index option. +# + +--disable_warnings +DROP TABLE IF EXISTS t1; +--enable_warnings + +CREATE TABLE t1 (a INT, + b CHAR(8), + pk INT PRIMARY KEY, + KEY (a) KEY_BLOCK_SIZE=8 +) ENGINE=rocksdb; + +--replace_column 7 # +SHOW KEYS IN t1; +DROP TABLE t1; + +CREATE TABLE t1 (a INT, + b CHAR(8), + pk INT PRIMARY KEY, + KEY ind1(b ASC) KEY_BLOCK_SIZE=0 +) ENGINE=rocksdb; + +--replace_column 7 # +SHOW INDEX IN t1; +DROP TABLE t1; + +CREATE TABLE t1 (a INT, + b CHAR(8), + PRIMARY KEY ind2(b(1) DESC) KEY_BLOCK_SIZE=32768 COMMENT 'big key_block_size value' +) ENGINE=rocksdb; + +--replace_column 7 # +SHOW INDEX IN t1; + +DROP TABLE t1; + +CREATE TABLE t1 (a INT, + b CHAR(8), + pk INT AUTO_INCREMENT PRIMARY KEY, + KEY a_b(a,b) KEY_BLOCK_SIZE=8192 +) ENGINE=rocksdb; + +--replace_column 7 # +SHOW INDEX IN t1; + +DROP TABLE t1; + +--source drop_table_sync.inc + +# +# ALTER TABLE +# + +CREATE TABLE t1 (a INT, + b CHAR(8), + PRIMARY KEY (b) +) ENGINE=rocksdb; + +INSERT INTO t1 (a,b) VALUES (100,'z'); + +ALTER TABLE t1 ADD KEY(a) KEY_BLOCK_SIZE 8192; +--replace_column 7 # +SHOW INDEX FROM t1; + +DROP TABLE t1; + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/index_primary.test b/storage/rocksdb/mysql-test/rocksdb/t/index_primary.test new file mode 100644 index 0000000000000..3abd2dd05feb8 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/index_primary.test @@ -0,0 +1,64 @@ +--source include/have_rocksdb.inc + +# +# Basic syntax related to primary keys +# + +--disable_warnings +DROP TABLE IF EXISTS t1; +--enable_warnings + +CREATE TABLE t1 (a INT PRIMARY KEY, + b CHAR(8) +) ENGINE=rocksdb; + +--replace_column 7 # +SHOW KEYS IN t1; + +INSERT INTO t1 (a,b) VALUES (1,'a'),(2,'b'); +--error ER_DUP_ENTRY 
+INSERT INTO t1 (a,b) VALUES (1,'c'); + +DROP TABLE t1; + +--error ER_MULTIPLE_PRI_KEY +CREATE TABLE t1 (a INT PRIMARY KEY, + b CHAR(8) PRIMARY KEY +) ENGINE=rocksdb; + +CREATE TABLE t1 (a INT, + b CHAR(8), + PRIMARY KEY (a,b) +) ENGINE=rocksdb; + +--replace_column 7 # +SHOW INDEX IN t1; +INSERT INTO t1 (a,b) VALUES (1,'a'),(1,'b'),(2,'a'),(2,'b'); +--error ER_DUP_ENTRY +INSERT INTO t1 (a,b) VALUES (1,'b'); + DROP TABLE t1; + +# KEY in a column definition means PK! + +CREATE TABLE t1 (a INT KEY, + b CHAR(8), + KEY (b) +) ENGINE=rocksdb; + +--replace_column 7 # +SHOW INDEX IN t1; +DROP TABLE t1; + +CREATE TABLE t1 (a INT, + b CHAR(8) PRIMARY KEY +) ENGINE=rocksdb; + +--replace_column 7 # +SHOW INDEX IN t1; + +--error ER_MULTIPLE_PRI_KEY +ALTER TABLE t1 ADD CONSTRAINT PRIMARY KEY pk (a); +--replace_column 7 # +SHOW KEYS IN t1; +DROP TABLE t1; + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/index_type_btree.test b/storage/rocksdb/mysql-test/rocksdb/t/index_type_btree.test new file mode 100644 index 0000000000000..4adc5b55329b5 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/index_type_btree.test @@ -0,0 +1,12 @@ +--source include/have_rocksdb.inc + +# +# Index type BTREE +# + +let $index_type = BTREE; + +--source index.inc + +let $index_type =; + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/index_type_hash.test b/storage/rocksdb/mysql-test/rocksdb/t/index_type_hash.test new file mode 100644 index 0000000000000..f3dc9cf5f10a0 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/index_type_hash.test @@ -0,0 +1,12 @@ +--source include/have_rocksdb.inc + +# +# Index type HASH +# + +let $index_type = HASH; + +--source index.inc + +let $index_type =; + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/information_schema-master.opt b/storage/rocksdb/mysql-test/rocksdb/t/information_schema-master.opt new file mode 100644 index 0000000000000..40b14167e17a5 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/information_schema-master.opt @@ -0,0 +1 @@ 
+--binlog_format=row --gtid_mode=ON --enforce_gtid_consistency --log_slave_updates diff --git a/storage/rocksdb/mysql-test/rocksdb/t/information_schema.test b/storage/rocksdb/mysql-test/rocksdb/t/information_schema.test new file mode 100644 index 0000000000000..b1adc16f6a2f9 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/information_schema.test @@ -0,0 +1,80 @@ +--source include/have_rocksdb.inc +--source include/have_log_bin.inc + +--source include/restart_mysqld.inc + +--disable_warnings +DROP TABLE IF EXISTS t1; +DROP TABLE IF EXISTS t2; +DROP TABLE IF EXISTS t3; +--enable_warnings + +--let $max_index_id = query_get_value(SELECT * from INFORMATION_SCHEMA.ROCKSDB_GLOBAL_INFO where type = 'MAX_INDEX_ID', VALUE, 1) +--replace_result $max_index_id max_index_id +select * from INFORMATION_SCHEMA.ROCKSDB_GLOBAL_INFO; +select count(*) from INFORMATION_SCHEMA.ROCKSDB_GLOBAL_INFO; + +select VALUE into @keysIn from INFORMATION_SCHEMA.ROCKSDB_COMPACTION_STATS where CF_NAME = 'default' and LEVEL = 'Sum' and TYPE = 'KeyIn'; + +CREATE TABLE t1 (i1 INT, i2 INT, PRIMARY KEY (i1)) ENGINE = ROCKSDB; +INSERT INTO t1 VALUES (1, 1), (2, 2), (3, 3); + +--let $MASTER_UUID = query_get_value(SELECT @@SERVER_UUID, @@SERVER_UUID, 1) +--let $max_index_id = query_get_value(SELECT * from INFORMATION_SCHEMA.ROCKSDB_GLOBAL_INFO where type = 'MAX_INDEX_ID', VALUE, 1) +--replace_result $MASTER_UUID uuid $max_index_id max_index_id +select * from INFORMATION_SCHEMA.ROCKSDB_GLOBAL_INFO; +select count(*) from INFORMATION_SCHEMA.ROCKSDB_GLOBAL_INFO; + +set global rocksdb_force_flush_memtable_now = true; +set global rocksdb_compact_cf='default'; +select case when VALUE-@keysIn >= 3 then 'true' else 'false' end from INFORMATION_SCHEMA.ROCKSDB_COMPACTION_STATS where CF_NAME = 'default' and LEVEL = 'Sum' and TYPE = 'KeyIn'; + +CREATE INDEX tindex1 on t1 (i1); +--let $start_max_index_id = query_get_value(SELECT * from INFORMATION_SCHEMA.ROCKSDB_GLOBAL_INFO where type = 'MAX_INDEX_ID', VALUE, 1) + 
+CREATE INDEX tindex2 on t1 (i2); +--let $end_max_index_id = query_get_value(SELECT * from INFORMATION_SCHEMA.ROCKSDB_GLOBAL_INFO where type = 'MAX_INDEX_ID', VALUE, 1) + +if ($end_max_index_id <= $start_max_index_id) { + echo Max index ID did not increase; +} + +select * from INFORMATION_SCHEMA.ROCKSDB_GLOBAL_INFO where TYPE = 'CF_FLAGS'; + +CREATE TABLE t2 ( + a int, + b int, + c int, + d int, + e int, + PRIMARY KEY (a) COMMENT "cf_a", + KEY (b) COMMENT "cf_b", + KEY (c) COMMENT "cf_c", + KEY (d) COMMENT "$per_index_cf", + KEY (e) COMMENT "rev:cf_d") ENGINE=ROCKSDB; + +select * from INFORMATION_SCHEMA.ROCKSDB_GLOBAL_INFO where TYPE = 'CF_FLAGS'; + +CREATE TABLE t3 (a INT, PRIMARY KEY (a)) ENGINE=ROCKSDB; +insert into t3 (a) values (1), (2), (3); +SET @ORIG_ROCKSDB_PAUSE_BACKGROUND_WORK = @@GLOBAL.ROCKSDB_PAUSE_BACKGROUND_WORK; +--let $t3_index_id = query_get_value(SELECT * FROM INFORMATION_SCHEMA.ROCKSDB_DDL WHERE TABLE_NAME = 't3', INDEX_NUMBER, 1) +--let $t3_cf_id = query_get_value(SELECT * FROM INFORMATION_SCHEMA.ROCKSDB_DDL WHERE TABLE_NAME = 't3', COLUMN_FAMILY, 1) +SHOW GLOBAL VARIABLES LIKE 'ROCKSDB_PAUSE_BACKGROUND_WORK'; +SET GLOBAL ROCKSDB_PAUSE_BACKGROUND_WORK=1; +SHOW GLOBAL VARIABLES LIKE 'ROCKSDB_PAUSE_BACKGROUND_WORK'; +SET GLOBAL ROCKSDB_PAUSE_BACKGROUND_WORK=1; +SHOW GLOBAL VARIABLES LIKE 'ROCKSDB_PAUSE_BACKGROUND_WORK'; +DROP TABLE t3; +--let $result = query_get_value("SELECT * FROM INFORMATION_SCHEMA.ROCKSDB_GLOBAL_INFO WHERE TYPE = 'DDL_DROP_INDEX_ONGOING' AND NAME LIKE 'cf_id:$t3_cf_id,index_id:$t3_index_id'", NAME, 1) +--echo $result +SET GLOBAL ROCKSDB_PAUSE_BACKGROUND_WORK=0; +SHOW GLOBAL VARIABLES LIKE 'ROCKSDB_PAUSE_BACKGROUND_WORK'; +--echo next line shouldn't cause assertion to fail +SET GLOBAL ROCKSDB_PAUSE_BACKGROUND_WORK=0; +SHOW GLOBAL VARIABLES LIKE 'ROCKSDB_PAUSE_BACKGROUND_WORK'; +SET GLOBAL ROCKSDB_PAUSE_BACKGROUND_WORK = @ORIG_ROCKSDB_PAUSE_BACKGROUND_WORK; + +DROP TABLE t1; +DROP TABLE t2; + diff --git 
a/storage/rocksdb/mysql-test/rocksdb/t/init_stats_procedure.inc b/storage/rocksdb/mysql-test/rocksdb/t/init_stats_procedure.inc new file mode 100644 index 0000000000000..dda253bc3465e --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/init_stats_procedure.inc @@ -0,0 +1,40 @@ +# This inc script creates two procedures -- save_read_stats() and +# get_read_stats(). get_read_stats() prints differential rocksdb_rows_read, +# rocksdb_rows_updated, and rocksdb_rows_deleted values since calling +# save_read_stats(). + +delimiter //; +create procedure save_read_stats() +begin + /*select rows_requested into @rq from information_schema.table_statistics + where table_schema=database() and table_name='t1';*/ + select rows_read into @rr_is from information_schema.table_statistics + where table_schema=database() and table_name='t1'; + select variable_value into @rr from information_schema.global_status + where variable_name='rocksdb_rows_read'; + select variable_value into @ru from information_schema.global_status + where variable_name='rocksdb_rows_updated'; + select variable_value into @rd from information_schema.global_status + where variable_name='rocksdb_rows_deleted'; +end// + +create procedure get_read_stats() +begin + /*select rows_requested - @rq as rows_requested from + information_schema.table_statistics + where table_schema=database() and table_name='t1';*/ + select rows_read - @rr_is as rows_read_userstat from + information_schema.table_statistics + where table_schema=database() and table_name='t1'; + select variable_value - @rr as rows_read from + information_schema.global_status + where variable_name='rocksdb_rows_read'; + select variable_value - @ru as rows_updated from + information_schema.global_status + where variable_name='rocksdb_rows_updated'; + select variable_value - @rd as rows_deleted from + information_schema.global_status + where variable_name='rocksdb_rows_deleted'; +end// +delimiter ;// + diff --git 
a/storage/rocksdb/mysql-test/rocksdb/t/innodb_i_s_tables_disabled-master.opt b/storage/rocksdb/mysql-test/rocksdb/t/innodb_i_s_tables_disabled-master.opt new file mode 100644 index 0000000000000..b3565b5fa82ab --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/innodb_i_s_tables_disabled-master.opt @@ -0,0 +1,30 @@ +--loose-enable-innodb_trx +--loose-enable-innodb_file_status +--loose-enable-innodb_locks +--loose-enable-innodb_lock_waits +--loose-enable-innodb_cmp +--loose-enable-innodb_cmp_reset +--loose-enable-innodb_cmp_per_index +--loose-enable-innodb_cmp_per_index_reset +--loose-enable-innodb_cmpmem +--loose-enable-innodb_cmpmem_reset +--loose-enable-innodb_metrics +--loose-enable-innodb_ft_default_stopword +--loose-enable-innodb_ft_deleted +--loose-enable-innodb_ft_being_deleted +--loose-enable-innodb_ft_index_cache +--loose-enable-innodb_ft_index_table +--loose-enable-innodb_ft_config +--loose-enable-innodb_buffer_pool_stats +--loose-enable-innodb_buffer_page +--loose-enable-innodb_buffer_page_lru +--loose-enable-innodb_sys_tables +--loose-enable-innodb_sys_tablestats +--loose-enable-innodb_sys_indexes +--loose-enable-innodb_sys_columns +--loose-enable-innodb_sys_fields +--loose-enable-innodb_sys_foreign +--loose-enable-innodb_sys_foreign_cols +--loose-enable-innodb_sys_tablespaces +--loose-enable-innodb_sys_datafiles +--loose-enable-innodb_sys_docstore_fields diff --git a/storage/rocksdb/mysql-test/rocksdb/t/innodb_i_s_tables_disabled.test b/storage/rocksdb/mysql-test/rocksdb/t/innodb_i_s_tables_disabled.test new file mode 100644 index 0000000000000..4ff48e130898d --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/innodb_i_s_tables_disabled.test @@ -0,0 +1,43 @@ +--source include/have_rocksdb.inc + +# Make sure that the InnoDb information schema tables are disabled when InnoDB +# is turned off and attempting to access them doesn't crash. 
+ +# Disable warnings, as the table names in warnings appear in lower or uppercase +# depending on platform + +--disable_warnings + +SELECT * FROM INFORMATION_SCHEMA.INNODB_TRX; +#Not in MariaDB: SELECT * FROM INFORMATION_SCHEMA.INNODB_FILE_STATUS; + +SELECT * FROM INFORMATION_SCHEMA.INNODB_LOCKS; +SELECT * FROM INFORMATION_SCHEMA.INNODB_LOCK_WAITS; +SELECT * FROM INFORMATION_SCHEMA.INNODB_CMP; +SELECT * FROM INFORMATION_SCHEMA.INNODB_CMP_RESET; +SELECT * FROM INFORMATION_SCHEMA.INNODB_CMP_PER_INDEX; +SELECT * FROM INFORMATION_SCHEMA.INNODB_CMP_PER_INDEX_RESET; +SELECT * FROM INFORMATION_SCHEMA.INNODB_CMPMEM; +SELECT * FROM INFORMATION_SCHEMA.INNODB_CMPMEM_RESET; +SELECT * FROM INFORMATION_SCHEMA.INNODB_METRICS; +SELECT * FROM INFORMATION_SCHEMA.INNODB_FT_DEFAULT_STOPWORD; +SELECT * FROM INFORMATION_SCHEMA.INNODB_FT_DELETED; +SELECT * FROM INFORMATION_SCHEMA.INNODB_FT_BEING_DELETED; +SELECT * FROM INFORMATION_SCHEMA.INNODB_FT_INDEX_CACHE; +SELECT * FROM INFORMATION_SCHEMA.INNODB_FT_INDEX_TABLE; +SELECT * FROM INFORMATION_SCHEMA.INNODB_FT_CONFIG; +SELECT * FROM INFORMATION_SCHEMA.INNODB_BUFFER_POOL_STATS; +SELECT * FROM INFORMATION_SCHEMA.INNODB_BUFFER_PAGE; +SELECT * FROM INFORMATION_SCHEMA.INNODB_BUFFER_PAGE_LRU; +SELECT * FROM INFORMATION_SCHEMA.INNODB_SYS_TABLES; +SELECT * FROM INFORMATION_SCHEMA.INNODB_SYS_TABLESTATS; +SELECT * FROM INFORMATION_SCHEMA.INNODB_SYS_INDEXES; +SELECT * FROM INFORMATION_SCHEMA.INNODB_SYS_COLUMNS; +SELECT * FROM INFORMATION_SCHEMA.INNODB_SYS_FIELDS; +SELECT * FROM INFORMATION_SCHEMA.INNODB_SYS_FOREIGN; +SELECT * FROM INFORMATION_SCHEMA.INNODB_SYS_FOREIGN_COLS; +SELECT * FROM INFORMATION_SCHEMA.INNODB_SYS_TABLESPACES; +SELECT * FROM INFORMATION_SCHEMA.INNODB_SYS_DATAFILES; +#Not in MariaDB: SELECT * FROM INFORMATION_SCHEMA.INNODB_SYS_DOCSTORE_FIELDS; + +--enable_warnings diff --git a/storage/rocksdb/mysql-test/rocksdb/t/insert.test b/storage/rocksdb/mysql-test/rocksdb/t/insert.test new file mode 100644 index 
0000000000000..14cfe1cadb86f --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/insert.test @@ -0,0 +1,99 @@ +--source include/have_rocksdb.inc + +# +# Basic INSERT statements +# + +--disable_warnings +DROP TABLE IF EXISTS t1, t2; +--enable_warnings + +CREATE TABLE t1 (a INT, b CHAR(8), pk INT AUTO_INCREMENT PRIMARY KEY) ENGINE=rocksdb; + +# INSERT [INTO] .. VALUES|VALUE .. + +INSERT INTO t1 VALUES (100,'foobar',100),(1,'a',101),(2,'b',103),(3,'c',104),(4,'d',105),(5,'e',106); +--sorted_result +SELECT a,b FROM t1; + +INSERT t1 VALUE (10,'foo',107),(11,'abc',108); +--sorted_result +SELECT a,b FROM t1; + +INSERT INTO t1 (b,a) VALUES ('test',0); +--sorted_result +SELECT a,b FROM t1; + +INSERT INTO t1 VALUES (DEFAULT,DEFAULT,NULL); +--sorted_result +SELECT a,b FROM t1; + +INSERT t1 (a) VALUE (10),(20); +--sorted_result +SELECT a,b FROM t1; + +# INSERT [INTO] .. SET + +INSERT INTO t1 SET a = 11, b = 'f'; +--sorted_result +SELECT a,b FROM t1; + +INSERT t1 SET b = DEFAULT; +--sorted_result +SELECT a,b FROM t1; + + +# INSERT .. 
SELECT + +CREATE TABLE t2 (a INT, b CHAR(8), pk INT AUTO_INCREMENT PRIMARY KEY) ENGINE=rocksdb; + +INSERT INTO t2 SELECT a,b,pk FROM t1; +INSERT INTO t1 (a) SELECT a FROM t2 WHERE b = 'foo'; +--sorted_result +SELECT a,b FROM t1; + +INSERT t1 (a,b) SELECT a,b FROM t1; +--sorted_result +SELECT a,b FROM t1; + +DROP TABLE t1, t2; + +# +# Transactional INSERT +# + +CREATE TABLE t1 (a INT, b CHAR(8), pk INT AUTO_INCREMENT PRIMARY KEY) ENGINE=rocksdb; + +BEGIN; +INSERT INTO t1 (a,b) VALUES (1,'a'),(2,'b'),(3,'c'),(4,'d'),(5,'e'),(100,'foo'); +INSERT t1 (a,b) VALUE (10,'foo'),(11,'abc'); +COMMIT; +--sorted_result +SELECT a,b FROM t1; + +BEGIN; +INSERT INTO t1 (b,a) VALUES ('test',0); +SAVEPOINT spt1; +INSERT INTO t1 (a,b) VALUES (DEFAULT,DEFAULT); +RELEASE SAVEPOINT spt1; +INSERT INTO t1 (a,b) VALUES (DEFAULT,DEFAULT); +ROLLBACK; +--sorted_result +SELECT a,b FROM t1; + +BEGIN; +INSERT t1 (a) VALUE (10),(20); +SAVEPOINT spt1; +INSERT INTO t1 SET a = 11, b = 'f'; +INSERT t1 SET b = DEFAULT; +--error ER_UNKNOWN_ERROR +ROLLBACK TO SAVEPOINT spt1; +INSERT INTO t1 (b,a) VALUES ('test1',10); +--error ER_UNKNOWN_ERROR +COMMIT; +--sorted_result +SELECT a,b FROM t1; + +DROP TABLE t1; + + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/insert_optimized_config-master.opt b/storage/rocksdb/mysql-test/rocksdb/t/insert_optimized_config-master.opt new file mode 100644 index 0000000000000..a0bf5759ec4bd --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/insert_optimized_config-master.opt @@ -0,0 +1,7 @@ +--rocksdb_write_disable_wal=1 +--rocksdb_default_cf_options=write_buffer_size=16k;target_file_size_base=16k;level0_file_num_compaction_trigger=4;level0_slowdown_writes_trigger=256;level0_stop_writes_trigger=256;max_write_buffer_number=16;compression_per_level=kNoCompression;memtable=vector:1024 +--rocksdb_override_cf_options=__system__={memtable=skip_list:16} +--rocksdb_compaction_sequential_deletes=0 +--rocksdb_compaction_sequential_deletes_window=0 
+--rocksdb_allow_concurrent_memtable_write=0 + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/insert_optimized_config.test b/storage/rocksdb/mysql-test/rocksdb/t/insert_optimized_config.test new file mode 100644 index 0000000000000..3e1cf7375e801 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/insert_optimized_config.test @@ -0,0 +1,41 @@ +--source include/have_rocksdb.inc + +--disable_warnings +DROP TABLE IF EXISTS t1; +--enable_warnings + +create table t1( + id bigint not null primary key, + i1 bigint, #unique + i2 bigint, #repeating + c1 varchar(20), #unique + c2 varchar(20), #repeating + index t1_2(i1) +) engine=rocksdb; + +--disable_query_log +set rocksdb_bulk_load=1; +let $i=0; +while ($i<50000) +{ + inc $i; + eval insert t1(id, i1, i2, c1, c2) values($i, $i, $i div 10, $i, $i div 10); +} +set rocksdb_bulk_load=0; +--enable_query_log + +select count(*), sum(id), sum(i1), sum(i2) from t1; + +# reload without load optimized config +let $restart_file= $MYSQLTEST_VARDIR/tmp/mysqld.1.expect; +--exec echo "wait" > $restart_file +--shutdown_server 10 +--source include/wait_until_disconnected.inc +-- exec echo "restart:--rocksdb_write_disable_wal=0 --rocksdb_default_cf_options=write_buffer_size=64k;target_file_size_base=64k;max_bytes_for_level_base=1m;compression_per_level=kNoCompression;" > $MYSQLTEST_VARDIR/tmp/mysqld.1.expect +-- enable_reconnect +-- source include/wait_until_connected_again.inc + +select count(*), sum(id), sum(i1), sum(i2) from t1; + +drop table t1; + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/insert_with_keys.test b/storage/rocksdb/mysql-test/rocksdb/t/insert_with_keys.test new file mode 100644 index 0000000000000..b2f37a0799976 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/insert_with_keys.test @@ -0,0 +1,93 @@ +--source include/have_rocksdb.inc + +# +# INSERT statements for tables with keys +# + +################################################## +# TODO: +# A part of the test is disabled because currently +# 
unique indexes are not supported +################################################## + +--disable_warnings +DROP TABLE IF EXISTS t1; +--enable_warnings + +CREATE TABLE t1 (a INT, b CHAR(8), pk INT AUTO_INCREMENT PRIMARY KEY, KEY(b)) ENGINE=rocksdb; +INSERT INTO t1 (a,b) VALUES (1,'a'),(2,'b'),(3,'c'),(4,'d'),(5,'e'); +INSERT INTO t1 (a,b) VALUES (100,'a'), (6,'f'); +INSERT INTO t1 (a,b) VALUES (30,'m'),(29,'n'); +INSERT INTO t1 (a,b) VALUES (1,'a'),(12345,'z'); +INSERT INTO t1 (a,b) VALUES (3,'a'),(0,''); +--sorted_result +SELECT a,b FROM t1; +DROP TABLE t1; + +--echo #---------------------------------------- +--echo # UNIQUE KEYS are not supported currently +--echo #----------------------------------------- + +--disable_parsing + +CREATE TABLE t1 (a INT, b CHAR(8), pk INT AUTO_INCREMENT PRIMARY KEY, UNIQUE INDEX(a)) ENGINE=rocksdb; +INSERT INTO t1 (a,b) VALUES (1,'a'),(2,'b'),(3,'c'),(4,'d'),(5,'e'); +INSERT INTO t1 (a,b) VALUES (100,'a'), (6,'f'); +INSERT INTO t1 (a,b) VALUES (30,'m'),(29,'n'); +--error ER_DUP_ENTRY +INSERT INTO t1 (a,b) VALUES (1,'a'),(12345,'z'); +--error ER_DUP_ENTRY +INSERT INTO t1 (a,b) VALUES (3,'a'),(0,''); +INSERT INTO t1 (a,b) VALUES (0,''); +--sorted_result +SELECT a,b FROM t1; + +INSERT IGNORE INTO t1 (a,b) VALUES (1,'a'),(12345,'z'); +INSERT INTO t1 (a,b) VALUES (3,'a'),(4,'d') ON DUPLICATE KEY UPDATE a = a+10; + +--sorted_result +SELECT a,b FROM t1; + +DROP TABLE t1; + +CREATE TABLE t1 (a INT, b CHAR(8), pk INT AUTO_INCREMENT PRIMARY KEY, UNIQUE INDEX(a,b)) ENGINE=rocksdb; +INSERT INTO t1 (a,b) VALUES (1,'a'),(2,'b'),(3,'c'),(4,'d'),(5,'e'); +INSERT INTO t1 (a,b) VALUES (100,'a'), (6,'f'); +INSERT INTO t1 (a,b) VALUES (30,'m'),(29,'n'); +INSERT INTO t1 (a,b) VALUES (100,'b'), (2,'c'); +--error ER_DUP_ENTRY +INSERT INTO t1 (a,b) VALUES (1,'a'),(12345,'z'); +--sorted_result +SELECT a,b FROM t1; + +INSERT IGNORE INTO t1 (a,b) VALUES (1,'a'),(12345,'z'); +INSERT INTO t1 (a,b) VALUES (1,'a'),(12345,'z') ON DUPLICATE KEY UPDATE a = 
a+VALUES(a); +--sorted_result +SELECT a,b FROM t1; + +--error ER_DUP_ENTRY +INSERT INTO t1 (a,b) VALUES (101,'x'),(101,'x'); +DROP TABLE t1; + +--enable_parsing + + +CREATE TABLE t1 (a INT PRIMARY KEY, b CHAR(8)) ENGINE=rocksdb; +INSERT INTO t1 (a,b) VALUES (1,'a'),(2,'b'),(3,'c'),(4,'d'),(5,'e'); +INSERT INTO t1 (a,b) VALUES (100,'a'), (6,'f'); +INSERT INTO t1 (a,b) VALUES (30,'m'),(29,'n'); +--error ER_DUP_ENTRY +INSERT INTO t1 (a,b) VALUES (1,'a'),(12345,'z'); +--error ER_DUP_ENTRY +INSERT INTO t1 (a,b) VALUES (3,'a'),(0,''); +INSERT INTO t1 (a,b) VALUES (0,''); +--sorted_result +SELECT a,b FROM t1; + +INSERT IGNORE INTO t1 (a,b) VALUES (1,'a'),(12345,'z'); +INSERT INTO t1 (a,b) VALUES (1,'a'),(12345,'z') ON DUPLICATE KEY UPDATE b = CONCAT(b,b); +--sorted_result +SELECT a,b FROM t1; + +DROP TABLE t1; + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/issue100.test b/storage/rocksdb/mysql-test/rocksdb/t/issue100.test new file mode 100644 index 0000000000000..b0b3eb7a8fd6b --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/issue100.test @@ -0,0 +1,23 @@ +--source include/have_rocksdb.inc + +create table t1 ( + id int, + value int, + primary key (id) +) engine=rocksdb; + +insert into t1 values(1,1),(2,2); +set autocommit=0; + +begin; +insert into t1 values (50,50); +select * from t1; + +update t1 set id=id+100; + +select * from t1; + +rollback; +set autocommit=1; + +drop table t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/issue100_delete-master.opt b/storage/rocksdb/mysql-test/rocksdb/t/issue100_delete-master.opt new file mode 100644 index 0000000000000..436edf2b40c61 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/issue100_delete-master.opt @@ -0,0 +1 @@ +--rocksdb_table_stats_sampling_pct=100 diff --git a/storage/rocksdb/mysql-test/rocksdb/t/issue100_delete.test b/storage/rocksdb/mysql-test/rocksdb/t/issue100_delete.test new file mode 100644 index 0000000000000..743bf7dd7a17e --- /dev/null +++ 
b/storage/rocksdb/mysql-test/rocksdb/t/issue100_delete.test @@ -0,0 +1,19 @@ +--source include/have_rocksdb.inc + +create table ten(a int primary key); +insert into ten values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9); +create table one_k(a int primary key); +insert into one_k select A.a + B.a* 10 + C.a * 100 from ten A, ten B, ten C; + +create table t100(pk int primary key, a int, b int, key(a)); +insert into t100 select a,a,a from test.one_k; + +set global rocksdb_force_flush_memtable_now=1; +select num_rows, entry_deletes, entry_singledeletes from information_schema.rocksdb_index_file_map where index_number = (select max(index_number) from information_schema.rocksdb_index_file_map) order by entry_deletes, entry_singledeletes; + +update t100 set a=a+1; +set global rocksdb_force_flush_memtable_now=1; +select num_rows, entry_deletes, entry_singledeletes from information_schema.rocksdb_index_file_map where index_number = (select max(index_number) from information_schema.rocksdb_index_file_map) order by entry_deletes, entry_singledeletes; + +drop table ten, t100, one_k; + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/issue111.test b/storage/rocksdb/mysql-test/rocksdb/t/issue111.test new file mode 100644 index 0000000000000..671ea4708d60a --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/issue111.test @@ -0,0 +1,38 @@ +--source include/have_rocksdb.inc + +connect (con2,localhost,root,,); +connection default; + +create table t1 ( + pk int not null primary key, + col1 int not null, + col2 int not null, + key(col1) +) engine=rocksdb; + +create table ten(a int primary key); +insert into ten values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9); + +create table one_k(a int primary key); +insert into one_k select A.a + B.a* 10 + C.a * 100 from ten A, ten B, ten C; + +insert into t1 select a,a,a from one_k; + +--echo # Start the transaction, get the snapshot +begin; +select * from t1 where col1<10; + +--echo # Connect with another connection and make a conflicting change 
+connection con2; + +begin; +update t1 set col2=123456 where pk=0; +commit; + +connection default; + +--error ER_LOCK_DEADLOCK +update t1 set col2=col2+1 where col1 < 10 limit 5; + +disconnect con2; +drop table t1, ten, one_k; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/issue290.test b/storage/rocksdb/mysql-test/rocksdb/t/issue290.test new file mode 100644 index 0000000000000..5ea8799c627f3 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/issue290.test @@ -0,0 +1,40 @@ +--source include/have_rocksdb.inc + +CREATE TABLE `linktable` ( + `id1` bigint(20) unsigned NOT NULL DEFAULT '0', + `id1_type` int(10) unsigned NOT NULL DEFAULT '0', + `id2` bigint(20) unsigned NOT NULL DEFAULT '0', + `id2_type` int(10) unsigned NOT NULL DEFAULT '0', + `link_type` bigint(20) unsigned NOT NULL DEFAULT '0', + `visibility` tinyint(3) NOT NULL DEFAULT '0', + `data` varchar(255) NOT NULL DEFAULT '', + `time` bigint(20) unsigned NOT NULL DEFAULT '0', + `version` int(11) unsigned NOT NULL DEFAULT '0', + PRIMARY KEY (link_type, `id1`,`id2`) COMMENT 'cf_link_pk', + KEY `id1_type` (`id1`,`link_type`,`visibility`,`time`,`id2`,`version`,`data`) COMMENT 'rev:cf_link_id1_type' +) ENGINE=RocksDB DEFAULT COLLATE=latin1_bin; +--disable_query_log +let $i = 1; +while ($i <= 10000) { + let $insert = INSERT INTO linktable (id1, link_type, id2) values (1, 1, $i); + inc $i; + eval $insert; +} +--enable_query_log +set global rocksdb_force_flush_memtable_now=1; + +insert into linktable (id1, link_type, id2) values (2, 1, 1); +insert into linktable (id1, link_type, id2) values (2, 1, 2); +insert into linktable (id1, link_type, id2) values (2, 1, 3); +insert into linktable (id1, link_type, id2) values (2, 1, 4); +insert into linktable (id1, link_type, id2) values (2, 1, 5); +insert into linktable (id1, link_type, id2) values (2, 1, 6); +insert into linktable (id1, link_type, id2) values (2, 1, 7); +insert into linktable (id1, link_type, id2) values (2, 1, 8); +insert into linktable (id1, 
link_type, id2) values (2, 1, 9); +insert into linktable (id1, link_type, id2) values (2, 1, 10); + +--replace_column 9 # +explain select id1, id2, link_type, data from linktable force index(primary) where id1=2 and link_type=1 and (id2=1 or id2=2 or id2=3 or id2=4 or id2=5); + +drop table linktable; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/issue314.test b/storage/rocksdb/mysql-test/rocksdb/t/issue314.test new file mode 100644 index 0000000000000..2059eef2195b3 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/issue314.test @@ -0,0 +1,16 @@ +--source include/have_rocksdb.inc + +--disable_warnings +drop table if exists t1; +--enable_warnings + +SET SESSION TRANSACTION ISOLATION LEVEL SERIALIZABLE; +CREATE TABLE t1(a int); +SET TRANSACTION ISOLATION LEVEL READ COMMITTED; +INSERT INTO t1 VALUES(1); +--error ER_UNKNOWN_ERROR +select * from t1; +SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED; +select * from t1; +drop table t1; + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/issue495.test b/storage/rocksdb/mysql-test/rocksdb/t/issue495.test new file mode 100644 index 0000000000000..5dcc7c19ba957 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/issue495.test @@ -0,0 +1,34 @@ +--source include/have_rocksdb.inc +--source include/have_partition.inc +--disable_warnings +drop table if exists t; +--enable_warnings + +create table t ( + a int, + b int, + c varchar(12249) collate latin1_bin, + d datetime, + e int, + f int, + g blob, + h int, + i int, + key (b,e), + key (h,b) +) engine=rocksdb +partition by linear hash (i) partitions 8 ; + +insert into t values (1,1,'a',now(),1,1,'a',1,1); +insert into t values (1,1,'a',now(),1,1,'a',1,1); +insert into t values (1,1,'a',now(),1,1,'a',1,1); +insert into t values (1,1,'a',now(),1,1,'a',1,1); +insert into t values (1,1,'a',now(),1,1,'a',1,1); +insert into t values (1,1,'a',now(),1,1,'a',1,1); +insert into t values (1,1,'a',now(),1,1,'a',1,1); +insert into t values (1,1,'a',now(),1,1,'a',1,1); 
+select i from t group by h; +select i from t group by h; + +drop table t; + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/level_read_committed.test b/storage/rocksdb/mysql-test/rocksdb/t/level_read_committed.test new file mode 100644 index 0000000000000..bedf87a8efa19 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/level_read_committed.test @@ -0,0 +1,6 @@ +--source include/have_rocksdb.inc + +let $trx_isolation = READ COMMITTED; + +--source transaction_isolation.inc + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/level_read_uncommitted.test b/storage/rocksdb/mysql-test/rocksdb/t/level_read_uncommitted.test new file mode 100644 index 0000000000000..8ee3af60bf3a5 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/level_read_uncommitted.test @@ -0,0 +1,6 @@ +--source include/have_rocksdb.inc + +let $trx_isolation = READ UNCOMMITTED; +--source transaction_isolation.inc +--source consistent_snapshot.inc + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/level_repeatable_read.test b/storage/rocksdb/mysql-test/rocksdb/t/level_repeatable_read.test new file mode 100644 index 0000000000000..cf29073f69ec9 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/level_repeatable_read.test @@ -0,0 +1,5 @@ +--source include/have_rocksdb.inc + +let $trx_isolation = REPEATABLE READ; +--source transaction_isolation.inc + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/level_serializable.test b/storage/rocksdb/mysql-test/rocksdb/t/level_serializable.test new file mode 100644 index 0000000000000..9b5db0e899823 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/level_serializable.test @@ -0,0 +1,5 @@ +--source include/have_rocksdb.inc + +let $trx_isolation = SERIALIZABLE; +--source transaction_isolation.inc + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/loaddata.inc b/storage/rocksdb/mysql-test/rocksdb/t/loaddata.inc new file mode 100644 index 0000000000000..5d3678f5f27c3 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/loaddata.inc @@ 
-0,0 +1,117 @@ +--source include/have_rocksdb.inc + +# +# Basic LOAD DATA statements +# + +--disable_warnings +DROP TABLE IF EXISTS t1; +--enable_warnings + +CREATE TABLE t1 (a INT, b CHAR(8), pk INT AUTO_INCREMENT PRIMARY KEY) ENGINE=rocksdb; + +let $datadir = `SELECT @@datadir`; + +--write_file $datadir/se_loaddata.dat +1,foo, +2,bar, +3,, +4,abc, +EOF + +--replace_result $datadir +eval +LOAD DATA INFILE '$datadir/se_loaddata.dat' INTO TABLE t1 + FIELDS TERMINATED BY ',' (a,b); +--sorted_result +SELECT a,b FROM t1; + +--replace_result $datadir +eval +LOAD DATA LOCAL INFILE '$datadir/se_loaddata.dat' INTO TABLE t1 + CHARACTER SET utf8 COLUMNS TERMINATED BY ',' + ESCAPED BY '/' (a,b); +--sorted_result +SELECT a,b FROM t1; + +--remove_file $datadir/se_loaddata.dat +--write_file $datadir/se_loaddata.dat +5;YYY; +102;'zzz'; +0;'test'; +EOF + +--replace_result $datadir +eval +LOAD DATA LOCAL INFILE '$datadir/se_loaddata.dat' INTO TABLE t1 + FIELDS TERMINATED BY ';' + (a) SET b='loaded'; + +--sorted_result +SELECT a,b FROM t1; + +--remove_file $datadir/se_loaddata.dat +--write_file $datadir/se_loaddata.dat +prefix:5;'foo'; +prefix:6;''; +prefix:100;foo; +prefix:7;'test';suffix +101;abc; +102;'z'; +prefix:0;; +EOF + +--replace_result $datadir +eval +LOAD DATA INFILE '$datadir/se_loaddata.dat' INTO TABLE t1 + FIELDS TERMINATED BY ';' + OPTIONALLY ENCLOSED BY '''' + LINES STARTING BY 'prefix:' + IGNORE 2 LINES (a,b); + +--sorted_result +SELECT a,b FROM t1; + +--remove_file $datadir/se_loaddata.dat +--write_file $datadir/se_loaddata.dat +1 foo +2 bar +3 +4 abc +EOF + +--replace_result $datadir +eval +LOAD DATA INFILE '$datadir/se_loaddata.dat' INTO TABLE t1; +--sorted_result +SELECT a,b FROM t1; +--remove_file $datadir/se_loaddata.dat + + +--write_file $datadir/se_replacedata.dat +1 aaa +2 bbb +3 ccc +4 ddd +EOF + +--replace_result $datadir +if ($skip_unique_check == 1) +{ + --error ER_UNKNOWN_ERROR + eval + LOAD DATA INFILE '$datadir/se_replacedata.dat' REPLACE INTO TABLE 
t1; +} +if ($skip_unique_check == 0) +{ + eval + LOAD DATA INFILE '$datadir/se_replacedata.dat' REPLACE INTO TABLE t1; +} +--sorted_result +SELECT a,b FROM t1; +--remove_file $datadir/se_replacedata.dat + + +# Cleanup +DROP TABLE t1; + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/loaddata.test b/storage/rocksdb/mysql-test/rocksdb/t/loaddata.test new file mode 100644 index 0000000000000..1f59d5ce204a0 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/loaddata.test @@ -0,0 +1,7 @@ +--source include/have_rocksdb.inc + +let $skip_unique_check = 0; +--source loaddata.inc +let $skip_unique_check = 1; +set session unique_checks=0; +--source loaddata.inc diff --git a/storage/rocksdb/mysql-test/rocksdb/t/lock.test b/storage/rocksdb/mysql-test/rocksdb/t/lock.test new file mode 100644 index 0000000000000..015a08bef5733 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/lock.test @@ -0,0 +1,202 @@ +--source include/have_rocksdb.inc + +# Standard table locking: +# LOCK TABLE .. READ +# LOCK TABLE .. [LOW_PRIORITY] WRITE +# UNLOCK TABLES +# +# and global locking: +# FLUSH TABLES [..] WITH READ LOCK +# +############################################ +# TODO: +# A part of the test is currently disabled +# because temporary tables are not supported +############################################ + +--enable_connect_log + +# Save the initial number of concurrent sessions. 
+--source include/count_sessions.inc + +connect (con1,localhost,root,,); +SET lock_wait_timeout=1; + +connection default; + +--disable_warnings +DROP TABLE IF EXISTS t1, t2, t3; +--enable_warnings + +CREATE TABLE t1 (id INT, id2 INT, pk INT AUTO_INCREMENT PRIMARY KEY) ENGINE=rocksdb; +INSERT INTO t1 (id,id2) VALUES (1,1),(1,2),(1,3); + +# LOW_PRIORITY has no effect, but is still syntactically correct +LOCK TABLE t1 LOW_PRIORITY WRITE; +SELECT id2,COUNT(DISTINCT id) FROM t1 GROUP BY id2; + +UPDATE t1 SET id=-1 WHERE id=1; + +connection con1; +# With WRITE lock held by connection 'default', +# nobody else can access the table +--error ER_LOCK_WAIT_TIMEOUT +SELECT id,id2 FROM t1; +--error ER_LOCK_WAIT_TIMEOUT +LOCK TABLE t1 READ; + +connection default; +LOCK TABLE t1 READ; +--error ER_TABLE_NOT_LOCKED_FOR_WRITE +UPDATE t1 SET id=1 WHERE id=1; + +connection con1; +# With READ lock held by connection 'default', +# it should be possible to read from the table +# or acquire another READ lock, +# but not update it or acquire WRITE lock +SELECT COUNT(DISTINCT id) FROM t1; +--error ER_LOCK_WAIT_TIMEOUT +UPDATE t1 SET id=2 WHERE id=2; +--error ER_LOCK_WAIT_TIMEOUT +LOCK TABLE t1 WRITE; +LOCK TABLE t1 READ; +UNLOCK TABLES; + + +--connection default + +--error ER_TABLE_NOT_LOCKED +CREATE TABLE t2 (a INT, b CHAR(8), PRIMARY KEY(a)) ENGINE=rocksdb; + +--disable_parsing + +CREATE TEMPORARY TABLE t2 (a INT, b CHAR(8), PRIMARY KEY(a)) ENGINE=rocksdb; +DROP TABLE IF EXISTS t2; + +--enable_parsing + +UNLOCK TABLES; + +CREATE TABLE t2 (id INT, id2 INT, pk INT AUTO_INCREMENT PRIMARY KEY) ENGINE=rocksdb; +LOCK TABLE t1 WRITE, t2 WRITE; +INSERT INTO t2 (id,id2) SELECT id,id2 FROM t1; +UPDATE t1 SET id=1 WHERE id=-1; +DROP TABLE t1,t2; + +# +# INSERT ... 
SELECT with lock tables +# + +CREATE TABLE t1 (i1 INT, nr INT, pk INT AUTO_INCREMENT PRIMARY KEY) ENGINE=rocksdb; +CREATE TABLE t2 (nr INT, nm INT, pk INT AUTO_INCREMENT PRIMARY KEY) ENGINE=rocksdb; +INSERT INTO t2 (nr,nm) VALUES (1,3); +INSERT INTO t2 (nr,nm) VALUES (2,4); + +LOCK TABLES t1 WRITE, t2 READ; +INSERT INTO t1 (i1,nr) SELECT 1, nr FROM t2 WHERE nm=3; +INSERT INTO t1 (i1,nr) SELECT 2, nr FROM t2 WHERE nm=4; +UNLOCK TABLES; + +LOCK TABLES t1 WRITE; +--error ER_TABLE_NOT_LOCKED +INSERT INTO t1 (i1,nr) SELECT i1, nr FROM t1; +UNLOCK TABLES; +LOCK TABLES t1 WRITE, t1 AS t1_alias READ; +INSERT INTO t1 (i1,nr) SELECT i1, nr FROM t1 AS t1_alias; +--error ER_TABLE_NOT_LOCKED +DROP TABLE t1,t2; +UNLOCK TABLES; +DROP TABLE t1,t2; + +# +# Check that a dropped table is removed from a lock + +CREATE TABLE t1 (a INT, b CHAR(8), PRIMARY KEY(a)) ENGINE=rocksdb; +CREATE TABLE t2 (a INT, b CHAR(8), PRIMARY KEY(b)) ENGINE=rocksdb; +CREATE TABLE t3 (a INT, b CHAR(8), pk INT PRIMARY KEY) ENGINE=rocksdb; +LOCK TABLES t1 WRITE, t2 WRITE, t3 WRITE; +# This removes one table after the other from the lock. +DROP TABLE t2, t3, t1; +# +# Check that a lock merge works + +CREATE TABLE t1 (a INT, b CHAR(8), PRIMARY KEY(a)) ENGINE=rocksdb; +CREATE TABLE t2 (a INT, b CHAR(8), PRIMARY KEY(b)) ENGINE=rocksdb; +CREATE TABLE t3 (a INT, b CHAR(8), pk INT PRIMARY KEY) ENGINE=rocksdb; +LOCK TABLES t1 WRITE, t2 WRITE, t3 WRITE, t1 AS t4 READ; + +ALTER TABLE t2 ADD COLUMN c2 INT; + +DROP TABLE t1, t2, t3; + +# FLUSH TABLES is not permitted when there is an active LOCK TABLES .. READ, +# FLUSH TABLES .. 
WITH READ LOCK should be used instead +# (and for other connections the table is locked) + +CREATE TABLE t1 (a INT, b CHAR(8), PRIMARY KEY(a)) ENGINE=rocksdb; +CREATE TABLE t2 (a INT, b CHAR(8), PRIMARY KEY(b)) ENGINE=rocksdb; + +LOCK TABLE t1 READ, t2 READ; +--error ER_TABLE_NOT_LOCKED_FOR_WRITE +FLUSH TABLE t1; +--error ER_TABLE_NOT_LOCKED_FOR_WRITE +FLUSH TABLES; +--error ER_LOCK_OR_ACTIVE_TRANSACTION +FLUSH TABLES t1, t2 WITH READ LOCK; +UNLOCK TABLES; + +FLUSH TABLES t1, t2 WITH READ LOCK; + +--connection con1 +--error ER_LOCK_WAIT_TIMEOUT +INSERT INTO t1 (a,b) VALUES (1,'a'),(2,'b'); + +--connection default +UNLOCK TABLES; + +# Global lock + +FLUSH TABLES WITH READ LOCK; + +--connection con1 +--error ER_LOCK_WAIT_TIMEOUT +INSERT INTO t1 (a,b) VALUES (1,'a'),(2,'b'); + +--connection default +UNLOCK TABLES; +INSERT INTO t1 (a,b) VALUES (1,'a'),(2,'b'); +FLUSH TABLES WITH READ LOCK; +--error ER_CANT_UPDATE_WITH_READLOCK +DROP TABLE t1, t2; +UNLOCK TABLES; +DROP TABLE t1, t2; + +--disconnect con1 + +# +# Ensure that FLUSH TABLES doesn't substitute a base locked table +# with a temporary one. +# + +CREATE TABLE t1 (a INT, b CHAR(8), PRIMARY KEY(a)) ENGINE=rocksdb; +CREATE TABLE t2 (a INT, b CHAR(8), PRIMARY KEY(b)) ENGINE=rocksdb; + +LOCK TABLE t1 WRITE, t2 WRITE; + +--disable_parsing + +CREATE TEMPORARY TABLE t1 (a INT, b CHAR(8), PRIMARY KEY(a)) ENGINE=rocksdb; +FLUSH TABLE t1; +DROP TEMPORARY TABLE t1; + +--enable_parsing + +SELECT a,b FROM t1; +UNLOCK TABLES; +DROP TABLE t1, t2; + +# Check that all connections opened by test cases in this file are really +# gone so execution of other tests won't be affected by their presence. 
+--source include/wait_until_count_sessions.inc + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/lock_info.test b/storage/rocksdb/mysql-test/rocksdb/t/lock_info.test new file mode 100644 index 0000000000000..1b624cf38c054 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/lock_info.test @@ -0,0 +1,31 @@ +--source include/have_rocksdb.inc + +--disable_warnings +DROP TABLE IF EXISTS t1; +DROP TABLE IF EXISTS t2; +--enable_warnings + +create table t1 (a int, primary key(a) comment 'lock_into_cf1') engine=rocksdb; +insert into t1 values (1); +insert into t1 values (2); + +create table t2 (a int, primary key(a) comment 'lock_info_cf2') engine=rocksdb; +insert into t2 values (1); +insert into t2 values (2); + +set autocommit=0; +select * from t1 for update; +select * from t2 for update; + +use information_schema; +--replace_column 2 _txn_id_ 3 _key_ +select rocksdb_ddl.cf, rocksdb_locks.transaction_id, rocksdb_locks.key +from rocksdb_locks +left join rocksdb_ddl +on rocksdb_locks.column_family_id=rocksdb_ddl.column_family +order by rocksdb_ddl.cf; + +use test; + +DROP TABLE t1; +DROP TABLE t2; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/lock_rows_not_exist.test b/storage/rocksdb/mysql-test/rocksdb/t/lock_rows_not_exist.test new file mode 100644 index 0000000000000..d8a6bde45c899 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/lock_rows_not_exist.test @@ -0,0 +1,110 @@ +--source include/have_rocksdb.inc + +--enable_connect_log + +# Save the initial number of concurrent sessions +--source include/count_sessions.inc + +--disable_warnings +DROP TABLE IF EXISTS t1; +--enable_warnings + +connect (con1,localhost,root,,); +connect (con2,localhost,root,,); + +#1. 
Using all primary key columns, with equal conditions +connection con1; +CREATE TABLE t (id1 int, id2 int, id3 int, value int, PRIMARY KEY (id1, id2, id3)) ENGINE=RocksDB; + +#1.1 SELECT FOR UPDATE +SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ; +BEGIN; +SELECT * FROM t WHERE id1=1 AND id2=1 AND id3=1 FOR UPDATE; + +connection con2; +SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ; +BEGIN; +--error ER_LOCK_WAIT_TIMEOUT +SELECT * FROM t WHERE id1=1 AND id2=1 AND id3=1 FOR UPDATE; +SELECT * FROM t WHERE id1=1 AND id2=1 AND id3=2 FOR UPDATE; + +#1.2 UPDATE +connection con1; +ROLLBACK; +BEGIN; +UPDATE t SET value=value+100 WHERE id1=1 AND id2=1 AND id3=1; + +connection con2; +ROLLBACK; +BEGIN; +--error ER_LOCK_WAIT_TIMEOUT +UPDATE t SET value=value+100 WHERE id1=1 AND id2=1 AND id3=1; +UPDATE t SET value=value+100 WHERE id1=1 AND id2=0 AND id3=1; + +#1.3 DELETE +connection con1; +ROLLBACK; +BEGIN; +DELETE FROM t WHERE id1=1 AND id2=1 AND id3=1; + +connection con2; +ROLLBACK; +BEGIN; +--error ER_LOCK_WAIT_TIMEOUT +DELETE FROM t WHERE id1=1 AND id2=1 AND id3=1; +DELETE FROM t WHERE id1=1 AND id2=1 AND id3=0; + +--disable_parsing +# +# The following is commented out because RocksDB's Transaction API doesn't +# "support" READ COMMITTED, in particular, it doesn't release row locks +# after each statement. (MyRocks is able to request a new snapshot for +# every statement, but this won't free the locks. TODO: Is the behavior +# that is tested below really needed?)
+# +connection con1; +ROLLBACK; +SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED; +BEGIN; +SELECT * FROM t WHERE id1=1 AND id2=1 AND id3=1 FOR UPDATE; + +connection con2; +ROLLBACK; +SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED; +BEGIN; +SELECT * FROM t WHERE id1=1 AND id2=1 AND id3=1 FOR UPDATE; +SELECT * FROM t WHERE id1=1 AND id2=1 AND id3=2 FOR UPDATE; + +connection con1; +ROLLBACK; +BEGIN; +UPDATE t SET value=value+100 WHERE id1=1 AND id2=1 AND id3=1; + +connection con2; +ROLLBACK; +BEGIN; +UPDATE t SET value=value+100 WHERE id1=1 AND id2=1 AND id3=1; +UPDATE t SET value=value+100 WHERE id1=1 AND id2=0 AND id3=1; + +connection con1; +ROLLBACK; +BEGIN; +DELETE FROM t WHERE id1=1 AND id2=1 AND id3=1; + +connection con2; +ROLLBACK; +BEGIN; +DELETE FROM t WHERE id1=1 AND id2=1 AND id3=1; +DELETE FROM t WHERE id1=1 AND id2=1 AND id3=0; + +connection con1; +ROLLBACK; +connection con2; +ROLLBACK; + +--enable_parsing +connection default; +disconnect con1; +disconnect con2; + +DROP TABLE t; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/locking_issues.test b/storage/rocksdb/mysql-test/rocksdb/t/locking_issues.test new file mode 100644 index 0000000000000..18a796573d1fc --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/locking_issues.test @@ -0,0 +1,67 @@ +--source include/have_rocksdb.inc + +let $isolation_level = REPEATABLE READ; +--source include/locking_issues_case1_1.inc + +let $isolation_level = READ COMMITTED; +--source include/locking_issues_case1_1.inc + +let $isolation_level = REPEATABLE READ; +--source include/locking_issues_case1_2.inc + +let $isolation_level = READ COMMITTED; +--source include/locking_issues_case1_2.inc + +let $lock_scanned_rows=0; +let $isolation_level = REPEATABLE READ; +--source include/locking_issues_case2.inc + +let $isolation_level = READ COMMITTED; +--source include/locking_issues_case2.inc + +# Rerun the case2 tests with rocksdb_lock_scanned_rows on +let $lock_scanned_rows=1; +let $isolation_level = 
REPEATABLE READ; +--source include/locking_issues_case2.inc + +let $isolation_level = READ COMMITTED; +--source include/locking_issues_case2.inc + +let $isolation_level = REPEATABLE READ; +--source include/locking_issues_case3.inc + +let $isolation_level = READ COMMITTED; +--source include/locking_issues_case3.inc + +let $isolation_level = REPEATABLE READ; +--source include/locking_issues_case4.inc + +let $isolation_level = READ COMMITTED; +--source include/locking_issues_case4.inc + +let $isolation_level = REPEATABLE READ; +--source include/locking_issues_case5.inc + +let $isolation_level = READ COMMITTED; +--source include/locking_issues_case5.inc + +let $isolation_level = REPEATABLE READ; +--source include/locking_issues_case6.inc + +let $isolation_level = READ COMMITTED; +--source include/locking_issues_case6.inc + +let $lock_scanned_rows=0; +let $isolation_level = REPEATABLE READ; +--source include/locking_issues_case7.inc + +let $isolation_level = READ COMMITTED; +--source include/locking_issues_case7.inc + +# Rerun the case7 tests with rocksdb_lock_scanned_rows on +let $lock_scanned_rows=1; +let $isolation_level = REPEATABLE READ; +--source include/locking_issues_case7.inc + +let $isolation_level = READ COMMITTED; +--source include/locking_issues_case7.inc diff --git a/storage/rocksdb/mysql-test/rocksdb/t/misc.test b/storage/rocksdb/mysql-test/rocksdb/t/misc.test new file mode 100644 index 0000000000000..b8efd0389d23c --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/misc.test @@ -0,0 +1,45 @@ +--source include/have_rocksdb.inc + +# +# Different statements not related to an engine, +# but added to provide function coverage for handler.cc and handler.h. +# The test can be disabled or removed later. 
+# + +# hits get_error_message(int, String*) + +INSERT INTO mysql.event ( + db, + name, + body, + definer, + interval_value, + interval_field, + originator, + character_set_client, + collation_connection, + db_collation, + body_utf8) +values ( + database(), + "ev1", + "select 1", + user(), + 100, + "SECOND_MICROSECOND", + 1, + 'utf8', + 'utf8_general_ci', + 'utf8_general_ci', + 'select 1'); + +--error ER_NOT_SUPPORTED_YET +SHOW EVENTS; +DROP EVENT ev1; + +# hits get_foreign_key_list(THD*, List*) +--sorted_result +SELECT TABLE_NAME, COLUMN_NAME, REFERENCED_TABLE_NAME, REFERENCED_COLUMN_NAME +FROM INFORMATION_SCHEMA.KEY_COLUMN_USAGE ORDER BY TABLE_NAME; + + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/mysqlbinlog_gtid_skip_empty_trans_rocksdb-master.opt b/storage/rocksdb/mysql-test/rocksdb/t/mysqlbinlog_gtid_skip_empty_trans_rocksdb-master.opt new file mode 100644 index 0000000000000..53303252212a4 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/mysqlbinlog_gtid_skip_empty_trans_rocksdb-master.opt @@ -0,0 +1 @@ +--binlog_format=row --gtid_mode=ON --enforce_gtid_consistency --log_bin --log_slave_updates diff --git a/storage/rocksdb/mysql-test/rocksdb/t/mysqlbinlog_gtid_skip_empty_trans_rocksdb.test b/storage/rocksdb/mysql-test/rocksdb/t/mysqlbinlog_gtid_skip_empty_trans_rocksdb.test new file mode 100644 index 0000000000000..0c04b60427eb0 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/mysqlbinlog_gtid_skip_empty_trans_rocksdb.test @@ -0,0 +1,16 @@ +--source include/have_rocksdb.inc + +################################################################################ +# Test the --skip-empty-trans option for mysqlbinlog with rocksdb +################################################################################ +--disable_warnings +--source include/have_gtid.inc +--enable_warnings + +reset master; +set timestamp=1000000000; +let $storage_engine=rocksdb; + +# Test row based replication +set SESSION binlog_format = 'ROW'; +--source 
include/mysqlbinlog_gtid_skip_empty_trans_input.inc diff --git a/storage/rocksdb/mysql-test/rocksdb/t/mysqldump-master.opt b/storage/rocksdb/mysql-test/rocksdb/t/mysqldump-master.opt new file mode 100644 index 0000000000000..2672d4ff35e0b --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/mysqldump-master.opt @@ -0,0 +1 @@ +--binlog_format=row diff --git a/storage/rocksdb/mysql-test/rocksdb/t/mysqldump.test b/storage/rocksdb/mysql-test/rocksdb/t/mysqldump.test new file mode 100644 index 0000000000000..9e904908330ea --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/mysqldump.test @@ -0,0 +1,61 @@ +--source include/have_rocksdb.inc +--source include/have_log_bin.inc +--source include/restart_mysqld.inc + +# Save the initial number of concurrent sessions +--source include/count_sessions.inc + +connect (con1,localhost,root,,); +connect (con2,localhost,root,,); + +connection con1; +create table r1 (id1 int, id2 int, id3 varchar(100), id4 int, value1 int, value2 int, value3 int, value4 int, primary key (id1, id2, id3, id4)) engine=rocksdb; +insert into r1 values (1,1,1,1,1,1,1,1); +insert into r1 values (1,1,1,2,2,2,2,2); +insert into r1 values (1,1,2,1,3,3,3,3); +insert into r1 values (1,1,2,2,4,4,4,4); +insert into r1 values (1,2,1,1,5,5,5,5); +insert into r1 values (1,2,1,2,6,6,6,6); +insert into r1 values (1,2,2,1,7,7,7,7); +insert into r1 values (1,2,2,2,8,8,8,8); +insert into r1 values (2,1,1,1,9,9,9,9); +insert into r1 values (2,1,1,2,10,10,10,10); +insert into r1 values (2,1,2,1,11,11,11,11); +insert into r1 values (2,1,2,2,12,12,12,12); +insert into r1 values (2,2,1,1,13,13,13,13); +insert into r1 values (2,2,1,2,14,14,14,14); +insert into r1 values (2,2,2,1,15,15,15,15); +insert into r1 values (2,2,2,2,16,16,16,16); + +connection con2; +BEGIN; +insert into r1 values (5,5,5,5,5,5,5,5); +update r1 set value1=value1+100 where id1=1 and id2=1 and id3='1'; + +--exec $MYSQL_DUMP --skip-comments --single-transaction --master-data=2 --print-ordering-key 
--rocksdb --order-by-primary-desc --rocksdb_bulk_load test + +rollback; + +connection con1; + +let SEARCH_FILE=$MYSQLTEST_VARDIR/mysqld.1/mysqld.log; +let SEARCH_PATTERN=START TRANSACTION WITH CONSISTENT ROCKSDB SNAPSHOT; +source include/search_pattern_in_file.inc; + +set @save_default_storage_engine=@@global.default_storage_engine; +SET GLOBAL default_storage_engine=rocksdb; +--exec $MYSQL_DUMP --skip-comments --single-transaction --master-data=2 --print-ordering-key test +source include/search_pattern_in_file.inc; + +# Sanity test mysqldump when the --innodb-stats-on-metadata is specified (no effect) +--echo ==== mysqldump with --innodb-stats-on-metadata ==== +--exec $MYSQL_DUMP --skip-comments --single-transaction --master-data=2 --print-ordering-key --innodb-stats-on-metadata test + +# testing mysqldump work with statement based binary logging +SET GLOBAL binlog_format=statement; +--exec $MYSQL_DUMP --skip-comments --single-transaction --master-data=2 --print-ordering-key test > /dev/null +SET GLOBAL binlog_format=row; + +drop table r1; +reset master; +set @@global.default_storage_engine=@save_default_storage_engine; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/mysqldump2-master.opt b/storage/rocksdb/mysql-test/rocksdb/t/mysqldump2-master.opt new file mode 100644 index 0000000000000..2672d4ff35e0b --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/mysqldump2-master.opt @@ -0,0 +1 @@ +--binlog_format=row diff --git a/storage/rocksdb/mysql-test/rocksdb/t/mysqldump2.test b/storage/rocksdb/mysql-test/rocksdb/t/mysqldump2.test new file mode 100644 index 0000000000000..3631e703de630 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/mysqldump2.test @@ -0,0 +1,43 @@ +--source include/have_rocksdb.inc + +--source include/have_log_bin.inc + +--disable_warnings +DROP TABLE IF EXISTS t1; +--enable_warnings +create table t1 (id int primary key, value int, value2 varchar(200), index(value)) engine=rocksdb; + +--disable_query_log +let $i = 1; +while ($i <= 
50000) { + let $insert = INSERT INTO t1 VALUES($i, $i, REPEAT('x', 150)); + inc $i; + eval $insert; +} +--enable_query_log + +optimize table t1; + +#wiping block cache +let $restart_file= $MYSQLTEST_VARDIR/tmp/mysqld.1.expect; +--exec echo "wait" > $restart_file +--shutdown_server 10 +--source include/wait_until_disconnected.inc +-- exec echo "restart:--rocksdb_default_cf_options=write_buffer_size=64k;target_file_size_base=64k;max_bytes_for_level_base=1m;compression_per_level=kNoCompression;disable_auto_compactions=true;level0_stop_writes_trigger=1000 " > $MYSQLTEST_VARDIR/tmp/mysqld.1.expect +-- enable_reconnect +-- source include/wait_until_connected_again.inc + +select variable_value into @a from information_schema.global_status where variable_name='rocksdb_block_cache_add'; + +--exec $MYSQL_DUMP --skip-comments --single-transaction --master-data=2 --print-ordering-key --rocksdb test > /dev/null + +# verifying block cache was not filled +select case when variable_value - @a > 20 then 'true' else 'false' end from information_schema.global_status where variable_name='rocksdb_block_cache_add'; + +select count(*) from t1; + +# verifying block cache was filled +select case when variable_value - @a > 100 then 'true' else 'false' end from information_schema.global_status where variable_name='rocksdb_block_cache_add'; + +#cleanup +DROP TABLE t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/negative_stats.test b/storage/rocksdb/mysql-test/rocksdb/t/negative_stats.test new file mode 100644 index 0000000000000..9de41d17976f9 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/negative_stats.test @@ -0,0 +1,26 @@ +--source include/have_rocksdb.inc +--source include/have_debug.inc + +--disable_warnings +DROP TABLE IF EXISTS t1; +--enable_warnings + +CREATE TABLE t1 (i1 INT, PRIMARY KEY (i1)) ENGINE = ROCKSDB; + +--disable_query_log +let $max = 10000; +let $i = 1; +while ($i <= $max) { + let $insert = INSERT INTO t1 VALUES ($i); + inc $i; + eval $insert; +} 
+--enable_query_log + +SET GLOBAL ROCKSDB_FORCE_FLUSH_MEMTABLE_NOW=1; + +set session debug_dbug= "+d,myrocks_simulate_negative_stats"; +SELECT CASE WHEN DATA_LENGTH < 1024 * 1024 THEN 'true' ELSE 'false' END FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_NAME = 't1'; +set session debug_dbug= "-d,myrocks_simulate_negative_stats"; + +DROP TABLE t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/no_merge_sort.test b/storage/rocksdb/mysql-test/rocksdb/t/no_merge_sort.test new file mode 100644 index 0000000000000..ccef7182c11d2 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/no_merge_sort.test @@ -0,0 +1,32 @@ +--source include/have_rocksdb.inc + +# For Issue#117 MyRocks does merge sort with small data sets + +let $datadir = `SELECT @@datadir`; + +--disable_query_log +let $i= 0; +while ($i < 30) { + DROP TABLE IF EXISTS ti_nk; + + CREATE TABLE `ti_nk` ( + `a` int(11) DEFAULT NULL, + `b` int(11) DEFAULT NULL, + `c` varchar(36) COLLATE latin1_bin NOT NULL, + PRIMARY KEY (`c`) + ); + let $j = 0; + while ($j < 200) { + eval insert into ti_nk values ($j, $j, md5($j)); + inc $j; + } + + select variable_value into @s from information_schema.global_status where variable_name='Sort_merge_passes'; + eval SELECT a, b, c FROM ti_nk ORDER BY a,b,c INTO OUTFILE '$datadir/select.out'; + --remove_file $datadir/select.out + select case when variable_value-@s = 0 then 'true' else 'false' end as skip_merge_sort from information_schema.global_status where variable_name='Sort_merge_passes'; + inc $i; +} +--enable_query_log + +DROP TABLE ti_nk; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/no_primary_key_basic_ops.inc b/storage/rocksdb/mysql-test/rocksdb/t/no_primary_key_basic_ops.inc new file mode 100644 index 0000000000000..0dcd452194a16 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/no_primary_key_basic_ops.inc @@ -0,0 +1,65 @@ +# +# This include file checks some very basic capabilities for restart insert +# update and delete for tables with no pk +# NOTE: requires 
table with structure similar to +# CREATE TABLE t1 (a INT, b CHAR(8)) ENGINE=rocksdb; +# + +SHOW CREATE TABLE t1; +SHOW COLUMNS IN t1; + +### test INSERT +INSERT INTO t1 (a,b) VALUES (76,'bar'); +INSERT INTO t1 (a,b) VALUES (35,'foo'); +INSERT INTO t1 (a,b) VALUES (77,'baz'); + +## test SELECT w/ index scans +--sorted_result +SELECT * FROM t1 WHERE a = 35; +--sorted_result +SELECT * FROM t1 WHERE a = 35 AND b = 'foo'; +--sorted_result +SELECT * FROM t1 WHERE a = 77 OR b = 'bar'; +--sorted_result +SELECT * FROM t1 WHERE a > 35; +--sorted_result +SELECT * FROM t1; + +# test UPDATE +UPDATE t1 SET a=a+100; +--sorted_result +SELECT * FROM t1; + +UPDATE t1 SET a=a-100, b='bbb' WHERE a>100; +--sorted_result +SELECT * FROM t1; +UPDATE t1 SET a=300, b='ccc' WHERE a>70; +--sorted_result +SELECT * FROM t1; +UPDATE t1 SET a=123 WHERE a=35; +--sorted_result +SELECT * FROM t1; +UPDATE t1 SET a=321 WHERE b='ccc'; +--sorted_result +SELECT * FROM t1; + + +## test RESTART/OPEN +--source include/restart_mysqld.inc +## test insert after restart +INSERT INTO t1 (a,b) VALUES (45,'bob'); +--sorted_result +SELECT * FROM t1; + +# test DELETE +DELETE FROM t1 WHERE a=123; +--sorted_result +SELECT * FROM t1; + +DELETE FROM t1 WHERE b > 'bbb' AND a > 100; +--sorted_result +SELECT * FROM t1; + +# test TRUNCATE +TRUNCATE TABLE t1; + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/optimize_table-master.opt b/storage/rocksdb/mysql-test/rocksdb/t/optimize_table-master.opt new file mode 100644 index 0000000000000..71f74ee53abb1 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/optimize_table-master.opt @@ -0,0 +1 @@ +--rocksdb_default_cf_options=write_buffer_size=64k;target_file_size_base=64k;max_bytes_for_level_base=1m;compression_per_level=kNoCompression diff --git a/storage/rocksdb/mysql-test/rocksdb/t/optimize_table.inc b/storage/rocksdb/mysql-test/rocksdb/t/optimize_table.inc new file mode 100644 index 0000000000000..9d03aae5c0c41 --- /dev/null +++ 
b/storage/rocksdb/mysql-test/rocksdb/t/optimize_table.inc @@ -0,0 +1,20 @@ +# run a check script to verify sst files reduced enough during each optimize table +perl; + +$size += -s $_ for (<$ENV{datadir}/.rocksdb/*.sst>); +$file= "$ENV{MYSQL_TMP_DIR}/sst_size.dat"; + +if (-f $file) { + open(F, '<', $file) || die("Can't open file $file: $!"); + $old = ; + close F; + if ($old - $size < 1e6) { + print "sst file reduction was not enough $old -> $size (minimum 1000kb)\n"; + } else { + print "sst file reduction ok\n"; + } +} +open(F, '>', $file) || die("Can't open file $file: $!"); +print F $size; +close F; +EOF diff --git a/storage/rocksdb/mysql-test/rocksdb/t/optimize_table.test b/storage/rocksdb/mysql-test/rocksdb/t/optimize_table.test new file mode 100644 index 0000000000000..7a8f4fc708578 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/optimize_table.test @@ -0,0 +1,81 @@ +--source include/have_rocksdb.inc + +# +# OPTIMIZE TABLE statements +# + +let datadir = `SELECT @@datadir`; + +--disable_warnings +DROP TABLE IF EXISTS t1,t2,t3,t4,t5,t6; +--enable_warnings +create table t1 (id int primary key, value int, value2 varchar(200), index(value)) engine=rocksdb; +create table t2 (id int primary key, value int, value2 varchar(200), index(value)) engine=rocksdb; +create table t3 (id int primary key, value int, value2 varchar(200), index(value)) engine=rocksdb; +create table t4 (id int, value int, value2 varchar(200), primary key (id) comment 'rev:cf_i', index(value) comment 'rev:cf_i') engine=rocksdb; +create table t5 (id int, value int, value2 varchar(200), primary key (id) comment 'rev:cf_i', index(value) comment 'rev:cf_i') engine=rocksdb; +create table t6 (id int, value int, value2 varchar(200), primary key (id) comment 'rev:cf_i', index(value) comment 'rev:cf_i') engine=rocksdb; + +--disable_query_log +let $t = 1; +while ($t <= 6) { + let $i = 1; + while ($i <= 10000) { + let $insert = INSERT INTO t$t VALUES($i, $i, REPEAT('x', 150)); + inc $i; + eval 
$insert; + } + inc $t; +} +--enable_query_log + +# Disable auto compaction so that effects of optimize table are stable +let $restart_file= $MYSQLTEST_VARDIR/tmp/mysqld.1.expect; +--exec echo "wait" > $restart_file +--shutdown_server 10 +--source include/wait_until_disconnected.inc +-- exec echo "restart:--rocksdb_default_cf_options=write_buffer_size=64k;target_file_size_base=64k;max_bytes_for_level_base=1m;compression_per_level=kNoCompression;disable_auto_compactions=true;level0_stop_writes_trigger=1000 " > $MYSQLTEST_VARDIR/tmp/mysqld.1.expect +-- enable_reconnect +-- source include/wait_until_connected_again.inc + +select count(*) from t1; +select count(*) from t2; +select count(*) from t3; +select count(*) from t4; +select count(*) from t5; +select count(*) from t6; +delete from t1 where id <= 9900; +delete from t2 where id <= 9900; +delete from t3 where id <= 9900; +delete from t4 where id <= 9900; +delete from t5 where id <= 9900; +delete from t6 where id <= 9900; + +source optimize_table.inc; +optimize table t1; +source optimize_table.inc; +optimize table t3; +source optimize_table.inc; +optimize table t4; +source optimize_table.inc; +optimize table t6; +source optimize_table.inc; + +select count(*) from t1; +select count(*) from t2; +select count(*) from t3; +select count(*) from t4; +select count(*) from t5; +select count(*) from t6; + +#cleanup +optimize table t2; +optimize table t5; +DROP TABLE t1; +DROP TABLE t2; +DROP TABLE t3; +DROP TABLE t4; +DROP TABLE t5; +DROP TABLE t6; +--remove_file $MYSQL_TMP_DIR/sst_size.dat + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/optimizer_loose_index_scans.test b/storage/rocksdb/mysql-test/rocksdb/t/optimizer_loose_index_scans.test new file mode 100644 index 0000000000000..beccc8a6b8ea9 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/optimizer_loose_index_scans.test @@ -0,0 +1,3 @@ + +let $engine=rocksdb; +--source include/loose_index_scans.inc diff --git 
a/storage/rocksdb/mysql-test/rocksdb/t/partition.test b/storage/rocksdb/mysql-test/rocksdb/t/partition.test new file mode 100644 index 0000000000000..5954c0d95dbd0 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/partition.test @@ -0,0 +1,762 @@ + +--source include/have_rocksdb.inc +--source include/have_partition.inc + +# +# Create tables with partitions and try to update/select from them. +# +--disable_warnings +DROP TABLE IF EXISTS t1; +DROP TABLE IF EXISTS VAR_POP; +DROP TABLE IF EXISTS TEMP0; +DROP TABLE IF EXISTS VAR_SAMP; +DROP TABLE IF EXISTS ti; +DROP TABLE IF EXISTS members; +DROP TABLE IF EXISTS members_2; +DROP TABLE IF EXISTS employees; +DROP TABLE IF EXISTS employees_2; +DROP TABLE IF EXISTS employees_3; +DROP TABLE IF EXISTS quarterly_report_status; +DROP TABLE IF EXISTS employees_4; +DROP TABLE IF EXISTS h2; +DROP TABLE IF EXISTS rcx; +DROP TABLE IF EXISTS r1; +DROP TABLE IF EXISTS rc1; +DROP TABLE IF EXISTS rx; +DROP TABLE IF EXISTS rc2; +DROP TABLE IF EXISTS rc3; +DROP TABLE IF EXISTS rc4; +DROP TABLE IF EXISTS employees_by_lname; +DROP TABLE IF EXISTS customers_1; +DROP TABLE IF EXISTS customers_2; +DROP TABLE IF EXISTS customers_3; +DROP TABLE IF EXISTS employees_hash; +DROP TABLE IF EXISTS employees_hash_1; +DROP TABLE IF EXISTS t1_hash; +DROP TABLE IF EXISTS employees_linear_hash; +DROP TABLE IF EXISTS t1_linear_hash; +DROP TABLE IF EXISTS k1; +DROP TABLE IF EXISTS k2; +DROP TABLE IF EXISTS tm1; +DROP TABLE IF EXISTS tk; +DROP TABLE IF EXISTS ts; +DROP TABLE IF EXISTS ts_1; +DROP TABLE IF EXISTS ts_3; +DROP TABLE IF EXISTS ts_4; +DROP TABLE IF EXISTS ts_5; +DROP TABLE IF EXISTS trb3; +DROP TABLE IF EXISTS tr; +DROP TABLE IF EXISTS members_3; +DROP TABLE IF EXISTS clients; +DROP TABLE IF EXISTS clients_lk; +DROP TABLE IF EXISTS trb1; + +--enable_warnings + +CREATE TABLE t1 (i INT, j INT, k INT, PRIMARY KEY (i)) ENGINE = ROCKSDB PARTITION BY KEY(i) PARTITIONS 4; + +--disable_query_log +let $max = 1000; +let $i = 1; +while ($i <= $max) { 
+ let $insert = INSERT INTO t1 VALUES ($i, $i, $i); + inc $i; + eval $insert; +} + +ALTER TABLE t1 REBUILD PARTITION p0, p1; +ALTER TABLE t1 OPTIMIZE PARTITION p0, p1; +ALTER TABLE t1 ANALYZE PARTITION p3; +ALTER TABLE t1 REPAIR PARTITION p0,p1; +ALTER TABLE t1 CHECK PARTITION p1; + +# Partition string is "#P#". To verify that parsing is done correctly, we'll +# verify if tables containing "P" somewhere can be created correctly. +CREATE TABLE VAR_POP (a int) ENGINE = ROCKSDB; +CREATE TABLE TEMP0 (a int) ENGINE = ROCKSDB PARTITION BY HASH (a) PARTITIONS 3; +CREATE TABLE VAR_SAMP (a int) ENGINE = ROCKSDB PARTITION BY HASH (a) PARTITIONS 10; + +--enable_query_log + +select lower(table_name) as tname +from information_schema.tables +where table_schema=database() +order by tname; + +SELECT * FROM t1 ORDER BY i LIMIT 10; +SELECT COUNT(*) FROM t1; + +# +# Test-cases above are copied from +# https://dev.mysql.com/doc/refman/5.6/en/partitioning.html to validate that the +# partitioning related examples work with MyRocks. +# + +# Create a table that is partitioned by hash into 6 partitions. +CREATE TABLE ti( + id INT, + amount DECIMAL(7,2), + tr_date DATE +) ENGINE=ROCKSDB + PARTITION BY HASH(MONTH(tr_date)) + PARTITIONS 6; + +CREATE TABLE members ( + firstname VARCHAR(25) NOT NULL, + lastname VARCHAR(25) NOT NULL, + username VARCHAR(16) NOT NULL, + email VARCHAR(35), + joined DATE NOT NULL +) ENGINE=ROCKSDB + PARTITION BY KEY(joined) + PARTITIONS 6; + +CREATE TABLE members_2 ( + firstname VARCHAR(25) NOT NULL, + lastname VARCHAR(25) NOT NULL, + username VARCHAR(16) NOT NULL, + email VARCHAR(35), + joined DATE NOT NULL +) ENGINE=ROCKSDB + PARTITION BY RANGE(YEAR(joined)) ( + PARTITION p0 VALUES LESS THAN (1960), + PARTITION p1 VALUES LESS THAN (1970), + PARTITION p2 VALUES LESS THAN (1980), + PARTITION p3 VALUES LESS THAN (1990), + PARTITION p4 VALUES LESS THAN MAXVALUE + ); + +# Partition names are not case-sensitive. 
+--error 1517 +CREATE TABLE t2 (val INT) + ENGINE=ROCKSDB + PARTITION BY LIST(val)( + PARTITION mypart VALUES IN (1,3,5), + PARTITION MyPart VALUES IN (2,4,6) + ); + +CREATE TABLE employees ( + id INT NOT NULL, + fname VARCHAR(30), + lname VARCHAR(30), + hired DATE NOT NULL DEFAULT '1970-01-01', + separated DATE NOT NULL DEFAULT '9999-12-31', + job_code INT NOT NULL, + store_id INT NOT NULL +) ENGINE=ROCKSDB + PARTITION BY RANGE (store_id) ( + PARTITION p0 VALUES LESS THAN (6), + PARTITION p1 VALUES LESS THAN (11), + PARTITION p2 VALUES LESS THAN (16), + PARTITION p3 VALUES LESS THAN MAXVALUE + ); + +CREATE TABLE employees_2 ( + id INT NOT NULL, + fname VARCHAR(30), + lname VARCHAR(30), + hired DATE NOT NULL DEFAULT '1970-01-01', + separated DATE NOT NULL DEFAULT '9999-12-31', + job_code INT NOT NULL, + store_id INT NOT NULL +) ENGINE=ROCKSDB + PARTITION BY RANGE (job_code) ( + PARTITION p0 VALUES LESS THAN (100), + PARTITION p1 VALUES LESS THAN (1000), + PARTITION p2 VALUES LESS THAN (10000) + ); + +CREATE TABLE employees_3 ( + id INT NOT NULL, + fname VARCHAR(30), + lname VARCHAR(30), + hired DATE NOT NULL DEFAULT '1970-01-01', + separated DATE NOT NULL DEFAULT '9999-12-31', + job_code INT, + store_id INT +) ENGINE=ROCKSDB + PARTITION BY RANGE (YEAR(separated)) ( + PARTITION p0 VALUES LESS THAN (1991), + PARTITION p1 VALUES LESS THAN (1996), + PARTITION p2 VALUES LESS THAN (2001), + PARTITION p3 VALUES LESS THAN MAXVALUE + ); + +CREATE TABLE quarterly_report_status ( + report_id INT NOT NULL, + report_status VARCHAR(20) NOT NULL, + report_updated TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP +) ENGINE=ROCKSDB + PARTITION BY RANGE (UNIX_TIMESTAMP(report_updated)) ( + PARTITION p0 VALUES LESS THAN ( UNIX_TIMESTAMP('2008-01-01 00:00:00') ), + PARTITION p1 VALUES LESS THAN ( UNIX_TIMESTAMP('2008-04-01 00:00:00') ), + PARTITION p2 VALUES LESS THAN ( UNIX_TIMESTAMP('2008-07-01 00:00:00') ), + PARTITION p3 VALUES LESS THAN ( 
UNIX_TIMESTAMP('2008-10-01 00:00:00') ), + PARTITION p4 VALUES LESS THAN ( UNIX_TIMESTAMP('2009-01-01 00:00:00') ), + PARTITION p5 VALUES LESS THAN ( UNIX_TIMESTAMP('2009-04-01 00:00:00') ), + PARTITION p6 VALUES LESS THAN ( UNIX_TIMESTAMP('2009-07-01 00:00:00') ), + PARTITION p7 VALUES LESS THAN ( UNIX_TIMESTAMP('2009-10-01 00:00:00') ), + PARTITION p8 VALUES LESS THAN ( UNIX_TIMESTAMP('2010-01-01 00:00:00') ), + PARTITION p9 VALUES LESS THAN (MAXVALUE) + ); + +CREATE TABLE employees_4 ( + id INT NOT NULL, + fname VARCHAR(30), + lname VARCHAR(30), + hired DATE NOT NULL DEFAULT '1970-01-01', + separated DATE NOT NULL DEFAULT '9999-12-31', + job_code INT, + store_id INT +) ENGINE=ROCKSDB + PARTITION BY LIST(store_id) ( + PARTITION pNorth VALUES IN (3,5,6,9,17), + PARTITION pEast VALUES IN (1,2,10,11,19,20), + PARTITION pWest VALUES IN (4,12,13,14,18), + PARTITION pCentral VALUES IN (7,8,15,16) + ); + +CREATE TABLE h2 ( + c1 INT, + c2 INT +) ENGINE=ROCKSDB + PARTITION BY LIST(c1) ( + PARTITION p0 VALUES IN (1, 4, 7), + PARTITION p1 VALUES IN (2, 5, 8) + ); + +# ERROR 1526 (HY000): Table has no partition for value 3 +--error 1526 +INSERT INTO h2 VALUES (3, 5); + +CREATE TABLE rcx ( + a INT, + b INT, + c CHAR(3), + d INT +) ENGINE=ROCKSDB + PARTITION BY RANGE COLUMNS(a,d,c) ( + PARTITION p0 VALUES LESS THAN (5,10,'ggg'), + PARTITION p1 VALUES LESS THAN (10,20,'mmm'), + PARTITION p2 VALUES LESS THAN (15,30,'sss'), + PARTITION p3 VALUES LESS THAN (MAXVALUE,MAXVALUE,MAXVALUE) + ); + +CREATE TABLE r1 ( + a INT, + b INT +) ENGINE=ROCKSDB + PARTITION BY RANGE (a) ( + PARTITION p0 VALUES LESS THAN (5), + PARTITION p1 VALUES LESS THAN (MAXVALUE) + ); + +INSERT INTO r1 VALUES (5,10), (5,11), (5,12); + +CREATE TABLE rc1 ( + a INT, + b INT +) ENGINE=ROCKSDB + PARTITION BY RANGE COLUMNS(a, b) ( + PARTITION p0 VALUES LESS THAN (5, 12), + PARTITION p3 VALUES LESS THAN (MAXVALUE, MAXVALUE) + ); + +INSERT INTO rc1 VALUES (5,10), (5,11), (5,12); +SELECT (5,10) < (5,12), (5,11) < 
(5,12), (5,12) < (5,12); + +CREATE TABLE rx ( + a INT, + b INT +) ENGINE=ROCKSDB + PARTITION BY RANGE COLUMNS (a) ( + PARTITION p0 VALUES LESS THAN (5), + PARTITION p1 VALUES LESS THAN (MAXVALUE) + ); + +INSERT INTO rx VALUES (5,10), (5,11), (5,12); + +CREATE TABLE rc2 ( + a INT, + b INT +) ENGINE=ROCKSDB + PARTITION BY RANGE COLUMNS(a,b) ( + PARTITION p0 VALUES LESS THAN (0,10), + PARTITION p1 VALUES LESS THAN (10,20), + PARTITION p2 VALUES LESS THAN (10,30), + PARTITION p3 VALUES LESS THAN (MAXVALUE,MAXVALUE) + ); + +CREATE TABLE rc3 ( + a INT, + b INT +) ENGINE=ROCKSDB + PARTITION BY RANGE COLUMNS(a,b) ( + PARTITION p0 VALUES LESS THAN (0,10), + PARTITION p1 VALUES LESS THAN (10,20), + PARTITION p2 VALUES LESS THAN (10,30), + PARTITION p3 VALUES LESS THAN (10,35), + PARTITION p4 VALUES LESS THAN (20,40), + PARTITION p5 VALUES LESS THAN (MAXVALUE,MAXVALUE) +); + +CREATE TABLE rc4 ( + a INT, + b INT, + c INT +) ENGINE=ROCKSDB + PARTITION BY RANGE COLUMNS(a,b,c) ( + PARTITION p0 VALUES LESS THAN (0,25,50), + PARTITION p1 VALUES LESS THAN (10,20,100), + PARTITION p2 VALUES LESS THAN (10,30,50), + PARTITION p3 VALUES LESS THAN (MAXVALUE,MAXVALUE,MAXVALUE) + ); + +SELECT (0,25,50) < (10,20,100), (10,20,100) < (10,30,50); + +-- ERROR 1493 (HY000): VALUES LESS THAN value must be strictly increasing for each partition + +--error 1493 +CREATE TABLE rcf ( + a INT, + b INT, + c INT +) ENGINE=ROCKSDB + PARTITION BY RANGE COLUMNS(a,b,c) ( + PARTITION p0 VALUES LESS THAN (0,25,50), + PARTITION p1 VALUES LESS THAN (20,20,100), + PARTITION p2 VALUES LESS THAN (10,30,50), + PARTITION p3 VALUES LESS THAN (MAXVALUE,MAXVALUE,MAXVALUE) + ); + +CREATE TABLE employees_by_lname ( + id INT NOT NULL, + fname VARCHAR(30), + lname VARCHAR(30), + hired DATE NOT NULL DEFAULT '1970-01-01', + separated DATE NOT NULL DEFAULT '9999-12-31', + job_code INT NOT NULL, + store_id INT NOT NULL +) ENGINE=ROCKSDB + PARTITION BY RANGE COLUMNS (lname) ( + PARTITION p0 VALUES LESS THAN ('g'), + PARTITION p1 
VALUES LESS THAN ('m'), + PARTITION p2 VALUES LESS THAN ('t'), + PARTITION p3 VALUES LESS THAN (MAXVALUE) + ); + +ALTER TABLE employees_by_lname PARTITION BY RANGE COLUMNS (lname) ( + PARTITION p0 VALUES LESS THAN ('g'), + PARTITION p1 VALUES LESS THAN ('m'), + PARTITION p2 VALUES LESS THAN ('t'), + PARTITION p3 VALUES LESS THAN (MAXVALUE) +); + +ALTER TABLE employees_by_lname PARTITION BY RANGE COLUMNS (hired) ( + PARTITION p0 VALUES LESS THAN ('1970-01-01'), + PARTITION p1 VALUES LESS THAN ('1980-01-01'), + PARTITION p2 VALUES LESS THAN ('1990-01-01'), + PARTITION p3 VALUES LESS THAN ('2000-01-01'), + PARTITION p4 VALUES LESS THAN ('2010-01-01'), + PARTITION p5 VALUES LESS THAN (MAXVALUE) +); + +CREATE TABLE customers_1 ( + first_name VARCHAR(25), + last_name VARCHAR(25), + street_1 VARCHAR(30), + street_2 VARCHAR(30), + city VARCHAR(15), + renewal DATE +) ENGINE=ROCKSDB + PARTITION BY LIST COLUMNS(city) ( + PARTITION pRegion_1 VALUES IN('Oskarshamn', 'Högsby', 'MönsterÃ¥s'), + PARTITION pRegion_2 VALUES IN('Vimmerby', 'Hultsfred', 'Västervik'), + PARTITION pRegion_3 VALUES IN('Nässjö', 'Eksjö', 'Vetlanda'), + PARTITION pRegion_4 VALUES IN('Uppvidinge', 'Alvesta', 'Växjo') + ); + +CREATE TABLE customers_2 ( + first_name VARCHAR(25), + last_name VARCHAR(25), + street_1 VARCHAR(30), + street_2 VARCHAR(30), + city VARCHAR(15), + renewal DATE +) ENGINE=ROCKSDB + PARTITION BY LIST COLUMNS(renewal) ( + PARTITION pWeek_1 VALUES IN('2010-02-01', '2010-02-02', '2010-02-03', + '2010-02-04', '2010-02-05', '2010-02-06', '2010-02-07'), + PARTITION pWeek_2 VALUES IN('2010-02-08', '2010-02-09', '2010-02-10', + '2010-02-11', '2010-02-12', '2010-02-13', '2010-02-14'), + PARTITION pWeek_3 VALUES IN('2010-02-15', '2010-02-16', '2010-02-17', + '2010-02-18', '2010-02-19', '2010-02-20', '2010-02-21'), + PARTITION pWeek_4 VALUES IN('2010-02-22', '2010-02-23', '2010-02-24', + '2010-02-25', '2010-02-26', '2010-02-27', '2010-02-28') + ); + +CREATE TABLE customers_3 ( + first_name 
VARCHAR(25), + last_name VARCHAR(25), + street_1 VARCHAR(30), + street_2 VARCHAR(30), + city VARCHAR(15), + renewal DATE +) ENGINE=ROCKSDB + PARTITION BY RANGE COLUMNS(renewal) ( + PARTITION pWeek_1 VALUES LESS THAN('2010-02-09'), + PARTITION pWeek_2 VALUES LESS THAN('2010-02-15'), + PARTITION pWeek_3 VALUES LESS THAN('2010-02-22'), + PARTITION pWeek_4 VALUES LESS THAN('2010-03-01') + ); + +CREATE TABLE employees_hash ( + id INT NOT NULL, + fname VARCHAR(30), + lname VARCHAR(30), + hired DATE NOT NULL DEFAULT '1970-01-01', + separated DATE NOT NULL DEFAULT '9999-12-31', + job_code INT, + store_id INT +) ENGINE=ROCKSDB + PARTITION BY HASH(store_id) + PARTITIONS 4; + +CREATE TABLE employees_hash_1 ( + id INT NOT NULL, + fname VARCHAR(30), + lname VARCHAR(30), + hired DATE NOT NULL DEFAULT '1970-01-01', + separated DATE NOT NULL DEFAULT '9999-12-31', + job_code INT, + store_id INT +) ENGINE=ROCKSDB + PARTITION BY HASH( YEAR(hired) ) + PARTITIONS 4; + +CREATE TABLE t1_hash ( + col1 INT, + col2 CHAR(5), + col3 DATE +) ENGINE=ROCKSDB + PARTITION BY HASH( YEAR(col3) ) + PARTITIONS 4; + +CREATE TABLE employees_linear_hash ( + id INT NOT NULL, + fname VARCHAR(30), + lname VARCHAR(30), + hired DATE NOT NULL DEFAULT '1970-01-01', + separated DATE NOT NULL DEFAULT '9999-12-31', + job_code INT, + store_id INT +) ENGINE=ROCKSDB + PARTITION BY LINEAR HASH( YEAR(hired) ) + PARTITIONS 4; + +CREATE TABLE t1_linear_hash ( + col1 INT, + col2 CHAR(5), + col3 DATE +) ENGINE=ROCKSDB + PARTITION BY LINEAR HASH( YEAR(col3) ) + PARTITIONS 6; + +CREATE TABLE k1 ( + id INT NOT NULL PRIMARY KEY, + name VARCHAR(20) +) ENGINE=ROCKSDB + PARTITION BY KEY() + PARTITIONS 2; + +CREATE TABLE k2 ( + id INT NOT NULL, + name VARCHAR(20), + UNIQUE KEY (id) +) ENGINE=ROCKSDB + PARTITION BY KEY() + PARTITIONS 2; + +CREATE TABLE tm1 ( + s1 CHAR(32) PRIMARY KEY +) ENGINE=ROCKSDB + PARTITION BY KEY(s1) + PARTITIONS 10; + +CREATE TABLE tk ( + col1 INT NOT NULL, + col2 CHAR(5), + col3 DATE +) ENGINE=ROCKSDB + 
PARTITION BY LINEAR KEY (col1) + PARTITIONS 3; + +CREATE TABLE ts ( + id INT, + purchased DATE +) ENGINE=ROCKSDB + PARTITION BY RANGE( YEAR(purchased) ) + SUBPARTITION BY HASH( TO_DAYS(purchased) ) + SUBPARTITIONS 2 ( + PARTITION p0 VALUES LESS THAN (1990), + PARTITION p1 VALUES LESS THAN (2000), + PARTITION p2 VALUES LESS THAN MAXVALUE + ); + +CREATE TABLE ts_1 ( + id INT, + purchased DATE +) ENGINE=ROCKSDB + PARTITION BY RANGE( YEAR(purchased) ) + SUBPARTITION BY HASH( TO_DAYS(purchased) ) ( + PARTITION p0 VALUES LESS THAN (1990) ( + SUBPARTITION s0, + SUBPARTITION s1 + ), + PARTITION p1 VALUES LESS THAN (2000) ( + SUBPARTITION s2, + SUBPARTITION s3 + ), + PARTITION p2 VALUES LESS THAN MAXVALUE ( + SUBPARTITION s4, + SUBPARTITION s5 + ) + ); + +--error 1064 +CREATE TABLE ts_2 ( + id INT, + purchased DATE +) ENGINE=ROCKSDB + PARTITION BY RANGE( YEAR(purchased) ) + SUBPARTITION BY HASH( TO_DAYS(purchased) ) ( + PARTITION p0 VALUES LESS THAN (1990) ( + SUBPARTITION s0, + SUBPARTITION s1 + ), + PARTITION p1 VALUES LESS THAN (2000), + PARTITION p2 VALUES LESS THAN MAXVALUE ( + SUBPARTITION s2, + SUBPARTITION s3 + ) + ); + +CREATE TABLE ts_3 ( + id INT, + purchased DATE +) ENGINE=ROCKSDB + PARTITION BY RANGE( YEAR(purchased) ) + SUBPARTITION BY HASH( TO_DAYS(purchased) ) ( + PARTITION p0 VALUES LESS THAN (1990) ( + SUBPARTITION s0, + SUBPARTITION s1 + ), + PARTITION p1 VALUES LESS THAN (2000) ( + SUBPARTITION s2, + SUBPARTITION s3 + ), + PARTITION p2 VALUES LESS THAN MAXVALUE ( + SUBPARTITION s4, + SUBPARTITION s5 + ) + ); + +CREATE TABLE ts_4 ( + id INT, + purchased DATE +) ENGINE=ROCKSDB + PARTITION BY RANGE( YEAR(purchased) ) + SUBPARTITION BY HASH( TO_DAYS(purchased) ) ( + PARTITION p0 VALUES LESS THAN (1990) ( + SUBPARTITION s0, + SUBPARTITION s1 + ), + PARTITION p1 VALUES LESS THAN (2000) ( + SUBPARTITION s2, + SUBPARTITION s3 + ), + PARTITION p2 VALUES LESS THAN MAXVALUE ( + SUBPARTITION s4, + SUBPARTITION s5 + ) + ); + +CREATE TABLE ts_5 ( + id INT, + purchased 
DATE +) ENGINE=ROCKSDB + PARTITION BY RANGE(YEAR(purchased)) + SUBPARTITION BY HASH( TO_DAYS(purchased) ) ( + PARTITION p0 VALUES LESS THAN (1990) ( + SUBPARTITION s0a, + SUBPARTITION s0b + ), + PARTITION p1 VALUES LESS THAN (2000) ( + SUBPARTITION s1a, + SUBPARTITION s1b + ), + PARTITION p2 VALUES LESS THAN MAXVALUE ( + SUBPARTITION s2a, + SUBPARTITION s2b + ) + ); + +CREATE TABLE trb3 ( + id INT, + name VARCHAR(50), + purchased DATE +) ENGINE=ROCKSDB + PARTITION BY RANGE( YEAR(purchased) ) ( + PARTITION p0 VALUES LESS THAN (1990), + PARTITION p1 VALUES LESS THAN (1995), + PARTITION p2 VALUES LESS THAN (2000), + PARTITION p3 VALUES LESS THAN (2005) + ); + +ALTER TABLE trb3 PARTITION BY KEY(id) PARTITIONS 2; + +CREATE TABLE tr ( + id INT, + name VARCHAR(50), + purchased DATE +) ENGINE=ROCKSDB + PARTITION BY RANGE( YEAR(purchased) ) ( + PARTITION p0 VALUES LESS THAN (1990), + PARTITION p1 VALUES LESS THAN (1995), + PARTITION p2 VALUES LESS THAN (2000), + PARTITION p3 VALUES LESS THAN (2005) + ); + +INSERT INTO tr VALUES + (1, 'desk organiser', '2003-10-15'), + (2, 'CD player', '1993-11-05'), + (3, 'TV set', '1996-03-10'), + (4, 'bookcase', '1982-01-10'), + (5, 'exercise bike', '2004-05-09'), + (6, 'sofa', '1987-06-05'), + (7, 'popcorn maker', '2001-11-22'), + (8, 'aquarium', '1992-08-04'), + (9, 'study desk', '1984-09-16'), + (10, 'lava lamp', '1998-12-25'); + +SELECT * FROM tr WHERE purchased BETWEEN '1995-01-01' AND '1999-12-31'; + +ALTER TABLE tr DROP PARTITION p2; + +SELECT * FROM tr WHERE purchased BETWEEN '1995-01-01' AND '1999-12-31'; + +CREATE TABLE members_3 ( + id INT, + fname VARCHAR(25), + lname VARCHAR(25), + dob DATE +) ENGINE=ROCKSDB + PARTITION BY RANGE( YEAR(dob) ) ( + PARTITION p0 VALUES LESS THAN (1970), + PARTITION p1 VALUES LESS THAN (1980), + PARTITION p2 VALUES LESS THAN (1990) + ); + +ALTER TABLE members_3 ADD PARTITION (PARTITION p3 VALUES LESS THAN (2000)); + +# ERROR 1493 (HY000): VALUES LESS THAN value must be strictly increasing for each 
partition +--error 1493 +ALTER TABLE members_3 ADD PARTITION (PARTITION n VALUES LESS THAN (1960)); + +CREATE TABLE clients ( + id INT, + fname VARCHAR(30), + lname VARCHAR(30), + signed DATE +) ENGINE=ROCKSDB + PARTITION BY HASH( MONTH(signed) ) + PARTITIONS 12; + +ALTER TABLE clients COALESCE PARTITION 4; + +CREATE TABLE clients_lk ( + id INT, + fname VARCHAR(30), + lname VARCHAR(30), + signed DATE +) ENGINE=ROCKSDB + PARTITION BY LINEAR KEY(signed) + PARTITIONS 12; + +# ERROR 1508 (HY000): Cannot remove all partitions, use DROP TABLE instead +--error 1508 +ALTER TABLE clients COALESCE PARTITION 18; + +ALTER TABLE clients ADD PARTITION PARTITIONS 6; + +CREATE TABLE trb1 ( + id INT, + name VARCHAR(50), + purchased DATE +) ENGINE=ROCKSDB + PARTITION BY RANGE(id) ( + PARTITION p0 VALUES LESS THAN (3), + PARTITION p1 VALUES LESS THAN (7), + PARTITION p2 VALUES LESS THAN (9), + PARTITION p3 VALUES LESS THAN (11) + ); + +INSERT INTO trb1 VALUES + (1, 'desk organiser', '2003-10-15'), + (2, 'CD player', '1993-11-05'), + (3, 'TV set', '1996-03-10'), + (4, 'bookcase', '1982-01-10'), + (5, 'exercise bike', '2004-05-09'), + (6, 'sofa', '1987-06-05'), + (7, 'popcorn maker', '2001-11-22'), + (8, 'aquarium', '1992-08-04'), + (9, 'study desk', '1984-09-16'), + (10, 'lava lamp', '1998-12-25'); + +ALTER TABLE trb1 ADD PRIMARY KEY (id); + +# Clean up. 
+DROP TABLE IF EXISTS t1; +DROP TABLE IF EXISTS VAR_POP; +DROP TABLE IF EXISTS TEMP0; +DROP TABLE IF EXISTS VAR_SAMP; +DROP TABLE IF EXISTS ti; +DROP TABLE IF EXISTS members; +DROP TABLE IF EXISTS members_2; +DROP TABLE IF EXISTS employees; +DROP TABLE IF EXISTS employees_2; +DROP TABLE IF EXISTS employees_3; +DROP TABLE IF EXISTS quarterly_report_status; +DROP TABLE IF EXISTS employees_4; +DROP TABLE IF EXISTS h2; +DROP TABLE IF EXISTS rcx; +DROP TABLE IF EXISTS r1; +DROP TABLE IF EXISTS rc1; +DROP TABLE IF EXISTS rx; +DROP TABLE IF EXISTS rc2; +DROP TABLE IF EXISTS rc3; +DROP TABLE IF EXISTS rc4; +DROP TABLE IF EXISTS employees_by_lname; +DROP TABLE IF EXISTS customers_1; +DROP TABLE IF EXISTS customers_2; +DROP TABLE IF EXISTS customers_3; +DROP TABLE IF EXISTS employees_hash; +DROP TABLE IF EXISTS employees_hash_1; +DROP TABLE IF EXISTS t1_hash; +DROP TABLE IF EXISTS employees_linear_hash; +DROP TABLE IF EXISTS t1_linear_hash; +DROP TABLE IF EXISTS k1; +DROP TABLE IF EXISTS k2; +DROP TABLE IF EXISTS tm1; +DROP TABLE IF EXISTS tk; +DROP TABLE IF EXISTS ts; +DROP TABLE IF EXISTS ts_1; +DROP TABLE IF EXISTS ts_3; +DROP TABLE IF EXISTS ts_4; +DROP TABLE IF EXISTS ts_5; +DROP TABLE IF EXISTS trb3; +DROP TABLE IF EXISTS tr; +DROP TABLE IF EXISTS members_3; +DROP TABLE IF EXISTS clients; +DROP TABLE IF EXISTS clients_lk; +DROP TABLE IF EXISTS trb1; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/perf_context.test b/storage/rocksdb/mysql-test/rocksdb/t/perf_context.test new file mode 100644 index 0000000000000..4290811e868cf --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/perf_context.test @@ -0,0 +1,92 @@ +--source include/have_rocksdb.inc + +# +# Information Schema perf context +# + +--disable_warnings +DROP TABLE IF EXISTS t1; +DROP TABLE IF EXISTS t2; +--enable_warnings + + +SET @prior_rocksdb_perf_context_level = @@rocksdb_perf_context_level; +SET GLOBAL rocksdb_perf_context_level=3; + +CREATE TABLE t1 (i INT, j INT, PRIMARY KEY (i)) ENGINE = ROCKSDB; 
+CREATE TABLE t2 (k INT, PRIMARY KEY (k)) ENGINE = ROCKSDB; + +INSERT INTO t1 VALUES (1,1), (2,2), (3,3), (4,4), (5,5); + +############################################################################### +# Test that expected perf context stats exists +############################################################################### + +# Check per-table perf context +--replace_column 5 # +SELECT * FROM INFORMATION_SCHEMA.ROCKSDB_PERF_CONTEXT WHERE TABLE_NAME = 't1'; + +# Check global perf context +--replace_column 2 # +SELECT * FROM INFORMATION_SCHEMA.ROCKSDB_PERF_CONTEXT_GLOBAL; + +############################################################################### +# Test iteration skip counters +############################################################################### + +SELECT * FROM INFORMATION_SCHEMA.ROCKSDB_PERF_CONTEXT +WHERE TABLE_NAME = 't1' +AND STAT_TYPE in ('INTERNAL_KEY_SKIPPED_COUNT', 'INTERNAL_DELETE_SKIPPED_COUNT'); + +SELECT * FROM t1; + +SELECT * FROM INFORMATION_SCHEMA.ROCKSDB_PERF_CONTEXT +WHERE TABLE_NAME = 't1' +AND STAT_TYPE in ('INTERNAL_KEY_SKIPPED_COUNT', 'INTERNAL_DELETE_SKIPPED_COUNT'); + +SELECT * FROM t1 WHERE j BETWEEN 1 AND 5; + +SELECT * FROM INFORMATION_SCHEMA.ROCKSDB_PERF_CONTEXT +WHERE TABLE_NAME = 't1' +AND STAT_TYPE in ('INTERNAL_KEY_SKIPPED_COUNT', 'INTERNAL_DELETE_SKIPPED_COUNT'); + +############################################################################### +# Test write I/O stats +############################################################################### + +# Statistics for multi-statement transactions cannot be attributed to +# individual tables but should show up in global perf context stats + +BEGIN; +INSERT INTO t2 VALUES (1), (2); +INSERT INTO t2 VALUES (3), (4); +COMMIT; + +SELECT COUNT(*) from INFORMATION_SCHEMA.ROCKSDB_PERF_CONTEXT +WHERE TABLE_NAME = 't2' +AND STAT_TYPE = 'IO_WRITE_NANOS' +AND VALUE > 0; + +SELECT COUNT(*) from INFORMATION_SCHEMA.ROCKSDB_PERF_CONTEXT_GLOBAL +WHERE STAT_TYPE = 
'IO_WRITE_NANOS' AND VALUE > 0; + +SELECT VALUE INTO @a from INFORMATION_SCHEMA.ROCKSDB_PERF_CONTEXT_GLOBAL +WHERE STAT_TYPE = 'IO_WRITE_NANOS'; + +# Single statement writes do show up in per-table stats +INSERT INTO t2 VALUES (5), (6), (7), (8); + +SELECT COUNT(*) from INFORMATION_SCHEMA.ROCKSDB_PERF_CONTEXT +WHERE TABLE_NAME = 't2' +AND STAT_TYPE = 'IO_WRITE_NANOS' +AND VALUE > 0; + +SELECT VALUE INTO @b from INFORMATION_SCHEMA.ROCKSDB_PERF_CONTEXT_GLOBAL +WHERE STAT_TYPE = 'IO_WRITE_NANOS'; + +SELECT CASE WHEN @b - @a > 0 THEN 'true' ELSE 'false' END; + +# cleanup +DROP TABLE t1; +DROP TABLE t2; +SET GLOBAL rocksdb_perf_context_level = @prior_rocksdb_perf_context_level; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/persistent_cache.test b/storage/rocksdb/mysql-test/rocksdb/t/persistent_cache.test new file mode 100644 index 0000000000000..03d1d0a60bc30 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/persistent_cache.test @@ -0,0 +1,41 @@ +--source include/have_rocksdb.inc + +--disable_warnings +DROP TABLE IF EXISTS t1; +--enable_warnings + +--let $_server_id= `SELECT @@server_id` +--let $_expect_file_name= $MYSQLTEST_VARDIR/tmp/mysqld.$_server_id.expect +--let $_cache_file_name= $MYSQLTEST_VARDIR/tmp/persistent_cache +--exec echo "wait" >$_expect_file_name + +# restart server with correct parameters +shutdown_server 10; +--exec echo "restart:--rocksdb_persistent_cache_path=$_cache_file_name --rocksdb_persistent_cache_size_mb=100" >$_expect_file_name +--sleep 5 +--enable_reconnect +--source include/wait_until_connected_again.inc +--disable_reconnect + + +# insert values and flush out of memtable +CREATE TABLE t1 (a int primary key) ENGINE=ROCKSDB; +insert into t1 values (1); +set global rocksdb_force_flush_memtable_now=1; + +# pull data through cache +select * from t1 where a = 1; + +# restart server to re-read cache +--exec echo "wait" >$_expect_file_name +shutdown_server 10; +--exec echo "restart:--rocksdb_persistent_cache_path=$_cache_file_name 
--rocksdb_persistent_cache_size_mb=100" >$_expect_file_name +--sleep 5 +--enable_reconnect +--source include/wait_until_connected_again.inc +--disable_reconnect + +# pull values from cache again +select * from t1 where a = 1; + +drop table t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/read_only_tx-master.opt b/storage/rocksdb/mysql-test/rocksdb/t/read_only_tx-master.opt new file mode 100644 index 0000000000000..52f4895dc2f31 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/read_only_tx-master.opt @@ -0,0 +1 @@ +--rocksdb_default_cf_options=write_buffer_size=16k --log-bin --binlog_format=row --gtid_mode=ON --enforce_gtid_consistency --log-slave-updates diff --git a/storage/rocksdb/mysql-test/rocksdb/t/read_only_tx.test b/storage/rocksdb/mysql-test/rocksdb/t/read_only_tx.test new file mode 100644 index 0000000000000..56070652618b3 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/read_only_tx.test @@ -0,0 +1,70 @@ +--source include/have_log_bin.inc +--source include/have_rocksdb.inc +--source include/count_sessions.inc +--disable_warnings +--source include/have_gtid.inc +--enable_warnings +-- let $uuid = `select @@server_uuid;` + +--disable_warnings +DROP TABLE IF EXISTS t1; +--enable_warnings + +connect (con1,localhost,root,,); +connect (con2,localhost,root,,); + +connection con1; +CREATE TABLE t1 (id INT, value int, PRIMARY KEY (id), INDEX (value)) ENGINE=RocksDB; +INSERT INTO t1 VALUES (1,1); + +# Read-only, long-running transaction. SingleDelete/Put shouldn't increase much. 
+select variable_value into @p from information_schema.global_status where variable_name='rocksdb_number_sst_entry_put'; +select variable_value into @s from information_schema.global_status where variable_name='rocksdb_number_sst_entry_singledelete'; +-- replace_result $uuid uuid +START TRANSACTION WITH CONSISTENT ROCKSDB SNAPSHOT; + +connection con2; +--disable_query_log +let $i = 1; +while ($i <= 10000) { + let $update = UPDATE t1 SET value=value+1 WHERE id=1; + inc $i; + eval $update; +} +--enable_query_log + +connection con1; +select case when variable_value-@p < 1000 then 'true' else variable_value-@p end from information_schema.global_status where variable_name='rocksdb_number_sst_entry_put'; +select case when variable_value-@s < 100 then 'true' else variable_value-@s end from information_schema.global_status where variable_name='rocksdb_number_sst_entry_singledelete'; +SELECT * FROM t1; +--error ER_UNKNOWN_ERROR +INSERT INTO t1 values (2, 2); +ROLLBACK; +SELECT * FROM t1; +INSERT INTO t1 values (2, 2); +SELECT * FROM t1 ORDER BY id; + +# Regular long-running transaction. +# No "Transaction could not check for conflicts for operation" error should happen. 
+BEGIN; + +connection con2; +--disable_query_log +let $i = 5; +while ($i <= 10000) { + let $insert = INSERT INTO t1 VALUES ($i, $i); + inc $i; + eval $insert; +} +--enable_query_log + +connection con1; +SELECT COUNT(*) FROM t1; +COMMIT; + +connection default; +disconnect con1; +disconnect con2; +OPTIMIZE TABLE t1; +DROP TABLE t1; +reset master; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/records_in_range-master.opt b/storage/rocksdb/mysql-test/rocksdb/t/records_in_range-master.opt new file mode 100644 index 0000000000000..75a17cc157eb5 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/records_in_range-master.opt @@ -0,0 +1,3 @@ +--rocksdb_debug_optimizer_n_rows=20000 +--rocksdb_records_in_range=1000 +--rocksdb_table_stats_sampling_pct=100 diff --git a/storage/rocksdb/mysql-test/rocksdb/t/records_in_range.test b/storage/rocksdb/mysql-test/rocksdb/t/records_in_range.test new file mode 100644 index 0000000000000..15db32880845d --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/records_in_range.test @@ -0,0 +1,146 @@ +--source include/have_rocksdb.inc + +--source include/restart_mysqld.inc + +--disable_warnings +DROP TABLE IF EXISTS t1; +--enable_warnings + +# Create the table and insert some keys +CREATE TABLE t1 ( + i INT, + a INT, + b INT, + PRIMARY KEY (i), + KEY ka(a), + KEY kb(b) comment 'rev:cf1' +) ENGINE = rocksdb; + +--disable_query_log +let $max = 20000; +let $i = 1; +while ($i <= $max) { + let $insert = INSERT INTO t1 VALUES ($i, $i, $i); + inc $i; + eval $insert; +} +--enable_query_log + +# get results for records_in_range prior to memtable flush +# normal CF +explain extended select * from t1 where a> 500 and a< 750; +explain extended select * from t1 where a< 750; +explain extended select * from t1 where a> 500; +explain extended select * from t1 where a>=0 and a<=1000; + +#reverse CF +explain extended select * from t1 where b> 500 and b< 750; +explain extended select * from t1 where b< 750; +explain extended select * from t1 where b> 
500; +explain extended select * from t1 where b>=0 and b<=1000; + +## cost calculation differences between covering vs non-covering (#298) +set @save_rocksdb_records_in_range = @@session.rocksdb_records_in_range; +set rocksdb_records_in_range = 15000; +# covering, range +explain extended select a from t1 where a < 750; +# non-covering, full +explain extended select a, b from t1 where a < 750; +# covering, ref +explain extended select a from t1 where a = 700; +# non-covering, ref +explain extended select a,b from t1 where a = 700; +# covering, full index +explain extended select a from t1 where a in (700, 800); +# non-covering, full +explain extended select a,b from t1 where a in (700, 800); +set rocksdb_records_in_range=8000; +# covering, range +explain extended select a from t1 where a in (700, 800); +# non-covering, full +explain extended select a,b from t1 where a in (700, 800); +set rocksdb_records_in_range = @save_rocksdb_records_in_range; + +# flush memtable and repeat +set global rocksdb_force_flush_memtable_now = true; +# normal CF +explain extended select * from t1 where a> 500 and a< 750; +explain extended select * from t1 where a< 750; +explain extended select * from t1 where a> 500; +explain extended select * from t1 where a>=0 and a<=1000; + +#reverse CF +explain extended select * from t1 where b> 500 and b< 750; +explain extended select * from t1 where b< 750; +explain extended select * from t1 where b> 500; +explain extended select * from t1 where b>=0 and b<=1000; + +# a set of 1 +explain extended select * from t1 where a>= 500 and a<= 500; +explain extended select * from t1 where b>= 500 and b<= 500; + +# two indexes +explain extended select * from t1 where a< 750 and b> 500 and b< 750; + +# composite index +drop index ka on t1; +drop index kb on t1; +create index kab on t1(a,b); +set global rocksdb_force_flush_memtable_now = true; +explain extended select * from t1 where a< 750 and b> 500 and b< 750; + +# override records in range +set 
rocksdb_records_in_range=444; +explain extended select * from t1 where a< 750 and b> 500 and b< 750; +set rocksdb_records_in_range=0; + +# issue 82 +## forward cf +CREATE TABLE `linktable` ( + `id1` bigint(20) unsigned NOT NULL DEFAULT '0', + `id1_type` int(10) unsigned NOT NULL DEFAULT '0', + `id2` bigint(20) unsigned NOT NULL DEFAULT '0', + `id2_type` int(10) unsigned NOT NULL DEFAULT '0', + `link_type` bigint(20) unsigned NOT NULL DEFAULT '0', + `visibility` tinyint(3) NOT NULL DEFAULT '0', + `data` varchar(255) COLLATE latin1_bin NOT NULL DEFAULT '', + `time` bigint(20) unsigned NOT NULL DEFAULT '0', + `version` int(11) unsigned NOT NULL DEFAULT '0', + PRIMARY KEY (`link_type`,`id1`,`id2`) COMMENT 'cf_link_pk', + KEY `id1_type` (`id1`,`link_type`,`visibility`,`time`,`version`,`data`) COMMENT 'cf_link_id1_type' +) ENGINE=ROCKSDB DEFAULT CHARSET=latin1 COLLATE=latin1_bin; + +insert into linktable values (1,1,1,1,1,1,1,1,1); +insert into linktable values (1,1,2,1,1,1,1,1,1); +insert into linktable values (1,1,3,1,1,1,1,1,1); +insert into linktable values (1,1,4,1,1,1,1,1,1); +set global rocksdb_force_flush_memtable_now = true; +explain select id1, id2, link_type, visibility, data, time, version from linktable where id1 = 1 and link_type = 1 and id2 in (1, 2); +drop table linktable; + +## rev cf +CREATE TABLE `linktable` ( + `id1` bigint(20) unsigned NOT NULL DEFAULT '0', + `id1_type` int(10) unsigned NOT NULL DEFAULT '0', + `id2` bigint(20) unsigned NOT NULL DEFAULT '0', + `id2_type` int(10) unsigned NOT NULL DEFAULT '0', + `link_type` bigint(20) unsigned NOT NULL DEFAULT '0', + `visibility` tinyint(3) NOT NULL DEFAULT '0', + `data` varchar(255) COLLATE latin1_bin NOT NULL DEFAULT '', + `time` bigint(20) unsigned NOT NULL DEFAULT '0', + `version` int(11) unsigned NOT NULL DEFAULT '0', + PRIMARY KEY (`link_type`,`id1`,`id2`) COMMENT 'cf_link_pk', + KEY `id1_type` (`id1`,`link_type`,`visibility`,`time`,`version`,`data`) COMMENT 'rev:cf_link_id1_type' +) 
ENGINE=ROCKSDB DEFAULT CHARSET=latin1 COLLATE=latin1_bin; + +insert into linktable values (1,1,1,1,1,1,1,1,1); +insert into linktable values (1,1,2,1,1,1,1,1,1); +insert into linktable values (1,1,3,1,1,1,1,1,1); +insert into linktable values (1,1,4,1,1,1,1,1,1); +set global rocksdb_force_flush_memtable_now = true; +explain select id1, id2, link_type, visibility, data, time, version from linktable where id1 = 1 and link_type = 1 and id2 in (1, 2); +drop table linktable; + +#cleanup +DROP TABLE t1; + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/repair_table.inc b/storage/rocksdb/mysql-test/rocksdb/t/repair_table.inc new file mode 100644 index 0000000000000..47d6a29909556 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/repair_table.inc @@ -0,0 +1,38 @@ +# +# REPAIR TABLE statements +# +# Note: the output is likely to be different for the engine under test, +# in which case rdiff will be needed. Or, the output might say that +# the storage engine does not support REPAIR. +# + +--disable_warnings +DROP TABLE IF EXISTS t1,t2; +--enable_warnings + +CREATE TABLE t1 (a INT PRIMARY KEY, b CHAR(8)) ENGINE=rocksdb; +INSERT INTO t1 (a,b) VALUES (1,'a'),(2,'b'); +CREATE TABLE t2 (a INT, b CHAR(8) PRIMARY KEY) ENGINE=rocksdb; + +REPAIR TABLE t1; + +INSERT INTO t1 (a,b) VALUES (3,'c'); +INSERT INTO t2 (a,b) VALUES (4,'d'); +REPAIR NO_WRITE_TO_BINLOG TABLE t1, t2; +INSERT INTO t2 (a,b) VALUES (5,'e'),(6,'f'); +REPAIR LOCAL TABLE t2; +INSERT INTO t1 (a,b) VALUES (7,'g'),(8,'h'); +INSERT INTO t2 (a,b) VALUES (9,'i'); +REPAIR LOCAL TABLE t2, t1 EXTENDED; +INSERT INTO t1 (a,b) VALUES (10,'j'); +INSERT INTO t2 (a,b) VALUES (11,'k'); +REPAIR TABLE t1, t2 QUICK USE_FRM; +INSERT INTO t1 (a,b) VALUES (12,'l'); +INSERT INTO t2 (a,b) VALUES (13,'m'); +REPAIR NO_WRITE_TO_BINLOG TABLE t1, t2 QUICK EXTENDED USE_FRM; +FLUSH TABLE t1; + +DROP TABLE t1, t2; + + + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/repair_table.test 
b/storage/rocksdb/mysql-test/rocksdb/t/repair_table.test new file mode 100644 index 0000000000000..5c4807ce986e6 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/repair_table.test @@ -0,0 +1,8 @@ +--source include/have_rocksdb.inc + +# +# REPAIR TABLE statements +# + +--source repair_table.inc + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/replace.test b/storage/rocksdb/mysql-test/rocksdb/t/replace.test new file mode 100644 index 0000000000000..3ac37330b2dd8 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/replace.test @@ -0,0 +1,54 @@ +--source include/have_rocksdb.inc + +# +# Basic REPLACE statements +# + +########################################### +# TODO: +# A part of the test is currently disabled +# because unique indexes are not supported +########################################### + +--disable_warnings +DROP TABLE IF EXISTS t1; +--enable_warnings + +CREATE TABLE t1 (a INT, b CHAR(8), pk INT AUTO_INCREMENT PRIMARY KEY) ENGINE=rocksdb; + +# When there is no duplicate key, REPLACE should work as INSERT + +REPLACE INTO t1 (a,b) VALUES (1,'a'),(2,'b'),(3,'c'),(4,'d'),(5,'e'); +--sorted_result +SELECT a,b FROM t1; + +REPLACE t1 (a,b) VALUE (10,'foo'),(10,'foo'); +--sorted_result +SELECT a,b FROM t1; + +DROP TABLE t1; + +--disable_parsing + +CREATE TABLE t1 (a INT, b CHAR(8), pk INT AUTO_INCREMENT PRIMARY KEY, UNIQUE INDEX (a)) ENGINE=rocksdb; +REPLACE INTO t1 (a,b) VALUES (1,'a'),(2,'b'),(3,'c'); +--error ER_DUP_ENTRY +INSERT INTO t1 (a,b) VALUES (2,'d'); +REPLACE INTO t1 (a,b) VALUES (2,'d'); +--sorted_result +SELECT a,b FROM t1; + +DROP TABLE t1; + +--enable_parsing + +CREATE TABLE t1 (a INT, b CHAR(8), PRIMARY KEY (b)) ENGINE=rocksdb; +REPLACE INTO t1 (a,b) VALUES (1,'a'),(2,'b'),(3,'c'); +--error ER_DUP_ENTRY +INSERT INTO t1 (a,b) VALUES (4,'b'); +REPLACE INTO t1 (a,b) VALUES (4,'b'); +--sorted_result +SELECT a,b FROM t1; + +DROP TABLE t1; + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/rocksdb-master.opt 
b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb-master.opt new file mode 100644 index 0000000000000..6ad42e58aa22c --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb-master.opt @@ -0,0 +1 @@ +--rocksdb_debug_optimizer_n_rows=1000 --rocksdb_records_in_range=50 diff --git a/storage/rocksdb/mysql-test/rocksdb/t/rocksdb.test b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb.test new file mode 100644 index 0000000000000..87fc2e6f0fb06 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb.test @@ -0,0 +1,1987 @@ +--source include/have_rocksdb.inc +--source include/have_partition.inc + +# +# RocksDB Storage Engine tests +# +select ENGINE,COMMENT,TRANSACTIONS,XA,SAVEPOINTS from information_schema.engines where engine = 'rocksdb'; + +--disable_warnings +drop table if exists t0,t1,t2,t3,t4,t5,t6,t7,t8,t9,t10; +drop table if exists t11,t12,t13,t14,t15,t16,t17,t18,t19,t20; +drop table if exists t21,t22,t23,t24,t25,t26,t27,t28,t29; +drop table if exists t30,t31,t32,t33,t34,t35,t36,t37,t38,t39; +drop table if exists t40,t41,t42,t43,t44,t45,t46,t47,t48,t49; +--enable_warnings + +# Disable background compaction to prevent stats from affect explain output +SET @ORIG_PAUSE_BACKGROUND_WORK = @@ROCKSDB_PAUSE_BACKGROUND_WORK; +SET GLOBAL ROCKSDB_PAUSE_BACKGROUND_WORK = 1; + +--echo # +--echo # Issue #1: Don't update indexes if index values have not changed +--echo # +# [Jay Edgar] I moved this test first because it uses the +# rocksdb_number_keys_written value, but this value is affected out of band +# by drop tables. There is a background thread that periodically processes +# through the list of dropped keys and if any are gone from the database it +# deletes information related to the key - and this delete causes this count +# to be incorrect. I moved this test first and made the whole test require +# a fresh server to hopefully avoid tihs. 
+create table t1 ( + pk int primary key, + a int, + b int, + key(a) +) engine=rocksdb; + +insert into t1 values +(1,1,1), (2,2,2), (3,3,3), (4,4,4); + +set @var1=(select variable_value + from information_schema.global_status + where variable_name='rocksdb_number_keys_written'); + +--echo # Do an update that doesn't change the key 'a'. +update t1 set b=3334341 where a=2; + +set @var2=(select variable_value + from information_schema.global_status + where variable_name='rocksdb_number_keys_written'); +--echo # The following should produce 1 +select @var2 - @var1; + +--echo # Do an update that sets the key to the same value +update t1 set a=pk where a=3; +set @var3=(select variable_value + from information_schema.global_status + where variable_name='rocksdb_number_keys_written'); +--echo # We have 'updated' column to the same value, so the following must return 0: +select @var3 - @var2; +drop table t1; + +create table t0 (a int primary key) engine=rocksdb; +show create table t0; +drop table t0; + +create table t1 (a int primary key, b int) engine=rocksdb; +insert into t1 values (1,1); +insert into t1 values (2,2); + +select * from t1; + +--echo # Check that we can create another table and insert there +create table t2 (a varchar(10) primary key, b varchar(10)) engine=rocksdb; +insert into t2 value ('abc','def'); +insert into t2 value ('hijkl','mnopq'); +select * from t2; + +--echo # Select again from t1 to see that records from different tables dont mix +select * from t1; + +explain select * from t2 where a='no-such-key'; +--replace_column 9 # +explain select * from t2 where a='abc'; +select * from t2 where a='abc'; + +--echo # Try a composite PK +create table t3 ( + pk1 int, + pk2 varchar(10), + col1 varchar(10), + primary key(pk1, pk2) +) engine=rocksdb; + +insert into t3 values (2,'two', 'row#2'); +insert into t3 values (3,'three', 'row#3'); +insert into t3 values (1,'one', 'row#1'); + +select * from t3; +select * from t3 where pk1=3 and pk2='three'; + +drop table 
t1, t2, t3; + +--echo # +--echo # Test blob values +--echo # + +create table t4 (a int primary key, b blob) engine=rocksdb; +insert into t4 values (1, repeat('quux-quux', 60)); +insert into t4 values (10, repeat('foo-bar', 43)); +insert into t4 values (5, repeat('foo-bar', 200)); + +insert into t4 values (2, NULL); + + +select + a, + (case a + when 1 then b=repeat('quux-quux', 60) + when 10 then b=repeat('foo-bar', 43) + when 5 then b=repeat('foo-bar', 200) + when 2 then b is null + else 'IMPOSSIBLE!' end) as CMP +from t4; + +drop table t4; + +--echo # +--echo # Test blobs of various sizes +--echo # + +--echo # TINYBLOB +create table t5 (a int primary key, b tinyblob) engine=rocksdb; +insert into t5 values (1, repeat('quux-quux', 6)); +insert into t5 values (10, repeat('foo-bar', 4)); +insert into t5 values (5, repeat('foo-bar', 2)); +select + a, + (case a + when 1 then b=repeat('quux-quux', 6) + when 10 then b=repeat('foo-bar', 4) + when 5 then b=repeat('foo-bar', 2) + else 'IMPOSSIBLE!' end) as CMP +from t5; +drop table t5; + +--echo # MEDIUMBLOB +create table t6 (a int primary key, b mediumblob) engine=rocksdb; +insert into t6 values (1, repeat('AB', 65000)); +insert into t6 values (10, repeat('bbb', 40000)); +insert into t6 values (5, repeat('foo-bar', 2)); +select + a, + (case a + when 1 then b=repeat('AB', 65000) + when 10 then b=repeat('bbb', 40000) + when 5 then b=repeat('foo-bar', 2) + else 'IMPOSSIBLE!' end) as CMP +from t6; +drop table t6; + +--echo # LONGBLOB +create table t7 (a int primary key, b longblob) engine=rocksdb; +insert into t7 values (1, repeat('AB', 65000)); +insert into t7 values (10, repeat('bbb', 40000)); +insert into t7 values (5, repeat('foo-bar', 2)); +select + a, + (case a + when 1 then b=repeat('AB', 65000) + when 10 then b=repeat('bbb', 40000) + when 5 then b=repeat('foo-bar', 2) + else 'IMPOSSIBLE!' 
end) as CMP +from t7; +drop table t7; + + +--echo # +--echo # Check if DELETEs work +--echo # +create table t8 (a varchar(10) primary key, col1 varchar(12)) engine=rocksdb; + +insert into t8 values + ('one', 'eins'), + ('two', 'zwei'), + ('three', 'drei'), + ('four', 'vier'), + ('five', 'funf'); + +--echo # Delete by PK +--replace_column 9 # +explain delete from t8 where a='three'; +delete from t8 where a='three'; + +select * from t8; + +--echo # Delete while doing a full table scan +delete from t8 where col1='eins' or col1='vier'; +select * from t8; + +--echo # delete w/o WHERE: +delete from t8; +select * from t8; + +--echo # +--echo # Test UPDATEs +--echo # +insert into t8 values + ('one', 'eins'), + ('two', 'zwei'), + ('three', 'drei'), + ('four', 'vier'), + ('five', 'funf'); + +update t8 set col1='dva' where a='two'; + +update t8 set a='fourAAA' where col1='vier'; + +select * from t8; +delete from t8; + +--echo # +--echo # Basic transactions tests +--echo # +begin; +insert into t8 values ('trx1-val1', 'data'); +insert into t8 values ('trx1-val2', 'data'); +rollback; +select * from t8; + +begin; +insert into t8 values ('trx1-val1', 'data'); +insert into t8 values ('trx1-val2', 'data'); +commit; +select * from t8; + +drop table t8; + +--echo # +--echo # Check if DROP TABLE works +--echo # +create table t8 (a varchar(10) primary key, col1 varchar(12)) engine=rocksdb; +select * from t8; +insert into t8 values ('foo','foo'); +drop table t8; +create table t8 (a varchar(10) primary key, col1 varchar(12)) engine=rocksdb; +select * from t8; +drop table t8; + +--echo # +--echo # MDEV-3961: Assertion ... 
on creating a TEMPORARY RocksDB table +--echo # +--error ER_ILLEGAL_HA_CREATE_OPTION +CREATE TEMPORARY TABLE t10 (pk INT PRIMARY KEY) ENGINE=RocksDB; + +--echo # +--echo # MDEV-3963: JOIN or WHERE conditions involving keys on RocksDB tables don't work +--echo # +CREATE TABLE t10 (i INT PRIMARY KEY) ENGINE=RocksDB; +INSERT INTO t10 VALUES (1),(3); +CREATE TABLE t11 (j INT PRIMARY KEY) ENGINE=RocksDB; +INSERT INTO t11 VALUES (1),(4); + +select * from t10; +select * from t11; +--replace_column 9 # +EXPLAIN +SELECT * FROM t10, t11 WHERE i=j; +SELECT * FROM t10, t11 WHERE i=j; + +DROP TABLE t10,t11; + +--echo # +--echo # MDEV-3962: SELECT with ORDER BY causes "ERROR 1030 (HY000): Got error 122 +--echo # +CREATE TABLE t12 (pk INT PRIMARY KEY) ENGINE=RocksDB; +INSERT INTO t12 VALUES (2),(1); +SELECT * FROM t12 ORDER BY pk; +DROP TABLE t12; + +--echo # +--echo # MDEV-3964: Assertion `!pk_descr' fails in ha_rocksdb::open on adding partitions ... +--echo # +create table t14 (pk int primary key) engine=RocksDB partition by hash(pk) partitions 2; +#--error ER_GET_ERRNO +#alter table t14 add partition partitions 2; +# ^^ works, but causes weird warnings in error log. +drop table t14; + +--echo # +--echo # MDEV-3960: Server crashes on running DISCARD TABLESPACE on a RocksDB table +--echo # +create table t9 (i int primary key) engine=rocksdb; +--error ER_ILLEGAL_HA +alter table t9 discard tablespace; +drop table t9; + +--echo # +--echo # MDEV-3959: Assertion `slice->size() == table->s->reclength' fails ... 
+--echo # on accessing a table after ALTER +--echo # +CREATE TABLE t15 (a INT, rocksdb_pk INT PRIMARY KEY) ENGINE=RocksDB; +INSERT INTO t15 VALUES (1,1),(5,2); +#--error ER_ILLEGAL_HA +ALTER TABLE t15 DROP COLUMN a; +DROP TABLE t15; + +--echo # +--echo # MDEV-3968: UPDATE produces a wrong result while modifying a PK on a RocksDB table +--echo # +create table t16 (pk int primary key, a char(8)) engine=RocksDB; +insert into t16 values (1,'a'),(2,'b'),(3,'c'),(4,'d'); + +# +# Not anymore: The following query will still eat a record because of CANT-SEE-OWN-CHANGES +# property. +# +--error ER_DUP_ENTRY +update t16 set pk=100, a = 'updated' where a in ('b','c'); +select * from t16; +drop table t16; + +--echo # +--echo # MDEV-3970: A set of assorted crashes on inserting a row into a RocksDB table +--echo # +--disable_warnings +drop table if exists t_very_long_table_name; +--enable_warnings + +CREATE TABLE `t_very_long_table_name` ( + `c` char(1) NOT NULL, + `c0` char(0) NOT NULL, + `c1` char(1) NOT NULL, + `c20` char(20) NOT NULL, + `c255` char(255) NOT NULL, + PRIMARY KEY (`c255`) + ) ENGINE=RocksDB DEFAULT CHARSET=latin1; +INSERT INTO t_very_long_table_name VALUES ('a', '', 'c', REPEAT('a',20), REPEAT('x',255)); +drop table t_very_long_table_name; + + +--echo # +--echo # Test table locking and read-before-write checks. 
+--echo # +create table t17 (pk varchar(12) primary key, col1 varchar(12)) engine=rocksdb; +insert into t17 values ('row1', 'val1'); + +--error ER_DUP_ENTRY +insert into t17 values ('row1', 'val1-try2'); +--error ER_DUP_ENTRY +insert into t17 values ('ROW1', 'val1-try2'); + +insert into t17 values ('row2', 'val2'); +insert into t17 values ('row3', 'val3'); + +--echo # This is ok +update t17 set pk='row4' where pk='row1'; + +--echo # This will try to overwrite another row: +--error ER_DUP_ENTRY +update t17 set pk='row3' where pk='row2'; + +select * from t17; + +--echo # +--echo # Locking tests +--echo # + +connect (con1,localhost,root,,); + +--echo # First, make sure there's no locking when transactions update different rows +connection con1; +set autocommit=0; +update t17 set col1='UPD1' where pk='row2'; + +connection default; +update t17 set col1='UPD2' where pk='row3'; + +connection con1; +commit; + +connection default; +select * from t17; + +--echo # Check the variable +show variables like 'rocksdb_lock_wait_timeout'; +set rocksdb_lock_wait_timeout=2; # seconds +show variables like 'rocksdb_lock_wait_timeout'; + +--echo # Try updating the same row from two transactions +connection con1; +begin; +update t17 set col1='UPD2-AA' where pk='row2'; + +connection default; +--error ER_LOCK_WAIT_TIMEOUT +update t17 set col1='UPD2-BB' where pk='row2'; + +set rocksdb_lock_wait_timeout=1000; # seconds +--send + update t17 set col1='UPD2-CC' where pk='row2'; + +connection con1; +rollback; + +connection default; +reap; +select * from t17 where pk='row2'; + +drop table t17; + +disconnect con1; +--echo # +--echo # MDEV-4035: RocksDB: SELECT produces different results inside a transaction (read is not repeatable) +--echo # +--enable_connect_log + +create table t18 (pk int primary key, i int) engine=RocksDB; +begin; +select * from t18; +select * from t18 where pk = 1; + +--connect (con1,localhost,root,,) +insert into t18 values (1,100); + +--connection default +select * from t18; 
+select * from t18 where pk = 1; +commit; + +drop table t18; + +--echo # +--echo # MDEV-4036: RocksDB: INSERT .. ON DUPLICATE KEY UPDATE does not work, produces ER_DUP_KEY +--echo # +create table t19 (pk int primary key, i int) engine=RocksDB; +insert into t19 values (1,1); +insert into t19 values (1,100) on duplicate key update i = 102; +select * from t19; +drop table t19; + +--echo # MDEV-4037: RocksDB: REPLACE doesn't work, produces ER_DUP_KEY +create table t20 (pk int primary key, i int) engine=RocksDB; +insert into t20 values (1,1); +replace into t20 values (1,100); +select * from t20; +drop table t20; + +--echo # +--echo # MDEV-4041: Server crashes in Primary_key_comparator::get_hashnr on INSERT +--echo # +create table t21 (v varbinary(16) primary key, i int) engine=RocksDB; +insert into t21 values ('a',1); +select * from t21; +drop table t21; + +--echo # +--echo # MDEV-4047: RocksDB: Assertion `0' fails in Protocol::end_statement() on multi-table INSERT IGNORE +--echo # + +CREATE TABLE t22 (a int primary key) ENGINE=RocksDB; +INSERT INTO t22 VALUES (1),(2); +CREATE TABLE t23 (b int primary key) ENGINE=RocksDB; +INSERT INTO t23 SELECT * FROM t22; +DELETE IGNORE t22.*, t23.* FROM t22, t23 WHERE b < a; +DROP TABLE t22,t23; + +--echo # +--echo # MDEV-4046: RocksDB: Multi-table DELETE locks itself and ends with ER_LOCK_WAIT_TIMEOUT +--echo # +CREATE TABLE t24 (pk int primary key) ENGINE=RocksDB; +INSERT INTO t24 VALUES (1),(2); + +CREATE TABLE t25 LIKE t24; +INSERT INTO t25 SELECT * FROM t24; + +DELETE t25.* FROM t24, t25; +DROP TABLE t24,t25; + +--echo # +--echo # MDEV-4044: RocksDB: UPDATE or DELETE with ORDER BY locks itself +--echo # +create table t26 (pk int primary key, c char(1)) engine=RocksDB; +insert into t26 values (1,'a'),(2,'b'); +update t26 set c = 'x' order by pk limit 1; +delete from t26 order by pk limit 1; +select * from t26; +drop table t26; + + +--echo # +--echo # Test whether SELECT ... 
FOR UPDATE puts locks +--echo # +create table t27(pk varchar(10) primary key, col1 varchar(20)) engine=RocksDB; +insert into t27 values + ('row1', 'row1data'), + ('row2', 'row2data'), + ('row3', 'row3data'); + +connection con1; +begin; +select * from t27 where pk='row3' for update; + +connection default; +set rocksdb_lock_wait_timeout=1; +--error ER_LOCK_WAIT_TIMEOUT +update t27 set col1='row2-modified' where pk='row3'; + +connection con1; +rollback; +connection default; +disconnect con1; + +drop table t27; + +--echo # +--echo # MDEV-4060: RocksDB: Assertion `! trx->batch' fails in +--echo # +create table t28 (pk int primary key, a int) engine=RocksDB; +insert into t28 values (1,10),(2,20); +begin; +update t28 set a = 100 where pk = 3; +rollback; +select * from t28; +drop table t28; + + +--echo # +--echo # Secondary indexes +--echo # +create table t30 ( + pk varchar(16) not null primary key, + key1 varchar(16) not null, + col1 varchar(16) not null, + key(key1) +) engine=rocksdb; + +insert into t30 values ('row1', 'row1-key', 'row1-data'); +insert into t30 values ('row2', 'row2-key', 'row2-data'); +insert into t30 values ('row3', 'row3-key', 'row3-data'); + +--replace_column 9 # +explain +select * from t30 where key1='row2-key'; +select * from t30 where key1='row2-key'; + +--replace_column 9 # +explain +select * from t30 where key1='row1'; +--echo # This will produce nothing: +select * from t30 where key1='row1'; + +--replace_column 9 # +explain +select key1 from t30; +select key1 from t30; + +--echo # Create a duplicate record +insert into t30 values ('row2a', 'row2-key', 'row2a-data'); + +--echo # Can we see it? 
+select * from t30 where key1='row2-key'; + +delete from t30 where pk='row2'; +select * from t30 where key1='row2-key'; + +--echo # +--echo # Range scans on secondary index +--echo # +delete from t30; +insert into t30 values + ('row1', 'row1-key', 'row1-data'), + ('row2', 'row2-key', 'row2-data'), + ('row3', 'row3-key', 'row3-data'), + ('row4', 'row4-key', 'row4-data'), + ('row5', 'row5-key', 'row5-data'); +analyze table t30; + +--replace_column 9 # +explain +select * from t30 where key1 <='row3-key'; +select * from t30 where key1 <='row3-key'; + +--replace_column 9 # +explain +select * from t30 where key1 between 'row2-key' and 'row4-key'; +select * from t30 where key1 between 'row2-key' and 'row4-key'; + +--replace_column 9 # +explain +select * from t30 where key1 in ('row2-key','row4-key'); +select * from t30 where key1 in ('row2-key','row4-key'); + +--replace_column 9 # +explain +select key1 from t30 where key1 in ('row2-key','row4-key'); +select key1 from t30 where key1 in ('row2-key','row4-key'); + +--replace_column 9 # +explain +select * from t30 where key1 > 'row1-key' and key1 < 'row4-key'; +select * from t30 where key1 > 'row1-key' and key1 < 'row4-key'; + +--replace_column 9 # +explain +select * from t30 order by key1 limit 3; +select * from t30 order by key1 limit 3; + +--replace_column 9 # +explain +select * from t30 order by key1 desc limit 3; +select * from t30 order by key1 desc limit 3; + +--echo # +--echo # Range scans on primary key +--echo # +--replace_column 9 # +explain +select * from t30 where pk <='row3'; +select * from t30 where pk <='row3'; + +--replace_column 9 # +explain +select * from t30 where pk between 'row2' and 'row4'; +select * from t30 where pk between 'row2' and 'row4'; + +--replace_column 9 # +explain +select * from t30 where pk in ('row2','row4'); +select * from t30 where pk in ('row2','row4'); + +--replace_column 9 # +explain +select * from t30 order by pk limit 3; +select * from t30 order by pk limit 3; + +drop table t30; + 
+ +--echo # +--echo # MDEV-3841: RocksDB: Reading by PK prefix does not work +--echo # +create table t31 (i int, j int, k int, primary key(i,j,k)) engine=RocksDB; +insert into t31 values (1,10,100),(2,20,200); +select * from t31 where i = 1; +select * from t31 where j = 10; +select * from t31 where k = 100; +select * from t31 where i = 1 and j = 10; +select * from t31 where i = 1 and k = 100; +select * from t31 where j = 10 and k = 100; +select * from t31 where i = 1 and j = 10 and k = 100; +drop table t31; + +--echo # +--echo # MDEV-4055: RocksDB: UPDATE/DELETE by a multi-part PK does not work +--echo # +create table t32 (i int, j int, k int, primary key(i,j,k), a varchar(8)) engine=RocksDB; +insert into t32 values + (1,10,100,''), + (2,20,200,''); +select * from t32 where i = 1 and j = 10 and k = 100; +update t32 set a = 'updated' where i = 1 and j = 10 and k = 100; +select * from t32; +drop table t32; + +--echo # +--echo # MDEV-3841: RocksDB: Assertion `0' fails in ha_rocksdb::index_read_map on range select with ORDER BY .. 
DESC +--echo # +CREATE TABLE t33 (pk INT PRIMARY KEY, a CHAR(1)) ENGINE=RocksDB; +INSERT INTO t33 VALUES (1,'a'),(2,'b'); +SELECT * FROM t33 WHERE pk <= 10 ORDER BY pk DESC; +DROP TABLE t33; + +--echo # +--echo # MDEV-4081: RocksDB throws error 122 on an attempt to create a table with unique index +--echo # +#--error ER_GET_ERRMSG +--echo # Unique indexes can be created, but uniqueness won't be enforced +create table t33 (pk int primary key, u int, unique index(u)) engine=RocksDB; +drop table t33; + +--echo # +--echo # MDEV-4077: RocksDB: Wrong result (duplicate row) on select with range +--echo # +CREATE TABLE t34 (pk INT PRIMARY KEY) ENGINE=RocksDB; +INSERT INTO t34 VALUES (10),(11); +SELECT pk FROM t34 WHERE pk > 5 AND pk < 15; +SELECT pk FROM t34 WHERE pk BETWEEN 5 AND 15; +SELECT pk FROM t34 WHERE pk > 5; +SELECT pk FROM t34 WHERE pk < 15; +drop table t34; + +--echo # +--echo # MDEV-4086: RocksDB does not allow a query with multi-part pk and index and ORDER BY .. DEC +--echo # +create table t35 (a int, b int, c int, d int, e int, primary key (a,b,c), key (a,c,d,e)) engine=RocksDB; +insert into t35 values (1,1,1,1,1),(2,2,2,2,2); +select * from t35 where a = 1 and c = 1 and d = 1 order by e desc; +drop table t35; + +--echo # +--echo # MDEV-4084: RocksDB: Wrong result on IN subquery with index +--echo # +CREATE TABLE t36 (pk INT PRIMARY KEY, a INT, KEY(a)) ENGINE=RocksDB; +INSERT INTO t36 VALUES (1,10),(2,20); +SELECT 3 IN ( SELECT a FROM t36 ); +drop table t36; + +--echo # +--echo # MDEV-4084: RocksDB: Wrong result on IN subquery with index +--echo # +CREATE TABLE t37 (pk INT PRIMARY KEY, a INT, b CHAR(1), KEY(a), KEY(a,b)) + ENGINE=RocksDB; +INSERT INTO t37 VALUES (1,10,'x'), (2,20,'y'); +SELECT MAX(a) FROM t37 WHERE a < 100; +DROP TABLE t37; + +--echo # +--echo # MDEV-4090: RocksDB: Wrong result (duplicate rows) on range access with secondary key and ORDER BY DESC +--echo # +CREATE TABLE t38 (pk INT PRIMARY KEY, i INT, KEY(i)) ENGINE=RocksDB; +INSERT INTO t38 
VALUES (1,10), (2,20); +SELECT i FROM t38 WHERE i NOT IN (8) ORDER BY i DESC; +drop table t38; + +--echo # +--echo # MDEV-4092: RocksDB: Assertion `in_table(pa, a_len)' fails in Rdb_key_def::cmp_full_keys +--echo # with a multi-part key and ORDER BY .. DESC +--echo # +CREATE TABLE t40 (pk1 INT PRIMARY KEY, a INT, b VARCHAR(1), KEY(b,a)) ENGINE=RocksDB; +INSERT INTO t40 VALUES (1, 7,'x'),(2,8,'y'); + +CREATE TABLE t41 (pk2 INT PRIMARY KEY) ENGINE=RocksDB; +INSERT INTO t41 VALUES (1),(2); + +SELECT * FROM t40, t41 WHERE pk1 = pk2 AND b = 'o' ORDER BY a DESC; +DROP TABLE t40,t41; + +--echo # +--echo # MDEV-4093: RocksDB: IN subquery by secondary key with NULL among values returns true instead of NULL +--echo # +CREATE TABLE t42 (pk INT PRIMARY KEY, a INT, KEY(a)) ENGINE=RocksDB; +INSERT INTO t42 VALUES (1, NULL),(2, 8); +SELECT ( 3 ) NOT IN ( SELECT a FROM t42 ); +DROP TABLE t42; + +--echo # +--echo # MDEV-4094: RocksDB: Wrong result on SELECT and ER_KEY_NOT_FOUND on +--echo # DELETE with search by NULL-able secondary key ... 
+--echo # +CREATE TABLE t43 (pk INT PRIMARY KEY, a INT, b CHAR(1), KEY(a)) ENGINE=RocksDB; +INSERT INTO t43 VALUES (1,8,'g'),(2,9,'x'); +UPDATE t43 SET pk = 10 WHERE a = 8; +REPLACE INTO t43 ( a ) VALUES ( 8 ); +REPLACE INTO t43 ( b ) VALUES ( 'y' ); +SELECT * FROM t43 WHERE a = 8; +DELETE FROM t43 WHERE a = 8; +DROP TABLE t43; + +--echo # +--echo # Basic AUTO_INCREMENT tests +--echo # +create table t44(pk int primary key auto_increment, col1 varchar(12)) engine=rocksdb; +insert into t44 (col1) values ('row1'); +insert into t44 (col1) values ('row2'); +insert into t44 (col1) values ('row3'); +select * from t44; +drop table t44; + +--echo # +--echo # ALTER TABLE tests +--echo # +create table t45 (pk int primary key, col1 varchar(12)) engine=rocksdb; +insert into t45 values (1, 'row1'); +insert into t45 values (2, 'row2'); +alter table t45 rename t46; +select * from t46; +drop table t46; +--error ER_BAD_TABLE_ERROR +drop table t45; + + +--echo # +--echo # Check Bulk loading +--echo # Bulk loading used to overwrite existing data +--echo # Now it fails if there is data overlap with what +--echo # already exists +--echo # +show variables +where + variable_name like 'rocksdb%' and + variable_name not like 'rocksdb_supported_compression_types'; + +create table t47 (pk int primary key, col1 varchar(12)) engine=rocksdb; +insert into t47 values (1, 'row1'); +insert into t47 values (2, 'row2'); +set rocksdb_bulk_load=1; +insert into t47 values (3, 'row3'),(4, 'row4'); +set rocksdb_bulk_load=0; +# Check concurrent bulk loading +--connect (con1,localhost,root,,) +set rocksdb_bulk_load=1; +insert into t47 values (10, 'row10'),(11, 'row11'); +--connection default +set rocksdb_bulk_load=1; +insert into t47 values (100, 'row100'),(101, 'row101'); +--disconnect con1 +--connection default +set rocksdb_bulk_load=0; +--disable_query_log +let $wait_condition = select count(*) = 8 as c from t47; +--source include/wait_condition.inc +--enable_query_log +select * from t47; +drop table t47; 
+ +--echo # +--echo # Fix TRUNCATE over empty table (transaction is committed when it wasn't +--echo # started) +--echo # +create table t48(pk int primary key auto_increment, col1 varchar(12)) engine=rocksdb; +set autocommit=0; +#--error ER_ILLEGAL_HA +truncate table t48; +set autocommit=1; +drop table t48; + +--echo # +--echo # MDEV-4059: RocksDB: query waiting for a lock cannot be killed until query timeout exceeded +--echo # +--enable_connect_log + +create table t49 (pk int primary key, a int) engine=RocksDB; +insert into t49 values (1,10),(2,20); +begin; +update t49 set a = 100 where pk = 1; + +--connect (con1,localhost,root,,) +--let $con1_id = `SELECT CONNECTION_ID()` +set rocksdb_lock_wait_timeout=60; +set @var1= to_seconds(now()); +send update t49 set a = 1000 where pk = 1; + +--connect (con2,localhost,root,,) +--echo kill query \$con1_id; +--disable_query_log +# If we immeditely kill the query - internally the condition broadcast can +# occur before the lock is waiting on the condition, thus the broadcast call +# is lost. Sleep 1 second to avoid this condition. +--sleep 1 +eval kill query $con1_id; +--enable_query_log +--connection con1 +--error ER_QUERY_INTERRUPTED +--reap +set @var2= to_seconds(now()); + +# We expect the time to kill query in con1 should be below +# rocksdb_lock_wait_timeout (60). 
+select if ((@var2 - @var1) < 60, "passed", (@var2 - @var1)) as 'result'; + +--connection default +--disconnect con1 + +commit; +drop table t49; + +--echo # +--echo # Index-only tests for INT-based columns +--echo # +create table t1 (pk int primary key, key1 int, col1 int, key(key1)) engine=rocksdb; +insert into t1 values (1,1,1); +insert into t1 values (2,2,2); +insert into t1 values (-5,-5,-5); +--echo # INT column uses index-only: +--replace_column 9 # +explain +select key1 from t1 where key1=2; +select key1 from t1 where key1=2; +select key1 from t1 where key1=-5; +drop table t1; + + +create table t2 (pk int primary key, key1 int unsigned, col1 int, key(key1)) engine=rocksdb; +insert into t2 values (1,1,1), (2,2,2); +--echo # INT UNSIGNED column uses index-only: +--replace_column 9 # +explain +select key1 from t2 where key1=2; +select key1 from t2 where key1=2; +drop table t2; + + +create table t3 (pk bigint primary key, key1 bigint, col1 int, key(key1)) engine=rocksdb; +insert into t3 values (1,1,1), (2,2,2); +--echo # BIGINT uses index-only: +--replace_column 9 # +explain +select key1 from t3 where key1=2; +select key1 from t3 where key1=2; +drop table t3; + +--echo # +--echo # Index-only reads for string columns +--echo # +create table t1 ( + pk int primary key, + key1 char(10) character set binary, + col1 int, + key (key1) +) engine=rocksdb; +insert into t1 values(1, 'one',11), (2,'two',22); +--replace_column 9 # +explain +select key1 from t1 where key1='one'; +--echo # The following will produce no rows. This looks like a bug, +--echo # but it is actually correct behavior. Binary strings are end-padded +--echo # with \0 character (and not space). Comparison does not ignore +--echo # the tail of \0. 
+select key1 from t1 where key1='one'; +--replace_column 9 # +explain +select hex(key1) from t1 where key1='one\0\0\0\0\0\0\0'; +select hex(key1) from t1 where key1='one\0\0\0\0\0\0\0'; +drop table t1; + + +create table t2 ( + pk int primary key, + key1 char(10) collate latin1_bin, + col1 int, + key (key1) +) engine=rocksdb; +insert into t2 values(1, 'one',11), (2,'two',22); +--replace_column 9 # +explain +select key1 from t2 where key1='one'; +select key1 from t2 where key1='one'; +drop table t2; + + +create table t3 ( + pk int primary key, + key1 char(10) collate utf8_bin, + col1 int, + key (key1) +) engine=rocksdb; +insert into t3 values(1, 'one',11), (2,'two',22); +--replace_column 9 # +explain +select key1 from t3 where key1='one'; +select key1 from t3 where key1='one'; +drop table t3; + + +--echo # a VARCHAR column +create table t4 ( + pk int primary key, + key1 varchar(10) collate latin1_bin, + key(key1) +) engine=rocksdb; +insert into t4 values(1, 'one'), (2,'two'),(3,'threee'),(55,'fifty-five'); + +--replace_column 9 # +explain +select key1 from t4 where key1='two'; +select key1 from t4 where key1='two'; + +select key1 from t4 where key1='fifty-five'; + +--replace_column 9 # +explain +select key1 from t4 where key1 between 's' and 'u'; +select key1 from t4 where key1 between 's' and 'u'; + +drop table t4; + +--echo # +--echo # MDEV-4305: RocksDB: Assertion `((keypart_map + 1) & keypart_map) == 0' fails in calculate_key_len +--echo # +CREATE TABLE t1 (pk1 INT, pk2 CHAR(32), i INT, PRIMARY KEY(pk1,pk2), KEY(i)) ENGINE=RocksDB; +INSERT INTO t1 VALUES (1,'test1',6),(2,'test2',8); +SELECT * FROM t1 WHERE i != 3 OR pk1 > 9; +DROP TABLE t1; + +--echo # +--echo # MDEV-4298: RocksDB: Assertion `thd->is_error() || kill_errno' fails in ha_rows filesort +--echo # +call mtr.add_suppression("Sort aborted"); +CREATE TABLE t1 (pk INT PRIMARY KEY, i INT, KEY(i)) ENGINE=RocksDB; +INSERT INTO t1 VALUES (1,1),(2,2); +BEGIN; +UPDATE t1 SET i = 100; + +--connect 
(con1,localhost,root,,test) +--error ER_LOCK_WAIT_TIMEOUT +DELETE IGNORE FROM t1 ORDER BY i; +--disconnect con1 + +--connection default +COMMIT; +DROP TABLE t1; + +--echo # +--echo # MDEV-4324: RocksDB: Valgrind "Use of uninitialised value" warnings on inserting value into varchar field +--echo # (testcase only) +--echo # +CREATE TABLE t1 (pk INT PRIMARY KEY, c VARCHAR(4)) ENGINE=RocksDB; +INSERT INTO t1 VALUES (1,'foo'), (2,'bar'); +DROP TABLE t1; + +--echo # +--echo # MDEV-4304: RocksDB: Index-only scan by a field with utf8_bin collation returns garbage symbols +--echo # +CREATE TABLE t1 (pk INT PRIMARY KEY, c1 CHAR(1), c2 CHAR(1), KEY(c1)) ENGINE=RocksDB CHARSET utf8 COLLATE utf8_bin; +INSERT INTO t1 VALUES (1,'h','h'); +SELECT * FROM t1; +SELECT c1 FROM t1; +DROP TABLE t1; + +--echo # +--echo # MDEV-4300: RocksDB: Server crashes in inline_mysql_mutex_lock on SELECT .. FOR UPDATE +--echo # +CREATE TABLE t2 (pk INT PRIMARY KEY, i INT, KEY (i)) ENGINE=RocksDB; +INSERT INTO t2 VALUES (1,4),(2,5); +SELECT 1 FROM t2 WHERE i < 0 FOR UPDATE; +DROP TABLE t2; + +--echo # +--echo # MDEV-4301: RocksDB: Assertion `pack_info != __null' fails in Rdb_key_def::unpack_record +--echo # +CREATE TABLE t1 (pk INT PRIMARY KEY, i INT, c CHAR(1), KEY(c,i)) ENGINE=RocksDB; +INSERT INTO t1 VALUES (1,4,'d'),(2,8,'e'); +SELECT MAX( pk ) FROM t1 WHERE i = 105 AND c = 'h'; +DROP TABLE t1; + +--echo # +--echo # MDEV-4337: RocksDB: Inconsistent results comparing a char field with an int field +--echo # +create table t1 (c char(1), i int, primary key(c), key(i)) engine=RocksDB; +insert into t1 values ('2',2),('6',6); +select * from t1 where c = i; +select * from t1 ignore index (i) where c = i; +drop table t1; + + +--echo # +--echo # Test statement rollback inside a transaction +--echo # +create table t1 (pk varchar(12) primary key) engine=rocksdb; +insert into t1 values ('old-val1'),('old-val2'); + +create table t2 (pk varchar(12) primary key) engine=rocksdb; +insert into t2 values 
('new-val2'),('old-val1'); + +begin; +insert into t1 values ('new-val1'); +--error ER_DUP_ENTRY +insert into t1 select * from t2; +commit; + +select * from t1; +drop table t1, t2; + +--echo # +--echo # MDEV-4383: RocksDB: Wrong result of DELETE .. ORDER BY .. LIMIT: +--echo # rows that should be deleted remain in the table +--echo # +CREATE TABLE t2 (pk INT AUTO_INCREMENT PRIMARY KEY) ENGINE=RocksDB; +CREATE TABLE t1 (pk INT AUTO_INCREMENT PRIMARY KEY) ENGINE=RocksDB; + +INSERT INTO t1 (pk) VALUES (NULL),(NULL); +BEGIN; +INSERT INTO t2 (pk) VALUES (NULL),(NULL); +INSERT INTO t1 (pk) VALUES (NULL),(NULL),(NULL),(NULL),(NULL),(NULL); + +--enable_info +SELECT * FROM t1 ORDER BY pk LIMIT 9; +DELETE FROM t1 ORDER BY pk LIMIT 9; +SELECT * FROM t1 ORDER BY pk LIMIT 9; +--disable_info + +DROP TABLE t1,t2; + +--echo # +--echo # MDEV-4374: RocksDB: Valgrind warnings 'Use of uninitialised value' on +--echo # inserting into a varchar column +--echo # +CREATE TABLE t1 (pk INT PRIMARY KEY, a VARCHAR(32)) ENGINE=RocksDB; +INSERT INTO t1 VALUES (1,'foo'),(2,'bar'); +DROP TABLE t1; + + +--echo # +--echo # MDEV-4061: RocksDB: Changes from an interrupted query are still applied +--echo # + +--enable_connect_log + +create table t1 (pk int primary key, a int) engine=RocksDB; +insert into t1 values (1,10),(2,20); + +--let $con_id = `select connection_id()` + +set autocommit = 1; +--send +update t1 set a = sleep(100) where pk = 1; + +--connect (con1,localhost,root,,) +--echo kill query \$con_id; +--disable_query_log +eval kill query $con_id; +--enable_query_log + +--connection default +--error ER_QUERY_INTERRUPTED +--reap + +select * from t1; +--disconnect con1 +--disable_connect_log +drop table t1; + + +--echo # +--echo # MDEV-4099: RocksDB: Wrong results with index and range access after INSERT IGNORE or REPLACE +--echo # +CREATE TABLE t1 (pk INT PRIMARY KEY, a SMALLINT, b INT, KEY (a)) ENGINE=RocksDB; +INSERT IGNORE INTO t1 VALUES (1, 157, 0), (2, 1898, -504403), (1, -14659, 0); 
+SELECT * FROM t1; +SELECT pk FROM t1; +SELECT * FROM t1 WHERE a != 97; +DROP TABLE t1; + + +--echo # +--echo # Test @@rocksdb_max_row_locks +--echo # +CREATE TABLE t1 (pk INT PRIMARY KEY, a int) ENGINE=RocksDB; +set @a=-1; +insert into t1 select (@a:=@a+1), 1234 from information_schema.session_variables limit 100; +set @tmp1= @@rocksdb_max_row_locks; +set rocksdb_max_row_locks= 20; +--error ER_GET_ERRMSG +update t1 set a=a+10; +DROP TABLE t1; + + +--echo # +--echo # Test AUTO_INCREMENT behavior problem, +--echo # "explicit insert into an auto-inc column is not noticed by RocksDB" +--echo # +create table t1 (i int primary key auto_increment) engine=RocksDB; + +insert into t1 values (null); +insert into t1 values (null); +select * from t1; +drop table t1; + +create table t2 (i int primary key auto_increment) engine=RocksDB; + +insert into t2 values (1); +select * from t2; + +--echo # this fails (ie. used to fail), RocksDB engine did not notice use of '1' above +insert into t2 values (null); +select * from t2; + +--echo # but then this succeeds, so previous statement must have incremented next number counter +insert into t2 values (null); +select * from t2; +drop table t2; + +--echo # +--echo # Fix Issue#2: AUTO_INCREMENT value doesn't survive server shutdown +--echo # +create table t1 (i int primary key auto_increment) engine=RocksDB; + +insert into t1 values (null); +insert into t1 values (null); + +SET GLOBAL ROCKSDB_PAUSE_BACKGROUND_WORK = @ORIG_PAUSE_BACKGROUND_WORK; + +--source include/restart_mysqld.inc + +SET @ORIG_PAUSE_BACKGROUND_WORK = @@ROCKSDB_PAUSE_BACKGROUND_WORK; +SET GLOBAL ROCKSDB_PAUSE_BACKGROUND_WORK = 1; + +insert into t1 values (null); +select * from t1; + +drop table t1; + +--echo # +--echo # Fix Issue #3: SHOW TABLE STATUS shows Auto_increment=0 +--echo # +create table t1 (i int primary key auto_increment) engine=RocksDB; + +insert into t1 values (null),(null); +--replace_column 7 # +show table status like 't1'; +drop table t1; + +--echo # 
+--echo # Fix Issue #4: Crash when using pseudo-unique keys +--echo # +CREATE TABLE t1 ( + i INT, + t TINYINT, + s SMALLINT, + m MEDIUMINT, + b BIGINT, + pk MEDIUMINT AUTO_INCREMENT PRIMARY KEY, + UNIQUE KEY b_t (b,t) +) ENGINE=rocksdb; + +INSERT INTO t1 (i,t,s,m,b) VALUES (1,2,3,4,5),(1000,100,10000,1000000,1000000000000000000),(5,100,10000,1000000,100000000000000000),(2,3,4,5,6),(3,4,5,6,7),(101,102,103,104,105),(10001,103,10002,10003,10004),(10,11,12,13,14),(11,12,13,14,15),(12,13,14,15,16); + +SELECT b+t FROM t1 WHERE (b,t) IN ( SELECT b, t FROM t1 WHERE i>1 ) ORDER BY b+t; +DROP TABLE t1; + +--echo # +--echo # Fix issue #5: Transaction rollback doesn't undo all changes. +--echo # +create table t0 (a int) engine=myisam; +insert into t0 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9); + +create table t1 (id int auto_increment primary key, value int) engine=rocksdb; + +set autocommit=0; +begin; +set @a:=0; +insert into t1 select @a:=@a+1, @a from t0 A, t0 B, t0 C, t0 D where D.a<4; +insert into t1 select @a:=@a+1, @a from t0 A, t0 B, t0 C, t0 D where D.a<4; +insert into t1 select @a:=@a+1, @a from t0 A, t0 B, t0 C, t0 D where D.a<4; +rollback; +select count(*) from t1; + +set autocommit=1; +drop table t0, t1; + +--echo # +--echo # Check status variables +--echo # +--replace_column 2 # +show status like 'rocksdb%'; + +select VARIABLE_NAME from INFORMATION_SCHEMA.global_status where VARIABLE_NAME LIKE 'rocksdb%'; +--echo # RocksDB-SE's status variables are global internally +--echo # but they are shown as both session and global, like InnoDB's status vars. 
+select VARIABLE_NAME from INFORMATION_SCHEMA.session_status where VARIABLE_NAME LIKE 'rocksdb%'; + + +--echo # +--echo # Fix issue #9: HA_ERR_INTERNAL_ERROR when running linkbench +--echo # +create table t0 (a int) engine=myisam; +insert into t0 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9); + +create table t1 ( + pk int primary key, + col1 varchar(255), + key(col1) +) engine=rocksdb; +insert into t1 select a, repeat('123456789ABCDEF-', 15) from t0; +select * from t1 where pk=3; +drop table t0, t1; + +--echo # +--echo # Fix issue #10: Segfault in Rdb_key_def::get_primary_key_tuple +--echo # +create table t0 (a int) engine=myisam; +insert into t0 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9); + +CREATE TABLE t1 ( + id1 bigint(20) unsigned NOT NULL DEFAULT '0', + id2 bigint(20) unsigned NOT NULL DEFAULT '0', + link_type bigint(20) unsigned NOT NULL DEFAULT '0', + visibility tinyint(3) NOT NULL DEFAULT '0', + data varchar(255) NOT NULL DEFAULT '', + time bigint(20) unsigned NOT NULL DEFAULT '0', + version int(11) unsigned NOT NULL DEFAULT '0', + PRIMARY KEY (link_type,id1,id2) +) engine=rocksdb; + +insert into t1 select a,a,a,1,a,a,a from t0; + +alter table t1 add index id1_type (id1,link_type,visibility,time,version,data); +select * from t1 where id1 = 3; + +drop table t0,t1; + +--echo # +--echo # Test column families +--echo # + +create table t1 ( + pk int primary key, + col1 int, + col2 int, + key(col1) comment 'cf3', + key(col2) comment 'cf4' +) engine=rocksdb; + +insert into t1 values (1,1,1), (2,2,2), (3,3,3), (4,4,4), (5,5,5); + +--replace_column 9 # +explain +select * from t1 where col1=2; +select * from t1 where col1=2; + +--replace_column 9 # +explain +select * from t1 where col2=3; +select * from t1 where col2=3; + +select * from t1 where pk=4; + +drop table t1; + +--echo # +--echo # Try primary key in a non-default CF: +--echo # +create table t1 ( + pk int, + col1 int, + col2 int, + key(col1) comment 'cf3', + key(col2) comment 'cf4', + primary key (pk) 
comment 'cf5' +) engine=rocksdb; +insert into t1 values (1,1,1), (2,2,2), (3,3,3), (4,4,4), (5,5,5); + +--replace_column 9 # +explain +select * from t1 where col1=2; +select * from t1 where col1=2; + +select * from t1 where pk=4; + +drop table t1; + +--echo # +--echo # Issue #15: SIGSEGV from reading in blob data +--echo # +CREATE TABLE t1 ( + id int not null, + blob_col text, + PRIMARY KEY (id) +) ENGINE=ROCKSDB CHARSET=latin1; + +INSERT INTO t1 SET id=123, blob_col=repeat('z',64000) ON DUPLICATE KEY UPDATE blob_col=VALUES(blob_col); +INSERT INTO t1 SET id=123, blob_col='' ON DUPLICATE KEY UPDATE blob_col=VALUES(blob_col); +DROP TABLE t1; + + +--echo # +--echo # Issue #17: Automatic per-index column families +--echo # +create table t1 ( + id int not null, + key1 int, + PRIMARY KEY (id), + index (key1) comment '$per_index_cf' +) engine=rocksdb; + +--echo #Same CF ids with different CF flags +--error ER_UNKNOWN_ERROR +create table t1_err ( + id int not null, + key1 int, + PRIMARY KEY (id), + index (key1) comment 'test.t1.key1' +) engine=rocksdb; + +create table t1_err ( + id int not null, + key1 int, + PRIMARY KEY (id), + index (key1) comment 'test.t1.key2' +) engine=rocksdb; +drop table t1_err; + +--echo # Unfortunately there is no way to check which column family everything goes to +insert into t1 values (1,1); +select * from t1; +--echo # Check that ALTER and RENAME are disallowed +--error ER_NOT_SUPPORTED_YET +alter table t1 add col2 int; + +--error ER_NOT_SUPPORTED_YET +rename table t1 to t2; + +drop table t1; + +--echo # Check detection of typos in \$per_index_cf +--error ER_NOT_SUPPORTED_YET +create table t1 ( + id int not null, + key1 int, + PRIMARY KEY (id), + index (key1) comment '$per_idnex_cf' +)engine=rocksdb; + + +--echo # +--echo # Issue #22: SELECT ... 
FOR UPDATE takes a long time +--echo # +create table t0 (a int) engine=myisam; +insert into t0 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9); + +create table t1 ( + id1 int, + id2 int, + value1 int, + value2 int, + primary key(id1, id2) COMMENT 'new_column_family', + key(id2) +) engine=rocksdb default charset=latin1 collate=latin1_bin; + +insert into t1 select A.a, B.a, 31, 1234 from t0 A, t0 B; + +--replace_column 9 # +explain +select * from t1 where id1=30 and value1=30 for update; + +set @var1=(select variable_value + from information_schema.global_status + where variable_name='rocksdb_number_keys_read'); + +select * from t1 where id1=3 and value1=3 for update; + +set @var2=(select variable_value + from information_schema.global_status + where variable_name='rocksdb_number_keys_read'); +--echo # The following must return true (before the fix, the difference was 70): +select if((@var2 - @var1) < 30, 1, @var2-@var1); + +drop table t0,t1; + +--echo # +--echo # Issue #33: SELECT ... FROM rocksdb_table ORDER BY primary_key uses sorting +--echo # +create table t1 (id int primary key, value int) engine=rocksdb; +insert into t1 values (1,1),(2,2),(3,3); +--echo # The following must not use 'Using filesort': +--replace_column 9 # +explain select * from t1 ORDER BY id; +drop table t1; + +--echo # +--echo # Issue #26: Index-only scans for DATETIME and TIMESTAMP +--echo # +create table t0 (a int) engine=myisam; +insert into t0 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9); + +--echo # Try a DATETIME column: +create table t1 ( + pk int auto_increment primary key, + kp1 datetime, + kp2 int, + col1 int, + key(kp1, kp2) +) engine=rocksdb; +insert into t1 (kp1,kp2) +select date_add('2015-01-01 12:34:56', interval a day), a from t0; + +select * from t1; + +--echo # This must show 'Using index' +--replace_column 9 # +explain +select kp1,kp2 from t1 force index (kp1) +where kp1 between '2015-01-01 00:00:00' and '2015-01-05 23:59:59'; + +select kp1,kp2 from t1 force index (kp1) 
+where kp1 between '2015-01-01 00:00:00' and '2015-01-05 23:59:59'; + +--echo # Now, the same with NOT NULL column +create table t2 ( + pk int auto_increment primary key, + kp1 datetime not null, + kp2 int, + col1 int, + key(kp1, kp2) +) engine=rocksdb; +insert into t2 select * from t1; +--echo # This must show 'Using index' +--replace_column 9 # +explain +select kp1,kp2 from t2 force index (kp1) +where kp1 between '2015-01-01 00:00:00' and '2015-01-05 23:59:59'; + +select kp1,kp2 from t2 force index (kp1) +where kp1 between '2015-01-01 00:00:00' and '2015-01-05 23:59:59'; +drop table t1,t2; + +--echo # Try a DATE column: +create table t1 ( + pk int auto_increment primary key, + kp1 date, + kp2 int, + col1 int, + key(kp1, kp2) +) engine=rocksdb; +insert into t1 (kp1,kp2) +select date_add('2015-01-01', interval a day), a from t0; + +select * from t1; + +--echo # This must show 'Using index' +--replace_column 9 # +explain +select kp1,kp2 from t1 force index (kp1) +where kp1 between '2015-01-01' and '2015-01-05'; + +select kp1,kp2 from t1 force index (kp1) +where kp1 between '2015-01-01' and '2015-01-05'; + +--echo # Now, the same with NOT NULL column +create table t2 ( + pk int auto_increment primary key, + kp1 date not null, + kp2 int, + col1 int, + key(kp1, kp2) +) engine=rocksdb; +insert into t2 select * from t1; +--echo # This must show 'Using index' +--replace_column 9 # +explain +select kp1,kp2 from t2 force index (kp1) +where kp1 between '2015-01-01 00:00:00' and '2015-01-05 23:59:59'; + +select kp1,kp2 from t2 force index (kp1) +where kp1 between '2015-01-01 00:00:00' and '2015-01-05 23:59:59'; +drop table t1,t2; + +--echo # +--echo # Try a TIMESTAMP column: +--echo # +create table t1 ( + pk int auto_increment primary key, + kp1 timestamp, + kp2 int, + col1 int, + key(kp1, kp2) +) engine=rocksdb; +insert into t1 (kp1,kp2) +select date_add('2015-01-01 12:34:56', interval a day), a from t0; + +select * from t1; + +--echo # This must show 'Using index' 
+--replace_column 9 # +explain +select kp1,kp2 from t1 force index (kp1) +where kp1 between '2015-01-01 00:00:00' and '2015-01-05 23:59:59'; + +select kp1,kp2 from t1 force index (kp1) +where kp1 between '2015-01-01 00:00:00' and '2015-01-05 23:59:59'; + +--echo # Now, the same with NOT NULL column +create table t2 ( + pk int auto_increment primary key, + kp1 timestamp not null, + kp2 int, + col1 int, + key(kp1, kp2) +) engine=rocksdb; +insert into t2 select * from t1; +--echo # This must show 'Using index' +--replace_column 9 # +explain +select kp1,kp2 from t2 force index (kp1) +where kp1 between '2015-01-01 00:00:00' and '2015-01-05 23:59:59'; + +select kp1,kp2 from t2 force index (kp1) +where kp1 between '2015-01-01 00:00:00' and '2015-01-05 23:59:59'; +drop table t1,t2; + +--echo # +--echo # Try a TIME column: +--echo # +create table t1 ( + pk int auto_increment primary key, + kp1 time, + kp2 int, + col1 int, + key(kp1, kp2) +) engine=rocksdb; +--disable_warnings +insert into t1 (kp1,kp2) +select date_add('2015-01-01 09:00:00', interval a minute), a from t0; +--enable_warnings + +select * from t1; + +--echo # This must show 'Using index' +--replace_column 9 # +explain +select kp1,kp2 from t1 force index (kp1) +where kp1 between '09:01:00' and '09:05:00'; + +select kp1,kp2 from t1 force index (kp1) +where kp1 between '09:01:00' and '09:05:00'; + +--echo # Now, the same with NOT NULL column +create table t2 ( + pk int auto_increment primary key, + kp1 time not null, + kp2 int, + col1 int, + key(kp1, kp2) +) engine=rocksdb; +insert into t2 select * from t1; +--echo # This must show 'Using index' +--replace_column 9 # +explain +select kp1,kp2 from t2 force index (kp1) +where kp1 between '09:01:00' and '09:05:00'; + +select kp1,kp2 from t2 force index (kp1) +where kp1 between '09:01:00' and '09:05:00'; +drop table t1,t2; + +--echo # +--echo # Try a YEAR column: +--echo # +create table t1 ( + pk int auto_increment primary key, + kp1 year, + kp2 int, + col1 int, + 
key(kp1, kp2) +) engine=rocksdb; +--disable_warnings +insert into t1 (kp1,kp2) select 2015+a, a from t0; +--enable_warnings + +select * from t1; + +--echo # This must show 'Using index' +--replace_column 9 # +explain +select kp1,kp2 from t1 force index (kp1) +where kp1 between '2016' and '2020'; + +select kp1,kp2 from t1 force index (kp1) +where kp1 between '2016' and '2020'; + +--echo # Now, the same with NOT NULL column +create table t2 ( + pk int auto_increment primary key, + kp1 year not null, + kp2 int, + col1 int, + key(kp1, kp2) +) engine=rocksdb; +insert into t2 select * from t1; +--echo # This must show 'Using index' +--replace_column 9 # +explain +select kp1,kp2 from t2 force index (kp1) +where kp1 between '2016' and '2020'; + +select kp1,kp2 from t2 force index (kp1) +where kp1 between '2016' and '2020'; + +drop table t1,t2; + +--echo # +--echo # Issue #57: Release row locks on statement errors +--echo # +create table t1 (id int primary key) engine=rocksdb; +insert into t1 values (1), (2), (3); +begin; +insert into t1 values (4), (5), (6); +--error ER_DUP_ENTRY +insert into t1 values (7), (8), (2), (9); +select * from t1; + +-- connect(con1,localhost,root,,) +--connection con1 +begin; +--error ER_LOCK_WAIT_TIMEOUT +select * from t1 where id=4 for update; + +select * from t1 where id=7 for update; + +select * from t1 where id=9 for update; + +--connection default +-- disconnect con1 +drop table t1; + +--echo #Index on blob column +SET @old_mode = @@sql_mode; +SET sql_mode = 'strict_all_tables'; +create table t1 (a int, b text, c varchar(400), Primary Key(a), Key(c, b(255))) engine=rocksdb; +drop table t1; +create table t1 (a int, b text, c varchar(400), Primary Key(a), Key(b(1255))) engine=rocksdb; +insert into t1 values (1, '1abcde', '1abcde'), (2, '2abcde', '2abcde'), (3, '3abcde', '3abcde'); +select * from t1; +--replace_column 9 # +explain select * from t1 where b like '1%'; +--replace_column 9 # +explain select b, a from t1 where b like '1%'; +update 
t1 set b= '12345' where b = '2abcde'; +select * from t1; +drop table t1; +# In MariaDB, the error becomes a warning: +# --error ER_TOO_LONG_KEY +create table t1 (a int, b text, c varchar(400), Primary Key(a), Key(b(2255))) engine=rocksdb; +drop table t1; +SET sql_mode = @old_mode; + +drop table t0; + +--echo # +--echo # Fix assertion failure (attempt to overrun the key buffer) for prefix indexes +--echo # + +create table t1 ( + pk int primary key, + col1 varchar(100), + key (col1(10)) +) engine=rocksdb; + +insert into t1 values (1, repeat('0123456789', 9)); + +drop table t1; + +--echo # +--echo # Issue #76: Assertion `buf == table->record[0]' fails in virtual int ha_rocksdb::delete_row(const uchar*) +--echo # + +CREATE TABLE t1 (pk INT PRIMARY KEY, f1 INT) ENGINE=RocksDB; +CREATE TABLE t2 (pk INT PRIMARY KEY, f1 INT) ENGINE=RocksDB; + +CREATE TRIGGER tr AFTER DELETE ON t1 FOR EACH ROW DELETE FROM t2 WHERE pk = old.pk; + +INSERT INTO t1 VALUES (1,1); +REPLACE INTO t1 VALUES (1,2); + +SELECT * FROM t1; +DROP TABLE t1, t2; + +--echo # +--echo # Issue #99: UPDATE for table with VARCHAR pk gives "Can't find record" error +--echo # +create table t1(a int primary key); +insert into t1 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9); + +create table t2 ( + a varchar(32) primary key, + col1 int +) engine=rocksdb; + +insert into t2 +select concat('v-', 100 + A.a*100 + B.a), 12345 from t1 A, t1 B; +update t2 set a=concat('x-', a) where a between 'v-1002' and 'v-1004'; + +drop table t1,t2; + +--echo # +--echo # Issue #131: Assertion `v->cfd_->internal_comparator().Compare(start, end) <= 0' failed +--echo # +CREATE TABLE t2(c1 INTEGER UNSIGNED NOT NULL, c2 INTEGER NULL, c3 TINYINT, c4 SMALLINT , c5 MEDIUMINT, c6 INT, c7 BIGINT, PRIMARY KEY(c1,c6)) ENGINE=RocksDB; +INSERT INTO t2 VALUES (1,1,1,1,1,1,1); +SELECT * FROM t2 WHERE c1 > 4294967295 ORDER BY c1,c6; +EXPLAIN SELECT * FROM t2 WHERE c1 > 4294967295 ORDER BY c1,c6; +drop table t2; + +--echo # +--echo # Issue #135: register 
transaction was not being called for statement +--echo # +--disable_warnings +DROP DATABASE IF EXISTS test_db; +--enable_warnings +CREATE DATABASE test_db; +CREATE TABLE test_db.t1(c1 INT PRIMARY KEY); +LOCK TABLES test_db.t1 READ; +SET AUTOCOMMIT=0; +SELECT c1 FROM test_db.t1; +START TRANSACTION WITH CONSISTENT SNAPSHOT, READ ONLY; +DROP DATABASE test_db; + +--echo # +--echo # Issue #143: Split rocksdb_bulk_load option into two +--echo # +CREATE TABLE t1 (id int primary key, value int) engine=RocksDB; +SET unique_checks=0; +INSERT INTO t1 VALUES(1, 1); +INSERT INTO t1 VALUES(1, 2); +INSERT INTO t1 VALUES(1, 3); +SELECT * FROM t1; +--error ER_UNKNOWN_ERROR +REPLACE INTO t1 VALUES(4, 4); +--error ER_UNKNOWN_ERROR +INSERT INTO t1 VALUES(5, 5) ON DUPLICATE KEY UPDATE value=value+1; +TRUNCATE TABLE t1; +SET @save_rocksdb_bulk_load_size= @@rocksdb_bulk_load_size; +SET unique_checks=1; +SET rocksdb_commit_in_the_middle=1; +SET rocksdb_bulk_load_size=10; +BEGIN; +INSERT INTO t1 (id) VALUES(1),(2),(3),(4),(5),(6),(7),(8),(9),(10), + (11),(12),(13),(14),(15),(16),(17),(18),(19); +ROLLBACK; +SELECT * FROM t1; +INSERT INTO t1 (id) VALUES (11),(12),(13),(14),(15); +BEGIN; +UPDATE t1 SET value=100; +ROLLBACK; +SELECT * FROM t1; +BEGIN; +DELETE FROM t1; +ROLLBACK; +SELECT * FROM t1; +SET rocksdb_commit_in_the_middle=0; +SET rocksdb_bulk_load_size= @save_rocksdb_bulk_load_size; +DROP TABLE t1; + +--echo # +--echo # Issue #185 Assertion `BaseValid()' failed in void rocksdb::BaseDeltaIterator::Advance() +--echo # +CREATE TABLE t2(id INT NOT NULL PRIMARY KEY, data INT) Engine=MEMORY; +INSERT INTO t2 VALUES (100,NULL),(150,"long varchar"),(200,"varchar"),(250,"long long long varchar"); +create TABLE t1 (a int not null, b int not null, primary key(a,b)); +INSERT INTO t1 VALUES (1,1); +SELECT a FROM t1, t2 WHERE a=b AND (b NOT IN (SELECT a FROM t1 WHERE a > 4)); +DROP TABLE t1, t2; + +--echo # +--echo # Issue #189 ha_rocksdb::load_auto_incr_value() creates implicit snapshot and doesn't 
release +--echo # +--connect (con1,localhost,root,,) +create table r1 (id int auto_increment primary key, value int); +insert into r1 (id) values (null), (null), (null), (null), (null); +connection con1; +create table r2 like r1; +show create table r2; +connection default; +begin; +insert into r1 values (10, 1); +commit; +connection con1; +begin; +select * from r1; +commit; +connection default; +drop table r1, r2; + +# hidden primary key +create table r1 (id int auto_increment, value int, index i(id)); +insert into r1 (id) values (null), (null), (null), (null), (null); +connection con1; +create table r2 like r1; +show create table r2; +connection default; +begin; +insert into r1 values (10, 1); +commit; +connection con1; +begin; +select * from r1; +commit; +connection default; +drop table r1, r2; + +disconnect con1; + +--echo # +--echo # Issue#211 Crash on LOCK TABLES + START TRANSACTION WITH CONSISTENT SNAPSHOT +--echo # +CREATE TABLE t1(c1 INT); +lock TABLE t1 read local; +SELECT 1 FROM t1 GROUP BY TRIM(LEADING RAND()FROM''); +set AUTOCOMMIT=0; +start transaction with consistent snapshot; +SELECT * FROM t1; +COMMIT; +UNLOCK TABLES; +DROP TABLE t1; + +--echo # +--echo # Issue#213 Crash on LOCK TABLES + partitions +--echo # +CREATE TABLE t1(a INT,b INT,KEY (b)) engine=rocksdb PARTITION BY HASH(a) PARTITIONS 2; +INSERT INTO t1(a)VALUES (20010101101010.999949); +lock tables t1 write,t1 as t0 write,t1 as t2 write; +SELECT a FROM t1 ORDER BY a; +truncate t1; +INSERT INTO t1 VALUES(X'042000200020',X'042000200020'),(X'200400200020',X'200400200020'); +UNLOCK TABLES; +DROP TABLE t1; + +--echo # +--echo # Issue#250: MyRocks/Innodb different output from query with order by on table with index and decimal type +--echo # (the test was changed to use VARCHAR, because DECIMAL now supports index-only, and this issue +--echo # needs a datype that doesn't support index-inly) +--echo # + +CREATE TABLE t1( + c1 varchar(10) character set utf8 collate utf8_general_ci NOT NULL, + c2 
varchar(10) character set utf8 collate utf8_general_ci, + c3 INT, + INDEX idx(c1,c2) +); +INSERT INTO t1 VALUES ('c1-val1','c2-val1',5); +INSERT INTO t1 VALUES ('c1-val2','c2-val3',6); +INSERT INTO t1 VALUES ('c1-val3','c2-val3',7); +SELECT * FROM t1 force index(idx) WHERE c1 <> 'c1-val2' ORDER BY c1 DESC; +--replace_column 9 # +explain SELECT * FROM t1 force index(idx) WHERE c1 <> '1' ORDER BY c1 DESC; +drop table t1; + +--echo # +--echo # Issue#267: MyRocks issue with no matching min/max row and count(*) +--echo # +CREATE TABLE t1(c1 INT UNSIGNED, c2 INT SIGNED, INDEX idx2(c2)); +INSERT INTO t1 VALUES(1,null); +INSERT INTO t1 VALUES(2,null); +SELECT count(*) as total_rows, min(c2) as min_value FROM t1; +DROP TABLE t1; + +--echo # +--echo # Issue#263: MyRocks auto_increment skips values if you insert a negative value +--echo # +# We have slightly different behavior regarding auto-increment values than +# InnoDB, so the results of the SHOW TABLE STATUS command will be slightly +# different. InnoDB will reserve 3 values but only use 2 of them (because +# the user hard-coded a -1 as the second value). MyRocks will only reserve +# the values as needed, so only 2 values will be used. This means that the +# SHOW TABLE STATUS in InnoDB will indicate that the next auto-increment +# value is 4 while MyRocks will show it as 3. 
+CREATE TABLE t1(a INT AUTO_INCREMENT KEY); +INSERT INTO t1 VALUES(0),(-1),(0); +SHOW TABLE STATUS LIKE 't1'; +SELECT * FROM t1; +DROP TABLE t1; +CREATE TABLE t1(a INT AUTO_INCREMENT KEY); +INSERT INTO t1 VALUES(0),(10),(0); +SHOW TABLE STATUS LIKE 't1'; +SELECT * FROM t1; +DROP TABLE t1; + +--echo # +--echo # Issue #411: Setting rocksdb_commit_in_the_middle commits transaction +--echo # without releasing iterator +--echo # + +CREATE TABLE t1 (id1 bigint(20), + id2 bigint(20), + id3 bigint(20), + PRIMARY KEY (id1, id2, id3)) + DEFAULT CHARSET=latin1; + +CREATE TABLE t2 (id1 bigint(20), + id2 bigint(20), + PRIMARY KEY (id1, id2)) + DEFAULT CHARSET=latin1; + + +set rocksdb_commit_in_the_middle=1; +SET @save_rocksdb_bulk_load_size= @@rocksdb_bulk_load_size; +set rocksdb_bulk_load_size = 100; + +--disable_query_log +let $j = 10000; +while ($j) +{ + --eval insert into t1 (id1, id2, id3) values (0, $j, 0); + --eval insert into t2 (id1, id2) values (0, $j); + dec $j; +} +--enable_query_log + +DELETE t2, t1 FROM t2 LEFT JOIN t1 ON t2.id2 = t1.id2 AND t2.id1 = t1.id1 WHERE t2.id1 = 0; + +SET rocksdb_bulk_load_size= @save_rocksdb_bulk_load_size; +SET rocksdb_commit_in_the_middle=0; +DROP TABLE t1, t2; + + +SET GLOBAL ROCKSDB_PAUSE_BACKGROUND_WORK = @ORIG_PAUSE_BACKGROUND_WORK; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_cf_options-master.opt b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_cf_options-master.opt new file mode 100644 index 0000000000000..95d819ee425f3 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_cf_options-master.opt @@ -0,0 +1 @@ +--rocksdb_default_cf_options="write_buffer_size=12m;target_file_size_base=1m;max_bytes_for_level_multiplier=10" diff --git a/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_cf_options-master.sh b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_cf_options-master.sh new file mode 100755 index 0000000000000..9381de1fafcec --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_cf_options-master.sh @@ 
-0,0 +1,5 @@ +#!/bin/bash + +cat > $MYSQL_TMP_DIR/cf_configs.cnf < $_expect_file_name +--error 1 +--source include/wait_until_connected_again.inc + +# invalid cf option config (no curly braces) + +--exec echo "restart:--rocksdb_override_cf_options=cf1=write_buffer_size=8m" > $_expect_file_name +--error 1 +--source include/wait_until_connected_again.inc + +# invalid cf option config (cf listed twice) + +--exec echo "restart:--rocksdb_override_cf_options=cf1={write_buffer_size=8m};cf1={target_file_size_base=2m}" > $_expect_file_name +--error 1 +--source include/wait_until_connected_again.inc + +# clean up + +--source include/restart_mysqld.inc + +--echo +drop table t1,t2,t3; + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_cf_per_partition.test b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_cf_per_partition.test new file mode 100644 index 0000000000000..93febbc1319fa --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_cf_per_partition.test @@ -0,0 +1,496 @@ +--source include/have_rocksdb.inc + +--source include/have_partition.inc + +--disable_warnings +DROP TABLE IF EXISTS t1; +DROP TABLE IF EXISTS t2; +--enable_warnings + +# +# Create a table with multiple partitions, but in the comment don't specify +# that per-partition based column families (CF) should be created. Expect that +# default CF will be used and new one won't be created. +# +CREATE TABLE t1 ( + c1 INT, + c2 INT, + name VARCHAR(25) NOT NULL, + event DATE, + PRIMARY KEY (`c1`, `c2`) COMMENT 'testcomment' +) ENGINE=ROCKSDB +PARTITION BY LIST(c1) ( + PARTITION custom_p0 VALUES IN (1, 4, 7), + PARTITION custom_p1 VALUES IN (2, 5, 8), + PARTITION custom_p2 VALUES IN (3, 6, 9) +); + +# Expecting no results here. +SELECT DISTINCT(cf_name) FROM information_schema.rocksdb_cfstats WHERE cf_name='testcomment'; + +DROP TABLE t1; + +# +# Same test case as above, only with the reverse CF. Should result in the same +# behavior. No new CF-s created, only default one will be used. 
+# +CREATE TABLE t1 ( + c1 INT, + c2 INT, + name VARCHAR(25) NOT NULL, + event DATE, + PRIMARY KEY (`c1`, `c2`) COMMENT 'rev:testrevcomment' +) ENGINE=ROCKSDB +PARTITION BY LIST(c1) ( + PARTITION custom_p0 VALUES IN (1, 4, 7), + PARTITION custom_p1 VALUES IN (2, 5, 8), + PARTITION custom_p2 VALUES IN (3, 6, 9) +); + +# Expecting no results here. +SELECT DISTINCT(cf_name) FROM information_schema.rocksdb_cfstats WHERE cf_name='rev:testrevcomment'; + +DROP TABLE t1; + +# +# Create a table with multiple partitions and request for separate CF to be +# created per every partition. As a result we expect three different CF-s to be +# created. +# +CREATE TABLE t1 ( + c1 INT, + c2 INT, + name VARCHAR(25) NOT NULL, + event DATE, + PRIMARY KEY (`c1`, `c2`) COMMENT 'custom_p0_cfname=foo;custom_p1_cfname=my_custom_cf;custom_p2_cfname=baz' +) ENGINE=ROCKSDB +PARTITION BY LIST(c1) ( + PARTITION custom_p0 VALUES IN (1, 4, 7), + PARTITION custom_p1 VALUES IN (2, 5, 8), + PARTITION custom_p2 VALUES IN (3, 6, 9) +); + +set @@global.rocksdb_compact_cf = 'foo'; +set @@global.rocksdb_compact_cf = 'my_custom_cf'; +set @@global.rocksdb_compact_cf = 'baz'; + +SELECT DISTINCT(cf_name) FROM information_schema.rocksdb_cfstats WHERE cf_name='foo'; +SELECT DISTINCT(cf_name) FROM information_schema.rocksdb_cfstats WHERE cf_name='my_custom_cf'; +SELECT DISTINCT(cf_name) FROM information_schema.rocksdb_cfstats WHERE cf_name='baz'; + +DROP TABLE t1; + +# +# Same test case as above, only one of the partitions has "rev:" prefix. The +# intent here is to make sure that qualifier can specify reverse CF as well. 
+# +CREATE TABLE t1 ( + c1 INT, + c2 INT, + name VARCHAR(25) NOT NULL, + event DATE, + PRIMARY KEY (`c1`, `c2`) COMMENT 'custom_p0_cfname=t1-p0;custom_p1_cfname=rev:bar;custom_p2_cfname=t1-p2' +) ENGINE=ROCKSDB +PARTITION BY LIST(c1) ( + PARTITION custom_p0 VALUES IN (1, 4, 7), + PARTITION custom_p1 VALUES IN (2, 5, 8), + PARTITION custom_p2 VALUES IN (3, 6, 9) +); + +set @@global.rocksdb_compact_cf = 't1-p0'; +set @@global.rocksdb_compact_cf = 'rev:bar'; +set @@global.rocksdb_compact_cf = 't1-p2'; + +SELECT DISTINCT(cf_name) FROM information_schema.rocksdb_cfstats WHERE cf_name='t1-p0'; +SELECT DISTINCT(cf_name) FROM information_schema.rocksdb_cfstats WHERE cf_name='rev:bar'; +SELECT DISTINCT(cf_name) FROM information_schema.rocksdb_cfstats WHERE cf_name='t1-p2'; + +DROP TABLE t1; + + +# +# Create a table with multiple partitions and assign two partitions to the same +# CF, third one gets a separate partition, and fourth one will belong to a +# default one. As a result we expect two new CF-s to be created. +# +CREATE TABLE t1 ( + c1 INT, + c2 INT, + name VARCHAR(25) NOT NULL, + event DATE, + PRIMARY KEY (`c1`, `c2`) COMMENT 'custom_p0_cfname=cf-zero;custom_p1_cfname=cf-one;custom_p2_cfname=cf-zero' +) ENGINE=ROCKSDB +PARTITION BY LIST(c1) ( + PARTITION custom_p0 VALUES IN (1, 4, 7), + PARTITION custom_p1 VALUES IN (2, 5, 8), + PARTITION custom_p2 VALUES IN (3, 6, 9), + PARTITION custom_p3 VALUES IN (10, 20, 30) +); + +set @@global.rocksdb_compact_cf = 'cf-zero'; +set @@global.rocksdb_compact_cf = 'cf-one'; + +SELECT DISTINCT(cf_name) FROM information_schema.rocksdb_cfstats WHERE cf_name='cf-zero'; +SELECT DISTINCT(cf_name) FROM information_schema.rocksdb_cfstats WHERE cf_name='cf-one'; + +DROP TABLE t1; + +# +# Create a table with CF-s per partition and verify that ALTER TABLE + DROP +# INDEX work for that scenario and data is persisted. 
+# +CREATE TABLE t1 ( + c1 INT, + c2 INT, + name VARCHAR(25) NOT NULL, + event DATE, + PRIMARY KEY (`c1`, `c2`) COMMENT 'custom_p0_cfname=foo;custom_p1_cfname=bar;custom_p2_cfname=baz' +) ENGINE=ROCKSDB +PARTITION BY LIST(c1) ( + PARTITION custom_p0 VALUES IN (1, 4, 7), + PARTITION custom_p1 VALUES IN (2, 5, 8), + PARTITION custom_p2 VALUES IN (3, 6, 9) +); + +INSERT INTO t1 VALUES (1, 1, "one", null); +INSERT INTO t1 VALUES (2, 2, "two", null); +INSERT INTO t1 VALUES (3, 3, "three", null); +INSERT INTO t1 VALUES (5, 5, "five", null); +INSERT INTO t1 VALUES (9, 9, "nine", null); + +SELECT * FROM t1; +ALTER TABLE t1 DROP PRIMARY KEY; +SELECT * FROM t1; + +# +# Verify that we can compact custom CF-s. +# +set @@global.rocksdb_compact_cf = 'foo'; +set @@global.rocksdb_compact_cf = 'bar'; +set @@global.rocksdb_compact_cf = 'baz'; + +DROP TABLE t1; + +# +# Create a table with CF-s per partition and verify that ALTER TABLE + DROP +# INDEX + ADD INDEX work for that scenario and data is persisted and new cf_name_str +# are created. 
+# +CREATE TABLE t1 ( + c1 INT, + c2 INT, + name VARCHAR(25) NOT NULL, + event DATE, + PRIMARY KEY (`c1`, `c2`) COMMENT 'custom_p0_cfname=foo;custom_p1_cfname=bar;custom_p2_cfname=baz' +) ENGINE=ROCKSDB +PARTITION BY LIST(c1) ( + PARTITION custom_p0 VALUES IN (1, 4, 7), + PARTITION custom_p1 VALUES IN (2, 5, 8), + PARTITION custom_p2 VALUES IN (3, 6, 9) +); + +INSERT INTO t1 VALUES (1, 1, "one", null); +INSERT INTO t1 VALUES (2, 2, "two", null); +INSERT INTO t1 VALUES (3, 3, "three", null); +INSERT INTO t1 VALUES (5, 5, "five", null); +INSERT INTO t1 VALUES (9, 9, "nine", null); + +ALTER TABLE t1 DROP PRIMARY KEY; +ALTER TABLE t1 ADD PRIMARY KEY (`c1`, `c2`) COMMENT 'custom_p0_cfname=p0_cf;custom_p1_cfname=p1_cf'; + +set @@global.rocksdb_compact_cf = 'p0_cf'; +set @@global.rocksdb_compact_cf = 'p1_cf'; + +SELECT DISTINCT(cf_name) FROM information_schema.rocksdb_cfstats WHERE cf_name='p0_cf'; +SELECT DISTINCT(cf_name) FROM information_schema.rocksdb_cfstats WHERE cf_name='p1_cf'; + +DROP TABLE t1; + +# +# Create a table CF-s per partition, use ALTER TABLE to change the way it's +# partitioned and verify that new CF-s will be created. 
+# +CREATE TABLE t1 ( + c1 INT, + c2 INT, + name VARCHAR(25) NOT NULL, + event DATE, + PRIMARY KEY (`c1`, `c2`) COMMENT 'custom_p0_cfname=foo;custom_p1_cfname=bar;custom_p2_cfname=baz' +) ENGINE=ROCKSDB +PARTITION BY LIST(c1) ( + PARTITION custom_p0 VALUES IN (1, 4, 7), + PARTITION custom_p1 VALUES IN (2, 5, 8), + PARTITION custom_p2 VALUES IN (3, 6, 9) +); + +INSERT INTO t1 VALUES (1, 1, "one", null); +INSERT INTO t1 VALUES (2, 2, "two", null); +INSERT INTO t1 VALUES (3, 3, "three", null); +INSERT INTO t1 VALUES (5, 5, "five", null); +INSERT INTO t1 VALUES (9, 9, "nine", null); + +ALTER TABLE t1 PARTITION BY LIST(c1) ( + PARTITION custom_p3 VALUES IN (1, 4, 7), + PARTITION custom_p4 VALUES IN (2, 5, 8, 3, 6, 9) +); + +ALTER TABLE t1 DROP PRIMARY KEY; +ALTER TABLE t1 ADD PRIMARY KEY (`c1`, `c2`) COMMENT 'custom_p3_cfname=p3_cf;custom_p4_cfname=p4_cf'; + +set @@global.rocksdb_compact_cf = 'p3_cf'; +set @@global.rocksdb_compact_cf = 'p4_cf'; + +SELECT DISTINCT(cf_name) FROM information_schema.rocksdb_cfstats WHERE cf_name='p3_cf'; +SELECT DISTINCT(cf_name) FROM information_schema.rocksdb_cfstats WHERE cf_name='p4_cf'; + +DROP TABLE t1; + +# +# Create a table CF-s per partition, use empty qualifier name. Verify that no +# new CF-s are created. This will also make sure that nothing gets added for +# `custom_p2`. +# +CREATE TABLE t1 ( + c1 INT, + c2 INT, + name VARCHAR(25) NOT NULL, + event DATE, + PRIMARY KEY (`c1`, `c2`) COMMENT 'custom_p0_cfname=foo;custom_p1_cfname=;' +) ENGINE=ROCKSDB +PARTITION BY LIST(c1) ( + PARTITION custom_p0 VALUES IN (1, 4, 7), + PARTITION custom_p1 VALUES IN (2, 5, 8), + PARTITION custom_p2 VALUES IN (3, 6, 9) +); + +DROP TABLE t1; + +# +# Verify some basic partition related operations when using PARTITION BY LIST +# COLUMNS on a VARBINARY column on a table with more complicated schema. +# + +# +# Verify that creating the table without COMMENT actually works. 
+# +CREATE TABLE `t2` ( + `col1` bigint(20) NOT NULL, + `col2` varbinary(64) NOT NULL, + `col3` varbinary(256) NOT NULL, + `col4` bigint(20) NOT NULL, + `col5` mediumblob NOT NULL, + PRIMARY KEY (`col1`,`col2`,`col3`) +) ENGINE=ROCKSDB DEFAULT CHARSET=latin1 + PARTITION BY LIST COLUMNS (`col2`) ( + PARTITION custom_p0 VALUES IN (0x12345), + PARTITION custom_p1 VALUES IN (0x23456), + PARTITION custom_p2 VALUES IN (0x34567), + PARTITION custom_p3 VALUES IN (0x45678), + PARTITION custom_p4 VALUES IN (0x56789), + PARTITION custom_p5 VALUES IN (0x6789A), + PARTITION custom_p6 VALUES IN (0x789AB), + PARTITION custom_p7 VALUES IN (0x89ABC) +); + +DROP TABLE t2; + +# +# Create the same table with two custom CF-s per partition as specified in the +# COMMENT. +# +CREATE TABLE `t2` ( + `col1` bigint(20) NOT NULL, + `col2` varbinary(64) NOT NULL, + `col3` varbinary(256) NOT NULL, + `col4` bigint(20) NOT NULL, + `col5` mediumblob NOT NULL, + PRIMARY KEY (`col1`,`col2`,`col3`) COMMENT 'custom_p0_cfname=my_cf0;custom_p1_cfname=my_cf1' +) ENGINE=ROCKSDB DEFAULT CHARSET=latin1 + PARTITION BY LIST COLUMNS (`col2`) ( + PARTITION custom_p0 VALUES IN (0x12345), + PARTITION custom_p1 VALUES IN (0x23456), + PARTITION custom_p2 VALUES IN (0x34567), + PARTITION custom_p3 VALUES IN (0x45678), + PARTITION custom_p4 VALUES IN (0x56789), + PARTITION custom_p5 VALUES IN (0x6789A), + PARTITION custom_p6 VALUES IN (0x789AB), + PARTITION custom_p7 VALUES IN (0x89ABC) +); + +# Verify that CF-s were created earlier. +set @@global.rocksdb_compact_cf = 'my_cf0'; +set @@global.rocksdb_compact_cf = 'my_cf1'; + +SELECT DISTINCT(cf_name) FROM information_schema.rocksdb_cfstats WHERE cf_name='my_cf0'; +SELECT DISTINCT(cf_name) FROM information_schema.rocksdb_cfstats WHERE cf_name='my_cf1'; + +# Insert some random data. 
+INSERT INTO t2 VALUES (100, 0x12345, 0x1, 1, 0x2); +INSERT INTO t2 VALUES (200, 0x12345, 0x1, 1, 0x2); +INSERT INTO t2 VALUES (300, 0x12345, 0x1, 1, 0x2); +INSERT INTO t2 VALUES (100, 0x23456, 0x2, 1, 0x3); +INSERT INTO t2 VALUES (100, 0x34567, 0x4, 1, 0x5); +INSERT INTO t2 VALUES (400, 0x89ABC, 0x4, 1, 0x5); + +# Verify it's there. +SELECT col1, HEX(col2), HEX(col3), col4, HEX(col5) FROM t2; + +# Verify it's being fetched from the right partition. This tests partitioning +# functionality, but we want to make sure that by adding CF-s per partition we +# don't regress anything. +EXPLAIN PARTITIONS SELECT HEX(col2) FROM t2 where col2 = 0x12345; +EXPLAIN PARTITIONS SELECT HEX(col2) FROM t2 where col2 = 0x23456; + +# Delete the current PK and create a new one referencing different CF-s. We +# need to verity that new CF-s will be created and no data will be lost in +# process. +ALTER TABLE t2 DROP PRIMARY KEY; +ALTER TABLE t2 ADD PRIMARY KEY (`col1`,`col2`,`col3`) COMMENT 'custom_p0_cfname=new_cf0;custom_p1_cfname=new_cf1'; + +# Verify that new CF-s are created as well. +set @@global.rocksdb_compact_cf = 'new_cf0'; +set @@global.rocksdb_compact_cf = 'new_cf1'; + +SELECT DISTINCT(cf_name) FROM information_schema.rocksdb_cfstats WHERE cf_name='new_cf0'; +SELECT DISTINCT(cf_name) FROM information_schema.rocksdb_cfstats WHERE cf_name='new_cf1'; + +# Insert some more random data. +INSERT INTO t2 VALUES (500, 0x12345, 0x5, 1, 0x2); +INSERT INTO t2 VALUES (700, 0x23456, 0x7, 1, 0x3); + +# Verify that partition mappings are still intact. +EXPLAIN PARTITIONS SELECT HEX(col2) FROM t2 where col2 = 0x12345; +EXPLAIN PARTITIONS SELECT HEX(col2) FROM t2 where col2 = 0x23456; + +# Verify that no data is lost. +SELECT col1, HEX(col2), HEX(col3), col4, HEX(col5) FROM t2; + +DROP TABLE t2; + +# +# Create the same table with two custom CF-s per partition as specified in the +# COMMENT. Use both the PK and SK when creating the table. 
+# +CREATE TABLE `t2` ( + `col1` bigint(20) NOT NULL, + `col2` varbinary(64) NOT NULL, + `col3` varbinary(256) NOT NULL, + `col4` bigint(20) NOT NULL, + `col5` mediumblob NOT NULL, + PRIMARY KEY (`col1`,`col2`,`col3`) COMMENT 'custom_p0_cfname=test_cf0;custom_p1_cfname=test_cf1', + KEY (`col2`, `col4`) COMMENT 'custom_p5_cfname=test_cf5' +) ENGINE=ROCKSDB DEFAULT CHARSET=latin1 + PARTITION BY LIST COLUMNS (`col2`) ( + PARTITION custom_p0 VALUES IN (0x12345), + PARTITION custom_p1 VALUES IN (0x23456), + PARTITION custom_p2 VALUES IN (0x34567), + PARTITION custom_p3 VALUES IN (0x45678), + PARTITION custom_p4 VALUES IN (0x56789), + PARTITION custom_p5 VALUES IN (0x6789A), + PARTITION custom_p6 VALUES IN (0x789AB), + PARTITION custom_p7 VALUES IN (0x89ABC) +); + +# Verify that CF-s were created for PK. +SELECT DISTINCT(cf_name) FROM information_schema.rocksdb_cfstats WHERE cf_name='test_cf0'; +SELECT DISTINCT(cf_name) FROM information_schema.rocksdb_cfstats WHERE cf_name='test_cf1'; + +# Verify that CF-s were created for SK. +SELECT DISTINCT(cf_name) FROM information_schema.rocksdb_cfstats WHERE cf_name='test_cf5'; + +# Insert some random data. +INSERT INTO t2 VALUES (100, 0x12345, 0x1, 1, 0x2); +INSERT INTO t2 VALUES (200, 0x12345, 0x1, 1, 0x2); +INSERT INTO t2 VALUES (300, 0x12345, 0x1, 1, 0x2); +INSERT INTO t2 VALUES (100, 0x23456, 0x2, 1, 0x3); +INSERT INTO t2 VALUES (100, 0x34567, 0x4, 1, 0x5); +INSERT INTO t2 VALUES (400, 0x89ABC, 0x4, 1, 0x5); +INSERT INTO t2 VALUES (500, 0x6789A, 0x5, 1, 0x7); + +# Basic verification that correct partition and key are used when searching. +EXPLAIN PARTITIONS SELECT * FROM t2 WHERE col2 = 0x6789A AND col4 = 1; + +# Remove the key. +ALTER TABLE t2 DROP KEY `col2`; + +# Add a new key and expect new CF to be created as well. +ALTER TABLE t2 ADD KEY (`col3`, `col4`) COMMENT 'custom_p5_cfname=another_cf_for_p5'; + +# Verify that CF-s were created for SK. 
+SELECT DISTINCT(cf_name) FROM information_schema.rocksdb_cfstats WHERE cf_name='another_cf_for_p5'; + +# Verify that correct partition and key are used when searching. +EXPLAIN PARTITIONS SELECT * FROM t2 WHERE col3 = 0x4 AND col2 = 0x34567; + +DROP TABLE t2; + +# +# Verify the same scenario as before, but with a UNIQUE KEY in addition to PK. +# +CREATE TABLE `t2` ( + `col1` bigint(20) NOT NULL, + `col2` varbinary(64) NOT NULL, + `col3` varbinary(256) NOT NULL, + `col4` bigint(20) NOT NULL, + `col5` mediumblob NOT NULL, + PRIMARY KEY (`col1`,`col2`,`col3`) COMMENT 'custom_p0_cfname=test_cf0;custom_p1_cfname=test_cf1', + UNIQUE KEY (`col2`, `col4`) COMMENT 'custom_p5_cfname=unique_test_cf5' +) ENGINE=ROCKSDB DEFAULT CHARSET=latin1 + PARTITION BY LIST COLUMNS (`col2`) ( + PARTITION custom_p0 VALUES IN (0x12345), + PARTITION custom_p1 VALUES IN (0x23456), + PARTITION custom_p2 VALUES IN (0x34567), + PARTITION custom_p3 VALUES IN (0x45678), + PARTITION custom_p4 VALUES IN (0x56789), + PARTITION custom_p5 VALUES IN (0x6789A), + PARTITION custom_p6 VALUES IN (0x789AB), + PARTITION custom_p7 VALUES IN (0x89ABC) +); + +# Verify that CF-s were created for SK. +SELECT DISTINCT(cf_name) FROM information_schema.rocksdb_cfstats WHERE cf_name='unique_test_cf5'; + +INSERT INTO t2 VALUES (100, 0x12345, 0x1, 1, 0x2); + +--error ER_DUP_ENTRY +INSERT INTO t2 VALUES (200, 0x12345, 0x1, 1, 0x2); + +--error ER_DUP_ENTRY +INSERT INTO t2 VALUES (300, 0x12345, 0x1, 1, 0x2); + +INSERT INTO t2 VALUES (100, 0x23456, 0x2, 1, 0x3); +INSERT INTO t2 VALUES (100, 0x34567, 0x4, 1, 0x5); +INSERT INTO t2 VALUES (400, 0x89ABC, 0x4, 1, 0x5); +INSERT INTO t2 VALUES (500, 0x6789A, 0x5, 1, 0x7); + +DROP TABLE t2; + +# +# Verify that both partitioned and non-partitioned table can share a CF. +# +CREATE TABLE t1 ( + `a` int, + PRIMARY KEY (a) COMMENT "sharedcf" +) ENGINE=ROCKSDB; + +SELECT DISTINCT(cf_name) FROM information_schema.rocksdb_cfstats WHERE cf_name='sharedcf'; + +# +# We expect this to succeed. 
+# +CREATE TABLE t2 ( + `a` INT, + `b` DATE, + `c` VARCHAR(42), + PRIMARY KEY (`a`) COMMENT "custom_p0_cfname=sharedcf;custom_p2_cfname=notsharedcf" +) ENGINE=ROCKSDB + PARTITION BY LIST(`a`) ( + PARTITION custom_p0 VALUES IN (1, 4, 7), + PARTITION custom_p1 VALUES IN (2, 5, 8), + PARTITION custom_p2 VALUES IN (3, 6, 9) +); + +SELECT DISTINCT(cf_name) FROM information_schema.rocksdb_cfstats WHERE cf_name='notsharedcf'; + +DROP TABLE IF EXISTS t1; +DROP TABLE IF EXISTS t2; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_cf_reverse-master.opt b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_cf_reverse-master.opt new file mode 100644 index 0000000000000..ba9364e152376 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_cf_reverse-master.opt @@ -0,0 +1 @@ +--rocksdb_debug_optimizer_n_rows=1000 diff --git a/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_cf_reverse.test b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_cf_reverse.test new file mode 100644 index 0000000000000..8e30332bafef9 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_cf_reverse.test @@ -0,0 +1,71 @@ +--source include/have_rocksdb.inc + +# +# RocksDB-SE tests for reverse-ordered Column Families +# + +create table t0 (a int) engine=myisam; +insert into t0 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9); + +create table t1 ( + pk int primary key, + a int not null, + b int not null, + key(a) comment 'rev:foo', + key(b) comment 'bar' +) engine=rocksdb; + +insert into t1 select a,a,a from t0; +insert into t1 select a+10,a+10,a+10 from t0; + +--echo # Primary key is not in a reverse-ordered CF, so full table scan +--echo # returns rows in ascending order: +select * from t1; + +--replace_column 9 # +explain +select a from t1 order by a limit 5; +select a from t1 order by a limit 5; + +--replace_column 9 # +explain +select b from t1 order by b limit 5; +select a from t1 order by a limit 5; + +--replace_column 9 # +explain +select a from t1 order by a desc limit 5; +select a from 
t1 order by a desc limit 5; + +--replace_column 9 # +explain +select b from t1 order by b desc limit 5; +select b from t1 order by b desc limit 5; + +drop table t1; + +--echo # +--echo # Try a primary key in a reverse-ordered CF. +--echo # + +create table t2 ( + pk int, + a int not null, + primary key(pk) comment 'rev:cf1' +) engine=rocksdb; + +insert into t2 select a,a from t0; +--echo # Primary key is in a reverse-ordered CF, so full table scan +--echo # returns rows in descending order: +select * from t2; + +set autocommit=0; +begin; +delete from t2 where a=3 or a=7; +select * from t2; +rollback; +set autocommit=1; + +drop table t2; +drop table t0; + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_checksums-master.opt b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_checksums-master.opt new file mode 100644 index 0000000000000..792e3808f1ef7 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_checksums-master.opt @@ -0,0 +1,2 @@ +--rocksdb_debug_optimizer_n_rows=1000 +--rocksdb_records_in_range=50 diff --git a/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_checksums.test b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_checksums.test new file mode 100644 index 0000000000000..9a7704c7ab0da --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_checksums.test @@ -0,0 +1,141 @@ +--source include/have_rocksdb.inc + +# +# Tests for row checksums feature +# +--source include/have_debug.inc + +set @save_rocksdb_store_row_debug_checksums=@@global.rocksdb_store_row_debug_checksums; +set @save_rocksdb_verify_row_debug_checksums=@@global.rocksdb_verify_row_debug_checksums; +set @save_rocksdb_checksums_pct=@@global.rocksdb_checksums_pct; + +show variables like 'rocksdb_%checksum%'; + +create table t1 (pk int primary key, a int, b int, key(a), key(b)) engine=rocksdb; +insert into t1 values (1,1,1),(2,2,2),(3,3,3); +check table t1; +--let SEARCH_FILE=$MYSQLTEST_VARDIR/log/mysqld.1.err +--let SEARCH_PATTERN=0 table records had checksums +--source 
include/search_pattern_in_file.inc + +drop table t1; + +set session rocksdb_store_row_debug_checksums=on; +create table t2 (pk int primary key, a int, b int, key(a), key(b)) engine=rocksdb; +insert into t2 values (1,1,1),(2,2,2),(3,3,3); +check table t2; +--let SEARCH_PATTERN=3 table records had checksums +--source include/search_pattern_in_file.inc + +--echo # Now, make a table that has both rows with checksums and without +create table t3 (pk int primary key, a int, b int, key(a), key(b)) engine=rocksdb; +insert into t3 values (1,1,1),(2,2,2),(3,3,3); +set session rocksdb_store_row_debug_checksums=off; +update t3 set b=3 where a=2; +set session rocksdb_store_row_debug_checksums=on; +check table t3; +--let SEARCH_PATTERN=2 table records had checksums +--source include/search_pattern_in_file.inc + +set session rocksdb_store_row_debug_checksums=on; +set session rocksdb_checksums_pct=5; +create table t4 (pk int primary key, a int, b int, key(a), key(b)) engine=rocksdb; +--disable_query_log +let $i=0; +let $x= 100000; +while ($i<10000) +{ + inc $i; + eval insert t4(pk,a,b) values($i, $i, $i div 10); + eval update t4 set a= a+$x where a=$i; + eval update t4 set pk=pk+$x where pk=$i; +} +--enable_query_log +check table t4; +perl; +$total=10000; +$pct=5; +@out=(); + +$filename= "$ENV{MYSQLTEST_VARDIR}/log/mysqld.1.err"; +open(F, '<', $filename) || die("Can't open file $filename: $!"); +while() { + @out=() if /^CURRENT_TEST:/; + if (/(\d+) index entries checked \((\d+) had checksums/) { + if ($1 == $total and $2 >= $total*($pct-2)/100 and $2 <= $total*($pct+2)/100) { + push @out, sprintf "%d index entries had around %d checksums\n", $total, $total*$pct/100; + } + } elsif (/(\d+) table records had checksums/) { + if ($1 >= $total*($pct-2)/100 and $1 <= $total*($pct+2)/100) { + push @out, sprintf "Around %d table records had checksums\n", $total*$pct/100; + } + } +} +print @out; +EOF +set session rocksdb_checksums_pct=100; + +--echo # +--echo # Ok, table t2 has all rows 
with checksums. Simulate a few checksum mismatches. +--echo # +insert into mtr.test_suppressions values + ('Checksum mismatch in key of key-value pair for index'), + ('Checksum mismatch in value of key-value pair for index'), + ('Data with incorrect checksum'); + +--echo # 1. Start with mismatch in key checksum of the PK. +set session debug_dbug= "+d,myrocks_simulate_bad_pk_checksum1"; +set session rocksdb_verify_row_debug_checksums=off; +select * from t3; +set session rocksdb_verify_row_debug_checksums=on; +--error ER_INTERNAL_ERROR +select * from t3; +--error ER_INTERNAL_ERROR +select * from t4; +set session debug_dbug= "-d,myrocks_simulate_bad_pk_checksum1"; + +--echo # 2. Continue with mismatch in pk value checksum. +set session debug_dbug= "+d,myrocks_simulate_bad_pk_checksum2"; +set session rocksdb_verify_row_debug_checksums=off; +select * from t3; +set session rocksdb_verify_row_debug_checksums=on; +--error ER_INTERNAL_ERROR +select * from t3; +--error ER_INTERNAL_ERROR +select * from t4; +set session debug_dbug= "-d,myrocks_simulate_bad_pk_checksum2"; + +--echo # 3. Check if we catch checksum mismatches for secondary indexes +--replace_column 9 # +explain +select * from t3 force index(a) where a<4; +select * from t3 force index(a) where a<4; + +set session debug_dbug= "+d,myrocks_simulate_bad_key_checksum1"; +--error ER_INTERNAL_ERROR +select * from t3 force index(a) where a<4; +--error ER_INTERNAL_ERROR +select * from t4 force index(a) where a<1000000; +set session debug_dbug= "-d,myrocks_simulate_bad_key_checksum1"; + +--echo # 4. The same for index-only reads? 
+--disable_query_log +set global rocksdb_force_flush_memtable_now=1; +--enable_query_log +--replace_column 9 # +explain +select a from t3 force index(a) where a<4; +select a from t3 force index(a) where a<4; + +set session debug_dbug= "+d,myrocks_simulate_bad_key_checksum1"; +--error ER_INTERNAL_ERROR +select a from t3 force index(a) where a<4; +--error ER_INTERNAL_ERROR +select a from t4 force index(a) where a<1000000; +set session debug_dbug= "-d,myrocks_simulate_bad_key_checksum1"; + +set @@global.rocksdb_store_row_debug_checksums=@save_rocksdb_store_row_debug_checksums; +set @@global.rocksdb_verify_row_debug_checksums=@save_rocksdb_verify_row_debug_checksums; +set @@global.rocksdb_checksums_pct=@save_rocksdb_checksums_pct; + +drop table t2,t3,t4; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_concurrent_delete.test b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_concurrent_delete.test new file mode 100644 index 0000000000000..52f9485e6b7f6 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_concurrent_delete.test @@ -0,0 +1,24 @@ +--source include/have_rocksdb.inc +--source include/have_debug_sync.inc + +# This validates the fix for Issue #144. The problem was that with more +# than one client accessing/deleting the same row there was a possibility +# of client A finding a row (through Next() or Prev()) but the row being +# deleted before the GetForUpdate() call could occur. When this happened +# a nearly useless error was being returned. 
+ +let $order=ASC; +let $comment=""; +--source include/rocksdb_concurrent_delete.inc + +let $order=DESC; +let $comment=""; +--source include/rocksdb_concurrent_delete.inc + +let $order=ASC; +let $comment="rev:cf2"; +--source include/rocksdb_concurrent_delete.inc + +let $order=DESC; +let $comment="rev:cf2"; +--source include/rocksdb_concurrent_delete.inc diff --git a/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_concurrent_insert.py b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_concurrent_insert.py new file mode 100644 index 0000000000000..37b118d525a59 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_concurrent_insert.py @@ -0,0 +1,95 @@ +""" +This script tests concurrent inserts on a given table. +Example Usage (in Mysql Test Framework): + + CREATE TABLE t1 (a INT) ENGINE=rocksdb; + + let $exec = python suite/rocksdb/t/rocksdb_concurrent_insert.py \ + root 127.0.0.1 $MASTER_MYPORT test t1 100 4; + exec $exec; + +""" +import cStringIO +import hashlib +import MySQLdb +import os +import random +import signal +import sys +import threading +import time +import string + +def get_insert(table_name, idx): + return """INSERT INTO %s (a) VALUES (%d)""" % (table_name, idx) + +class Inserter(threading.Thread): + Instance = None + def __init__(self, con, table_name, num_inserts): + threading.Thread.__init__(self) + self.finished = False + self.num_inserts = num_inserts + con.autocommit(False) + self.con = con + self.rand = random.Random() + self.exception = None + self.table_name = table_name + Inserter.Instance = self + self.start() + def run(self): + try: + self.runme() + except Exception, e: + self.exception = traceback.format_exc() + print "caught (%s)" % e + finally: + self.finish() + def runme(self): + cur = self.con.cursor() + for i in xrange(self.num_inserts): + try: + cur.execute(get_insert(self.table_name, i)) + r = self.rand.randint(1,10) + if r < 4: + self.con.commit() + except: + cur = self.con.cursor() + try: + self.con.commit() + except 
Exception, e: + self.exception = traceback.format_exc() + print "caught (%s)" % e + pass + def finish(self): + self.finished = True + +if __name__ == '__main__': + if len(sys.argv) != 8: + print "Usage: rocksdb_concurrent_insert.py user host port db_name " \ + "table_name num_inserts num_threads" + sys.exit(1) + + user = sys.argv[1] + host = sys.argv[2] + port = int(sys.argv[3]) + db = sys.argv[4] + table_name = sys.argv[5] + num_inserts = int(sys.argv[6]) + num_workers = int(sys.argv[7]) + + worker_failed = False + workers = [] + for i in xrange(num_workers): + inserter = Inserter( + MySQLdb.connect(user=user, host=host, port=port, db=db), table_name, + num_inserts) + workers.append(inserter) + + for w in workers: + w.join() + if w.exception: + print "Worker hit an exception:\n%s\n" % w.exception + worker_failed = True + + if worker_failed: + sys.exit(1) diff --git a/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_datadir.test b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_datadir.test new file mode 100644 index 0000000000000..4399dd1a401ab --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_datadir.test @@ -0,0 +1,33 @@ +--source include/have_rocksdb.inc +let $ddir = $MYSQL_TMP_DIR/.rocksdb_datadir.test.install.db; +let $rdb_ddir = $MYSQL_TMP_DIR/.rocksdb_datadir.test; +let $sql_file = $MYSQL_TMP_DIR/rocksdb_datadir.sql; + +--write_file $sql_file +CREATE DATABASE mysqltest; +USE mysqltest; +CREATE TABLE t1 (a INT PRIMARY KEY); +INSERT INTO t1 VALUES(42); +SET GLOBAL rocksdb_force_flush_memtable_now = 1; +SELECT sleep(1); +DROP TABLE t1; +DROP DATABASE mysqltest; +EOF + +# Must ensure this directory exists before launching mysqld +mkdir $ddir; + +let $plugin_dir=`select @@plugin_dir`; +# Launch mysqld with non-standard rocksdb_datadir +exec $MYSQLD_BOOTSTRAP_CMD --plugin-dir=$plugin_dir --plugin-load=$HA_ROCKSDB_SO --datadir=$ddir --rocksdb_datadir=$rdb_ddir --default-storage-engine=rocksdb --skip-innodb --default-tmp-storage-engine=MyISAM --rocksdb < 
$sql_file; + +--echo Check for MANIFEST files +--list_files $rdb_ddir MANIFEST-0000* + +# Clean up +remove_files_wildcard $ddir *; +remove_files_wildcard $ddir *; +remove_files_wildcard $rdb_ddir *; +rmdir $ddir; +rmdir $rdb_ddir; +remove_file $sql_file; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_deadlock_detect.inc b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_deadlock_detect.inc new file mode 100644 index 0000000000000..01180ea29a814 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_deadlock_detect.inc @@ -0,0 +1,90 @@ +# +# Some basic sanity tests for deadlock detection. +# +--source include/have_rocksdb.inc + +set @prior_rocksdb_lock_wait_timeout = @@rocksdb_lock_wait_timeout; +set @prior_rocksdb_deadlock_detect = @@rocksdb_deadlock_detect; +set global rocksdb_lock_wait_timeout = 100000; +set global rocksdb_deadlock_detect = ON; + +create table t (i int primary key); +create table r1 (id int primary key, value int); +insert into r1 values (1,1),(2,2),(3,3),(4,4),(5,5),(6,6),(7,7),(8,8),(9,9),(10,10); +create table r2 like r1; +insert into r2 select * from r1; + +# deadlock on scanned locking reads +connect (con1,localhost,root,,); +let $con1= `SELECT CONNECTION_ID()`; +begin; +update r2 set value=100 where id=9; + +connect (con2,localhost,root,,); +let $con2= `SELECT CONNECTION_ID()`; +begin; +update r1 set value=100 where id=8; +--send select * from r2 for update; + +connection con1; +let $wait_condition = +`SELECT CONCAT('select count(*) = 1 from information_schema.rocksdb_trx where THREAD_ID = ', '$con2', ' and WAITING_KEY != ""')`; +--source include/wait_condition.inc +--error ER_LOCK_DEADLOCK +select * from r1 for update; +rollback; + +connection con2; +--reap; +rollback; + +connection con1; +begin; +insert into t values (1); + +connection con2; +begin; +insert into t values (2); + +connect (con3,localhost,root,,); +begin; +insert into t values (3); + +connection con1; +--send select * from t where i = 2 for update + 
+connection con2; +let $wait_condition = +`SELECT CONCAT('select count(*) = 1 from information_schema.rocksdb_trx where THREAD_ID = ', '$con1', ' and WAITING_KEY != ""')`; +--source include/wait_condition.inc + +--send select * from t where i = 3 for update + +connection con3; +let $wait_condition = +`SELECT CONCAT('select count(*) = 1 from information_schema.rocksdb_trx where THREAD_ID = ', '$con2', ' and WAITING_KEY != ""')`; +--source include/wait_condition.inc + +select * from t; +--error ER_LOCK_DEADLOCK +insert into t values (4), (1); +--echo # Statement should be rolled back +select * from t; +rollback; + +connection con2; +--reap +rollback; + +connection con1; +--reap +rollback; + +connection default; +disconnect con1; +disconnect con2; +disconnect con3; + +set global rocksdb_lock_wait_timeout = @prior_rocksdb_lock_wait_timeout; +set global rocksdb_deadlock_detect = @prior_rocksdb_deadlock_detect; +drop table t,r1,r2; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_deadlock_detect_rc-master.opt b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_deadlock_detect_rc-master.opt new file mode 100644 index 0000000000000..25b80282211c5 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_deadlock_detect_rc-master.opt @@ -0,0 +1 @@ +--transaction-isolation=read-committed diff --git a/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_deadlock_detect_rc.test b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_deadlock_detect_rc.test new file mode 100644 index 0000000000000..9757285fe8b24 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_deadlock_detect_rc.test @@ -0,0 +1 @@ +--source t/rocksdb_deadlock_detect.inc diff --git a/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_deadlock_detect_rr.test b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_deadlock_detect_rr.test new file mode 100644 index 0000000000000..9757285fe8b24 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_deadlock_detect_rr.test @@ -0,0 +1 @@ +--source 
t/rocksdb_deadlock_detect.inc diff --git a/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_deadlock_stress.inc b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_deadlock_stress.inc new file mode 100644 index 0000000000000..c88c7ebd20abf --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_deadlock_stress.inc @@ -0,0 +1,18 @@ +# +# Stress tests deadlock detection +# + +--source include/have_rocksdb.inc + +create table t1 (a int primary key, b int) engine=rocksdb; + +set @prior_rocksdb_lock_wait_timeout = @@rocksdb_lock_wait_timeout; +set @prior_rocksdb_deadlock_detect = @@rocksdb_deadlock_detect; +set global rocksdb_lock_wait_timeout = 100000; +set global rocksdb_deadlock_detect = ON; + +exec python ../storage/rocksdb/mysql-test/rocksdb/t/rocksdb_deadlock_stress.py root 127.0.0.1 $MASTER_MYPORT test t1 10000 10; + +set global rocksdb_lock_wait_timeout = @prior_rocksdb_lock_wait_timeout; +set global rocksdb_deadlock_detect = @prior_rocksdb_deadlock_detect; +drop table t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_deadlock_stress.py b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_deadlock_stress.py new file mode 100644 index 0000000000000..3bc8a3be01093 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_deadlock_stress.py @@ -0,0 +1,94 @@ +""" +This script stress tests deadlock detection. + +Usage: rocksdb_deadlock_stress.py user host port db_name table_name + num_iters num_threads +""" +import cStringIO +import hashlib +import MySQLdb +from MySQLdb.constants import ER +import os +import random +import signal +import sys +import threading +import time +import string +import traceback + +def is_deadlock_error(exc): + error_code = exc.args[0] + return (error_code == MySQLdb.constants.ER.LOCK_DEADLOCK) + +def get_query(table_name, idx): + # Let's assume that even indexes will always be acquireable, to make + # deadlock detection more interesting. 
+ if idx % 2 == 0: + return """SELECT * from %s WHERE a = %d LOCK IN SHARE MODE""" % (table_name, idx) + else: + r = random.randint(1, 3); + if r == 1: + return """SELECT * from %s WHERE a = %d FOR UPDATE""" % (table_name, idx) + elif r == 2: + return """INSERT INTO %s VALUES (%d, 1) + ON DUPLICATE KEY UPDATE b=b+1""" % (table_name, idx) + else: + return """DELETE from %s WHERE a = %d""" % (table_name, idx) + +class Worker(threading.Thread): + def __init__(self, con, table_name, num_iters): + threading.Thread.__init__(self) + self.con = con + self.table_name = table_name + self.num_iters = num_iters + self.exception = None + self.start() + def run(self): + try: + self.runme() + except Exception, e: + self.exception = traceback.format_exc() + def runme(self): + cur = self.con.cursor() + for x in xrange(self.num_iters): + try: + for i in random.sample(xrange(100), 10): + cur.execute(get_query(self.table_name, i)) + self.con.commit() + except MySQLdb.OperationalError, e: + self.con.rollback() + cur = self.con.cursor() + if not is_deadlock_error(e): + raise e + +if __name__ == '__main__': + if len(sys.argv) != 8: + print "Usage: rocksdb_deadlock_stress.py user host port db_name " \ + "table_name num_iters num_threads" + sys.exit(1) + + user = sys.argv[1] + host = sys.argv[2] + port = int(sys.argv[3]) + db = sys.argv[4] + table_name = sys.argv[5] + num_iters = int(sys.argv[6]) + num_workers = int(sys.argv[7]) + + worker_failed = False + workers = [] + for i in xrange(num_workers): + w = Worker( + MySQLdb.connect(user=user, host=host, port=port, db=db), table_name, + num_iters) + workers.append(w) + + for w in workers: + w.join() + if w.exception: + print "Worker hit an exception:\n%s\n" % w.exception + worker_failed = True + + if worker_failed: + sys.exit(1) diff --git a/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_deadlock_stress_rc-master.opt b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_deadlock_stress_rc-master.opt new file mode 100644 index 
0000000000000..25b80282211c5 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_deadlock_stress_rc-master.opt @@ -0,0 +1 @@ +--transaction-isolation=read-committed diff --git a/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_deadlock_stress_rc.test b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_deadlock_stress_rc.test new file mode 100644 index 0000000000000..67e306b8744ce --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_deadlock_stress_rc.test @@ -0,0 +1 @@ +--source t/rocksdb_deadlock_stress.inc diff --git a/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_deadlock_stress_rr.test b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_deadlock_stress_rr.test new file mode 100644 index 0000000000000..67e306b8744ce --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_deadlock_stress_rr.test @@ -0,0 +1 @@ +--source t/rocksdb_deadlock_stress.inc diff --git a/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_icp-master.opt b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_icp-master.opt new file mode 100644 index 0000000000000..acc0bdaa37822 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_icp-master.opt @@ -0,0 +1 @@ +--rocksdb_debug_optimizer_n_rows=20000 --rocksdb_records_in_range=1000 --rocksdb_perf_context_level=2 --userstat=ON diff --git a/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_icp.test b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_icp.test new file mode 100644 index 0000000000000..8bd93845e86d0 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_icp.test @@ -0,0 +1,44 @@ +--source include/have_rocksdb.inc + + +let $cf_name=cf1; + +--source include/rocksdb_icp.inc + +--echo # +--echo # Issue #67: Inefficient index condition pushdown +--echo # +create table t0 (a int) engine=myisam; +insert into t0 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9); + +create table t1 ( + pk int not null primary key, + key1 bigint(20) unsigned, + col1 int, + key (key1) +) engine=rocksdb; + +insert into t1 +select + 
A.a+10*B.a+100*C.a, + A.a+10*B.a+100*C.a, + 1234 +from t0 A, t0 B, t0 C; + +set @count=0; +let $save_query= +set @count_diff =(select (value - @count) from information_schema.rocksdb_perf_context + where table_schema=database() and table_name='t1' and stat_type='INTERNAL_KEY_SKIPPED_COUNT'); + +--replace_column 9 # +explain +select * from t1 where key1=1; + +eval $save_query; +select * from t1 where key1=1; +eval $save_query; +--echo # The following must be =1, or in any case not 999: +select @count_diff as "INTERNAL_KEY_SKIPPED_COUNT increment"; + +drop table t0,t1; + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_icp_rev-master.opt b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_icp_rev-master.opt new file mode 100644 index 0000000000000..fe129d79d6353 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_icp_rev-master.opt @@ -0,0 +1 @@ +--rocksdb_debug_optimizer_n_rows=20000 --rocksdb_records_in_range=1000 --userstat=ON diff --git a/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_icp_rev.test b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_icp_rev.test new file mode 100644 index 0000000000000..33914a4eac65e --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_icp_rev.test @@ -0,0 +1,7 @@ +--source include/have_rocksdb.inc + + +let $cf_name=rev:cf1; + +--source include/rocksdb_icp.inc + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_locks-master.opt b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_locks-master.opt new file mode 100644 index 0000000000000..c9d9edb856581 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_locks-master.opt @@ -0,0 +1 @@ +--rocksdb_print_snapshot_conflict_queries=1 diff --git a/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_locks.test b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_locks.test new file mode 100644 index 0000000000000..9a25f39a8e37a --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_locks.test @@ -0,0 +1,93 @@ +--source include/have_rocksdb.inc + +# +# 
MyRocks-specific tests for locking +# +--source include/have_debug.inc + +--enable_connect_log +create table t1 (pk int not null primary key) engine=rocksdb; + +insert into t1 values (1),(2),(3); + +set autocommit=0; +begin; +select * from t1 where pk=1 for update; + +--connect (con1,localhost,root,,) +--connection con1 +call mtr.add_suppression("Got snapshot conflict errors"); +--echo ### Connection con1 +let $ID= `select connection_id()`; +set @@rocksdb_lock_wait_timeout=500; +set autocommit=0; +begin; +--send select * from t1 where pk=1 for update; + +--connection default +--echo ### Connection default + +let $wait_condition= select 1 from INFORMATION_SCHEMA.PROCESSLIST + where ID = $ID and STATE = "Waiting for row lock"; +--source include/wait_condition.inc +## Waiting for row lock +## select connection_id(); +## select state='Waiting for row lock' from information_schema.processlist where id=2; + +rollback; + +connection con1; +reap; +rollback; +connection default; + +## +## Now, repeat the same test but let the wait time out. 
+## +begin; +select * from t1 where pk=1 for update; + +--connection con1 +--echo ### Connection con1 +set @@rocksdb_lock_wait_timeout=2; +set autocommit=0; +begin; +--error ER_LOCK_WAIT_TIMEOUT +select * from t1 where pk=1 for update; + +--connection default + +rollback; +set autocommit=1; + +--connection con1 +drop table t1; +--connection default + +--echo # +--echo # Now, test what happens if another transaction modified the record and committed +--echo # + +CREATE TABLE t1 ( + id int primary key, + value int +) engine=rocksdb collate latin1_bin; +insert into t1 values (1,1),(2,2),(3,3),(4,4),(5,5),(6,6),(7,7),(8,8),(9,9),(10,10); + +--connection con1 +BEGIN; +SELECT * FROM t1 WHERE id=3; + +--connection default +BEGIN; +UPDATE t1 SET value=30 WHERE id=3; +COMMIT; + +--connection con1 +--error ER_LOCK_DEADLOCK +SELECT * FROM t1 WHERE id=3 FOR UPDATE; + +ROLLBACK; +--disconnect con1 +--connection default +drop table t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_parts-master.opt b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_parts-master.opt new file mode 100644 index 0000000000000..ba9364e152376 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_parts-master.opt @@ -0,0 +1 @@ +--rocksdb_debug_optimizer_n_rows=1000 diff --git a/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_parts.test b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_parts.test new file mode 100644 index 0000000000000..59472e565ab38 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_parts.test @@ -0,0 +1,122 @@ +--source include/have_rocksdb.inc + +--source include/have_partition.inc + +--disable_warnings +drop table if exists t1,t2; +--enable_warnings + +--echo # Tests for MyRocks + partitioning + +--echo # +--echo # MyRocks Issue #70: Server crashes in Rdb_key_def::get_primary_key_tuple +--echo # +CREATE TABLE t1 (pk INT PRIMARY KEY, f1 INT, f2 INT, KEY(f2)) ENGINE=RocksDB +PARTITION BY HASH(pk) PARTITIONS 2; +INSERT INTO t1 VALUES (1, 6, NULL), (2, NULL, 
1); + +CREATE TABLE t2 (pk INT PRIMARY KEY, f1 INT) ENGINE=RocksDB; +INSERT INTO t2 VALUES (1, 1), (2, 1); + +SELECT f1 FROM t1 WHERE f2 = ( SELECT f1 FROM t2 WHERE pk = 2 ); + +drop table t1,t2; + +--echo # +--echo # Issue#105: key_info[secondary_key].actual_key_parts does not include primary key on partitioned tables +--echo # +CREATE TABLE t1 ( + id INT PRIMARY KEY, + a set ('a','b','c','d','e','f','g','h','i','j','k','l','m','n','o','p','q','r','s','t','u','v','w','x','y','z') CHARACTER SET utf8, + b set ('a','b','c','d','e','f','g','h','i','j','k','l','m','n','o','p','q','r','s','t','u','v','w','x','y','z') CHARACTER SET utf8 default null, + c set ('a','b','c','d','e','f','g','h','i','j','k','l','m','n','o','p','q','r','s','t','u','v','w','x','y','z') CHARACTER SET utf8 not null, + INDEX (a), + INDEX (b), + INDEX (c) +) ENGINE=RocksDB PARTITION BY key (id) partitions 2; + +INSERT INTO t1 (id, b) VALUES (28, 3); +UPDATE t1 SET id=8 WHERE c < 8 LIMIT 1; +check table t1; +drop table t1; + +--echo # +--echo # Issue #105, another testcase +--echo # +create table t1 ( + pk int primary key, + col1 int, + col2 int, + key (col1) comment 'rev:cf_issue105' +) engine=rocksdb partition by hash(pk) partitions 2; + +insert into t1 values (1,10,10); +insert into t1 values (2,10,10); + +insert into t1 values (11,20,20); +insert into t1 values (12,20,20); +explain select * from t1 force index(col1) where col1=10; +select * from t1 force index(col1) where col1=10; +select * from t1 use index () where col1=10; +drop table t1; + +--echo # +--echo # Issue #108: Index-only scans do not work for partitioned tables and extended keys +--echo # +create table t1 ( + pk int primary key, + col1 int, + col2 int, + key (col1) +) engine=rocksdb partition by hash(pk) partitions 2; + +insert into t1 values (1,10,10); +insert into t1 values (2,10,10); + +insert into t1 values (11,20,20); +insert into t1 values (12,20,20); +--echo # The following must use "Using index" +explain select pk from t1 
force index(col1) where col1=10; + +drop table t1; + +--echo # +--echo # Issue #214: subqueries cause crash +--echo # +create TABLE t1(a int,b int,c int,primary key(a,b)) + partition by list (b*a) (partition x1 values in (1) tablespace ts1, + partition x2 values in (3,11,5,7) tablespace ts2, + partition x3 values in (16,8,5+19,70-43) tablespace ts3); +create table t2(b binary(2)); +set session optimizer_switch='materialization=off'; +insert into t1(a,b) values(1,7); +select a from t1 where a in (select a from t1 where a in (select b from t2)); + +drop table t1, t2; + +--echo # +--echo # Issue #260: altering name to invalid value leaves table unaccessible +--echo # +CREATE TABLE t1 (c1 INT NOT NULL, c2 CHAR(5)) PARTITION BY HASH(c1) PARTITIONS 4; +INSERT INTO t1 VALUES(1,'a'); +--replace_result \\ / +--error ER_ERROR_ON_RENAME +RENAME TABLE t1 TO db3.t3; +SELECT * FROM t1; +SHOW TABLES; +# try it again to the same database +RENAME TABLE t1 TO test.t3; +SELECT * FROM t3; +SHOW TABLES; +# now try it again but with another existing database +CREATE DATABASE db3; +USE test; +RENAME TABLE t3 to db3.t2; +USE db3; +SELECT * FROM t2; +SHOW TABLES; +# cleanup +DROP TABLE t2; +use test; +DROP DATABASE db3; + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_qcache-master.opt b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_qcache-master.opt new file mode 100644 index 0000000000000..a00258bc48c20 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_qcache-master.opt @@ -0,0 +1 @@ +--query_cache_type=1 diff --git a/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_qcache.test b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_qcache.test new file mode 100644 index 0000000000000..b62002b002061 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_qcache.test @@ -0,0 +1,32 @@ +--source include/have_rocksdb.inc + +# Important: +# The test needs to be run with --mysqld=--query-cache-type=1 + +-- source include/have_query_cache.inc +--enable_connect_log + +create 
table t1 (pk int primary key, c char(8)) engine=RocksDB; +insert into t1 values (1,'new'),(2,'new'); + +select * from t1; + +--connect (con1,localhost,root,,) + +update t1 set c = 'updated'; +#select * from t1; + +--connection default +flush status; +show status like 'Qcache_hits'; +show global status like 'Qcache_hits'; +select * from t1; +select sql_no_cache * from t1; +select * from t1 where pk = 1; +show status like 'Qcache_hits'; +--echo # MariaDB: Qcache_not_cached is not incremented for select sql_no_cache queries +--echo # so the following query produces 2, not 3: +show status like 'Qcache_not_cached'; +show global status like 'Qcache_hits'; + +drop table t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_range-master.opt b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_range-master.opt new file mode 100644 index 0000000000000..6ad42e58aa22c --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_range-master.opt @@ -0,0 +1 @@ +--rocksdb_debug_optimizer_n_rows=1000 --rocksdb_records_in_range=50 diff --git a/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_range.test b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_range.test new file mode 100644 index 0000000000000..f4b6096c69644 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_range.test @@ -0,0 +1,196 @@ +--source include/have_rocksdb.inc + +# +# Range access test for RocksDB storage engine +# +select * from information_schema.engines where engine = 'rocksdb'; + +--disable_warnings +drop table if exists t0,t1,t2,t3,t4,t5; +--enable_warnings +create table t0 (a int) engine=myisam; +insert into t0 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9); + +create table t1(a int) engine=myisam; +insert into t1 select A.a + B.a* 10 + C.a * 100 from t0 A, t0 B, t0 C; + +create table t2 ( + pk int not null, + a int not null, + b int not null, + primary key(pk), + key(a) comment 'rev:cf1' +) engine=rocksdb; + +# 10 pk values for each value of a... 
+insert into t2 select A.a, FLOOR(A.a/10), A.a from t1 A; + +--echo # +--echo # HA_READ_KEY_EXACT tests +--echo # + +--echo # Original failure was here: +--replace_column 9 # +explain +select * from t2 force index (a) where a=0; +select * from t2 force index (a) where a=0; + +--echo # The rest are for code coverage: +--replace_column 9 # +explain +select * from t2 force index (a) where a=2; +select * from t2 force index (a) where a=2; + +--replace_column 9 # +explain +select * from t2 force index (a) where a=3 and pk=33; +select * from t2 force index (a) where a=3 and pk=33; + +select * from t2 force index (a) where a=99 and pk=99; +select * from t2 force index (a) where a=0 and pk=0; +select * from t2 force index (a) where a=-1; +select * from t2 force index (a) where a=-1 and pk in (101,102); +select * from t2 force index (a) where a=100 and pk in (101,102); + + +--echo # +--echo # #36: Range in form tbl.key >= const doesn't work in reverse column family +--echo # +--replace_column 9 # +explain +select count(*) from t2 force index (a) where a>=0 and a <=1; +select count(*) from t2 force index (a) where a>=0 and a <=1; + +--replace_column 9 # +explain +select count(*) from t2 force index (a) where a>=-1 and a <=1; +select count(*) from t2 force index (a) where a>=-1 and a <=1; + +--replace_column 9 # +explain +select * from t2 force index (a) where a=0 and pk>=3; +select * from t2 force index (a) where a=0 and pk>=3; + +--echo # Try edge cases where we fall over the end of the table +create table t3 like t2; +insert into t3 select * from t2; + +select * from t3 where pk>=1000000; +select * from t2 where pk>=1000000; + +--echo # +--echo # #42: Range in form tbl.key > const doesn't work in reverse column family +--echo # +--replace_column 9 # +explain +select count(*) from t2 force index (a) where a>0; +select count(*) from t2 force index (a) where a>0; + +--replace_column 9 # +explain +select count(*) from t2 force index (a) where a>99; +select count(*) from t2 
force index (a) where a>99; + +select * from t2 where pk>1000000; +select * from t3 where pk>1000000; + +--replace_column 9 # +explain +select count(*) from t2 force index (a) where a=2 and pk>25; +select count(*) from t2 force index (a) where a=2 and pk>25; + + +select * from t2 force index (a) where a>-10 and a < 1; +select * from t3 force index (a) where a>-10 and a < 1; + + +--echo # +--echo # #46: index_read_map(HA_READ_BEFORE_KEY) does not work in reverse column family +--echo # +select max(a) from t2 where a < 2; +select max(a) from t2 where a < -1; + +select max(pk) from t2 where a=3 and pk < 6; + +select max(pk) from t2 where pk < 200000; +select max(pk) from t2 where pk < 20; + +select max(a) from t3 where a < 2; +select max(a) from t3 where a < -1; +select max(pk) from t3 where pk < 200000; +select max(pk) from t3 where pk < 20; + +select max(pk) from t2 where a=3 and pk < 33; +select max(pk) from t3 where a=3 and pk < 33; + +--echo # +--echo # #48: index_read_map(HA_READ_PREFIX_LAST) does not work in reverse CF +--echo # + +--echo # Tests for search_flag=HA_READ_PREFIX_LAST_OR_PREV +--echo # Note: the next explain has "Using index condition" in fb/mysql-5.6 +--echo # but "Using where" in MariaDB because the latter does not +--echo # support ICP over reverse scans. 
+--replace_column 9 # +explain +select * from t2 where a between 99 and 2000 order by a desc; +select * from t2 where a between 99 and 2000 order by a desc; + +select max(a) from t2 where a <=10; +select max(a) from t2 where a <=-4; + +select max(pk) from t2 where a=5 and pk <=55; +select max(pk) from t2 where a=5 and pk <=55555; +select max(pk) from t2 where a=5 and pk <=0; + +select max(pk) from t2 where pk <=-1; +select max(pk) from t2 where pk <=999999; +select max(pk) from t3 where pk <=-1; +select max(pk) from t3 where pk <=999999; + +--echo # +--echo # Tests for search_flag=HA_READ_PREFIX_LAST +--echo # + +create table t4 ( + pk int primary key, + a int, + b int, + c int, + key(a,b,c) +) engine=rocksdb; + +insert into t4 select pk,pk,pk,pk from t2 where pk < 100; + +--replace_column 9 # +explain +select * from t4 where a=1 and b in (1) order by c desc; +select * from t4 where a=1 and b in (1) order by c desc; + +--replace_column 9 # +explain +select * from t4 where a=5 and b in (4) order by c desc; +select * from t4 where a=5 and b in (4) order by c desc; + +--echo # HA_READ_PREFIX_LAST for reverse-ordered CF +create table t5 ( + pk int primary key, + a int, + b int, + c int, + key(a,b,c) comment 'rev:cf2' +) engine=rocksdb; + +insert into t5 select pk,pk,pk,pk from t2 where pk < 100; + +--replace_column 9 # +explain +select * from t5 where a=1 and b in (1) order by c desc; +select * from t5 where a=1 and b in (1) order by c desc; + +--replace_column 9 # +explain +select * from t5 where a=5 and b in (4) order by c desc; +select * from t5 where a=5 and b in (4) order by c desc; + +drop table t0,t1,t2,t3,t4,t5; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_range2.test b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_range2.test new file mode 100644 index 0000000000000..6b8d0b90e9058 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_range2.test @@ -0,0 +1,20 @@ +--source include/have_rocksdb.inc + +# Issue#212 MyRocks chooses full index 
scan even if range scan is more efficient +# rocksdb_debug_optimizer_n_rows must not be set. + +create table t1 (id1 bigint, id2 bigint, c1 bigint, c2 bigint, c3 bigint, c4 bigint, c5 bigint, c6 bigint, c7 bigint, primary key (id1, id2), index i(c1, c2)); +--disable_query_log +let $i=0; +while ($i<10000) +{ + inc $i; + eval insert t1(id1, id2, c1, c2, c3, c4, c5, c6, c7) + values($i, 0, $i, 0, 0, 0, 0, 0, 0); +} +--enable_query_log +analyze table t1; +select count(*) from t1; +explain select c1 from t1 where c1 > 5 limit 10; +drop table t1; + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_row_stats.test b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_row_stats.test new file mode 100644 index 0000000000000..ebcc741fc175e --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_row_stats.test @@ -0,0 +1,57 @@ +source include/have_rocksdb.inc; +create table t1 (a int primary key) engine=rocksdb; + +-- echo Verify rocksdb_rows_inserted +select variable_value into @old_rows_inserted from information_schema.global_status where variable_name = 'rocksdb_rows_inserted'; +insert into t1 values(1); +select variable_value into @new_rows_inserted from information_schema.global_status where variable_name = 'rocksdb_rows_inserted'; +select @new_rows_inserted - @old_rows_inserted; + +-- echo Verify rocksdb_rows_updated +select variable_value into @old_rows_updated from information_schema.global_status where variable_name = 'rocksdb_rows_updated'; +update t1 set a=2 where a=1; +select variable_value into @new_rows_updated from information_schema.global_status where variable_name = 'rocksdb_rows_updated'; +select @new_rows_updated - @old_rows_updated; + +-- echo Verify rocksdb_rows_read +select variable_value into @old_rows_read from information_schema.global_status where variable_name = 'rocksdb_rows_read'; +select * from t1; +select variable_value into @new_rows_read from information_schema.global_status where variable_name = 'rocksdb_rows_read'; +select 
@new_rows_read - @old_rows_read; + +-- echo Verify rocksdb_rows_deleted +select variable_value into @old_rows_deleted from information_schema.global_status where variable_name = 'rocksdb_rows_deleted'; +delete from t1; +select variable_value into @new_rows_deleted from information_schema.global_status where variable_name = 'rocksdb_rows_deleted'; +select @new_rows_deleted - @old_rows_deleted; + +use mysql; +create table t1(a int primary key) engine=rocksdb; + +-- echo Verify rocksdb_system_rows_inserted +select variable_value into @old_system_rows_inserted from information_schema.global_status where variable_name = 'rocksdb_system_rows_inserted'; +insert into t1 values(1); +select variable_value into @new_system_rows_inserted from information_schema.global_status where variable_name = 'rocksdb_system_rows_inserted'; +select @new_system_rows_inserted - @old_system_rows_inserted; + +-- echo Verify rocksdb_system_rows_updated +select variable_value into @old_system_rows_updated from information_schema.global_status where variable_name = 'rocksdb_system_rows_updated'; +update t1 set a=2 where a=1; +select variable_value into @new_system_rows_updated from information_schema.global_status where variable_name = 'rocksdb_system_rows_updated'; +select @new_system_rows_updated - @old_system_rows_updated; + +-- echo Verify rocksdb_system_rows_read +select variable_value into @old_system_rows_read from information_schema.global_status where variable_name = 'rocksdb_system_rows_read'; +select * from t1; +select variable_value into @new_system_rows_read from information_schema.global_status where variable_name = 'rocksdb_system_rows_read'; +select @new_system_rows_read - @old_system_rows_read; + +-- echo Verify rocksdb_system_rows_deleted +select variable_value into @old_system_rows_deleted from information_schema.global_status where variable_name = 'rocksdb_system_rows_deleted'; +delete from t1; +select variable_value into @new_system_rows_deleted from 
information_schema.global_status where variable_name = 'rocksdb_system_rows_deleted'; +select @new_system_rows_deleted - @old_system_rows_deleted; + +drop table t1; +use test; +drop table t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_table_stats_sampling_pct_change.test b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_table_stats_sampling_pct_change.test new file mode 100644 index 0000000000000..5eaeff5cdbd0b --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_table_stats_sampling_pct_change.test @@ -0,0 +1,80 @@ +--source include/have_rocksdb.inc + +--disable_warnings +drop table if exists t1; +--enable_warnings + +# +# First set sampling rate to 100% and make sure that the baseline is +# correct and we get the correct number of rows as a result. +# +SET @ORIG_PCT = @@ROCKSDB_TABLE_STATS_SAMPLING_PCT; +SET @@global.ROCKSDB_TABLE_STATS_SAMPLING_PCT = 100; + +create table t1 (pk int primary key) engine=rocksdb; + +--disable_query_log +let $i = 0; +let $n = 10000; + +while ($i < $n) +{ + inc $i; + eval insert t1(pk) values($i); +} +--enable_query_log + +set global rocksdb_force_flush_memtable_now = true; + +# This should return 10K rows. +select table_rows from information_schema.tables +where table_schema = database() and table_name = 't1'; + +let $t1_len = `select data_length from information_schema.tables where table_schema = database() and table_name = 't1'`; + +drop table t1; + +--disable_warnings +drop table if exists t2; +--enable_warnings + +# +# Now, set the sampling rate to 10% and expect to see the same amount of +# rows. +# +SET @@global.ROCKSDB_TABLE_STATS_SAMPLING_PCT = 10; + +create table t2 (pk int primary key) engine=rocksdb; + +--disable_query_log +let $i = 0; +let $n = 10000; + +while ($i < $n) +{ + inc $i; + eval insert t2(pk) values($i); +} +--enable_query_log + +set global rocksdb_force_flush_memtable_now = true; + +# This should return 10K rows as well. 
+select table_rows from information_schema.tables +where table_schema = database() and table_name = 't2'; + +let $t2_len = `select data_length from information_schema.tables where table_schema = database() and table_name = 't2'`; +let $diff = `select abs($t1_len - $t2_len)`; + +# +# Table sizes are approximations and for this particular case we allow about +# 10% deviation. +# +if ($diff < 6000) { + select table_name from information_schema.tables where table_schema = database() and table_name = 't2'; +} + +drop table t2; + +SET GLOBAL ROCKSDB_TABLE_STATS_SAMPLING_PCT = @ORIG_PCT; + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/rollback_savepoint.test b/storage/rocksdb/mysql-test/rocksdb/t/rollback_savepoint.test new file mode 100644 index 0000000000000..8543ce81de403 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/rollback_savepoint.test @@ -0,0 +1,31 @@ +--disable_warnings +DROP TABLE IF EXISTS t1, t2; +--enable_warnings + +CREATE TABLE t1 (a INT, b CHAR(8), pk INT AUTO_INCREMENT PRIMARY KEY) ENGINE=rocksdb; +INSERT INTO t1 (a,b) VALUES (1,'a'),(2,'b'),(3,'a'); +CREATE TABLE t2 LIKE t1; +INSERT INTO t2 SELECT * FROM t1; + +--connect (con1,localhost,root,,) +--connect (con2,localhost,root,,) + +--connection con1 +START TRANSACTION WITH CONSISTENT SNAPSHOT; +SAVEPOINT a; +SELECT * FROM t1 ORDER BY pk; +ROLLBACK TO SAVEPOINT a; +SAVEPOINT a; +SELECT * FROM t2 ORDER BY pk; +ROLLBACK TO SAVEPOINT a; + +# should not be blocked +--connection con2 +ALTER TABLE t1 RENAME TO t3; + +--connection default +DROP TABLE t2, t3; + +--disconnect con1 +--disconnect con2 + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/rpl_read_free.cnf b/storage/rocksdb/mysql-test/rocksdb/t/rpl_read_free.cnf new file mode 100644 index 0000000000000..13dea1236d845 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/rpl_read_free.cnf @@ -0,0 +1,14 @@ +!include suite/rpl/my.cnf + +[mysqld.1] +sync_binlog=0 +binlog_format=row +rocksdb_read_free_rpl_tables="t.*" 
+slave-exec-mode=strict + +[mysqld.2] +sync_binlog=0 +binlog_format=row +rocksdb_read_free_rpl_tables="t.*" +slave-exec-mode=strict +rocksdb_default_cf_options=write_buffer_size=16k;target_file_size_base=16k diff --git a/storage/rocksdb/mysql-test/rocksdb/t/rpl_read_free.test b/storage/rocksdb/mysql-test/rocksdb/t/rpl_read_free.test new file mode 100644 index 0000000000000..38fb3c3214911 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/rpl_read_free.test @@ -0,0 +1,302 @@ +--source include/have_rocksdb.inc + +source include/master-slave.inc; + + +connection master; +--disable_warnings +drop table if exists t1; +--enable_warnings + +# initialization/insert +connection master; +--source init_stats_procedure.inc + +create table t1 (id int primary key, value int); +insert into t1 values (1,1), (2,2), (3,3), (4,4); +--source include/sync_slave_sql_with_master.inc + +--let $diff_tables= master:t1, slave:t1 + +--echo +--echo # regular update/delete. With rocks_read_free_rpl_tables=.*, rocksdb_rows_read does not increase on slaves +--echo +connection slave; +call save_read_stats(); +connection master; +update t1 set value=value+1 where id=1; +delete from t1 where id=4; +select * from t1; +--source include/sync_slave_sql_with_master.inc +connection slave; +call get_read_stats(); +select * from t1; + + +--echo +--echo # "rocks_read_free_rpl_tables=.*" makes "row not found error" not happen anymore +--echo +connection slave; +--source include/stop_slave.inc +delete from t1 where id in (2, 3); +--source include/start_slave.inc +call save_read_stats(); + +connection master; +update t1 set value=value+1 where id=3; +delete from t1 where id=2; +select * from t1; +--source include/sync_slave_sql_with_master.inc +connection slave; +call get_read_stats(); +select * from t1; + + +--echo +--echo ## tables without primary key -- read free replication should be disabled +--echo +--echo +--echo #no index +--echo +connection master; +drop table t1; +create table t1 (c1 int, c2 
int); +insert into t1 values (1,1), (2,2),(3,3),(4,4),(5,5); +--source include/sync_slave_sql_with_master.inc +connection slave; +call save_read_stats(); +connection master; +update t1 set c2=100 where c1=3; +delete from t1 where c1 <= 2; +--source include/sync_slave_sql_with_master.inc +connection slave; +call get_read_stats(); +select * from t1; + +--echo +--echo #secondary index only +--echo +connection master; +drop table t1; +create table t1 (c1 int, c2 int, index i(c1)); +insert into t1 values (1,1), (2,2),(3,3),(4,4),(5,5); +--source include/sync_slave_sql_with_master.inc +connection slave; +call save_read_stats(); +connection master; +update t1 set c2=100 where c1=3; +delete from t1 where c1 <= 2; +--source include/sync_slave_sql_with_master.inc +connection slave; +call get_read_stats(); +select * from t1; + + + +--echo +--echo ## large row operations -- primary key modification, secondary key modification +--echo +connection master; +drop table t1; +create table t1 (id1 bigint, id2 bigint, c1 bigint, c2 bigint, c3 bigint, c4 bigint, c5 bigint, c6 bigint, c7 bigint, primary key (id1, id2), index i(c1, c2)); + +--disable_query_log +let $i=1; +while ($i<=10000) +{ + eval insert t1(id1,id2,c1,c2,c3,c4,c5,c6,c7) + values($i,0,$i,0,0,0,0,0,0); + inc $i; +} +--enable_query_log + +--source include/sync_slave_sql_with_master.inc +connection slave; +call save_read_stats(); +connection master; + +--echo +--echo #updating all seconary keys by 1 +--echo +--disable_query_log +let $i=1; +while ($i<=10000) +{ + eval update t1 set c2=c2+1 where id1=$i and id2=0; + inc $i; +} +--enable_query_log +--source include/sync_slave_sql_with_master.inc +connection slave; +call get_read_stats(); +connection master; +--source include/diff_tables.inc + +--echo +--echo #updating all primary keys by 2 +--echo +connection slave; +call save_read_stats(); +connection master; +--disable_query_log +let $i=1; +while ($i<=10000) +{ + eval update t1 set id2=id2+2 where id1=$i and id2=0; + inc 
$i; +} +--enable_query_log +--source include/sync_slave_sql_with_master.inc +connection slave; +call get_read_stats(); +connection master; +--source include/diff_tables.inc + +--echo +--echo #updating secondary keys after truncating t1 on slave +--echo +connection slave; +truncate table t1; +call save_read_stats(); +connection master; +update t1 set c2=c2+10; +--source include/sync_slave_sql_with_master.inc +connection slave; +call get_read_stats(); +connection master; +--source include/diff_tables.inc + +--echo +--echo #updating primary keys after truncating t1 on slave +--echo +connection slave; +truncate table t1; +call save_read_stats(); +connection master; +update t1 set id2=id2+10; +--source include/sync_slave_sql_with_master.inc +connection slave; +call get_read_stats(); +connection master; +--source include/diff_tables.inc + +--echo +--echo #deleting half rows +--echo +connection slave; +call save_read_stats(); +connection master; +delete from t1 where id1 <= 5000; +--source include/sync_slave_sql_with_master.inc +connection slave; +call get_read_stats(); +connection master; +--source include/diff_tables.inc + +#--echo +#--echo # some tables with read-free replication on and some with it off +#--echo # secondary keys lose rows +#--echo +# The configuration is set up so the slave will do read-free replication on +# all tables starting with 't' +connection master; +--echo [on master] +create table t2 (id int primary key, i1 int, i2 int, value int, index(i1), index(i2)); +create table u2 (id int primary key, i1 int, i2 int, value int, index(i1), index(i2)); +insert into t2 values (1,1,1,1),(2,2,2,2),(3,3,3,3); +insert into u2 values (1,1,1,1),(2,2,2,2),(3,3,3,3); +--source include/sync_slave_sql_with_master.inc + +# make a mismatch between the slave and the master +connection slave; +--echo [on slave] +delete from t2 where id <= 2; +delete from u2 where id <= 2; + +# make changes on the master +connection master; +--echo [on master] +update t2 set i2=100, 
value=100 where id=1; +update u2 set i2=100, value=100 where id=1; + +connection slave; +--echo [on slave] +call mtr.add_suppression("Slave SQL.*Could not execute Update_rows event on table test.u2.*Error_code.*"); +call mtr.add_suppression("Slave: Can't find record in 'u2'.*"); +# wait until we have the expected error +--let $slave_sql_errno= convert_error(ER_KEY_NOT_FOUND) +--source include/wait_for_slave_sql_error.inc + +# query the t2 table on the slave +connection slave; +select count(*) from t2 force index(primary); +select count(*) from t2 force index(i1); +select count(*) from t2 force index(i2); +select * from t2 where id=1; +select i1 from t2 where i1=1; +select i2 from t2 where i2=100; + +# query the u2 table on the slave +select count(*) from u2 force index(primary); +select count(*) from u2 force index(i1); +select count(*) from u2 force index(i2); +select * from u2 where id=1; +select i1 from u2 where i1=1; +select i2 from u2 where i2=100; + +# the slave replication thread stopped because of the errors; +# cleanup the problem and restart it +--disable_query_log +insert into u2 values(1,1,1,1), (2,2,2,2); +start slave sql_thread; +--source include/wait_for_slave_sql_to_start.inc +--enable_query_log + +--echo +--echo # some tables with read-free replication on and some with it off +--echo # secondary keys have extra rows +--echo +connection master; +--echo [on master] +create table t3 (id int primary key, i1 int, i2 int, value int, index(i1), index(i2)); +create table u3 (id int primary key, i1 int, i2 int, value int, index(i1), index(i2)); +insert into t3 values (1,1,1,1),(2,2,2,2),(3,3,3,3); +insert into u3 values (1,1,1,1),(2,2,2,2),(3,3,3,3); +--source include/sync_slave_sql_with_master.inc + +# make a mismatch between the slave and the master +connection slave; +--echo [on slave] +update t3 set i1=100 where id=1; +update u3 set i1=100 where id=1; + +# make changes on the master +connection master; +--echo [on master] +delete from t3 where id=1; 
+delete from u3 where id=1; + +# make sure the slave is caught up +--source include/sync_slave_sql_with_master.inc + +# query the t3 table on the slave +connection slave; +--echo [on slave] +select count(*) from t3 force index(primary); +select count(*) from t3 force index(i1); +select count(*) from t3 force index(i2); +select i1 from t3 where i1=100; + +# query the u3 table on the slave +select count(*) from u3 force index(primary); +select count(*) from u3 force index(i1); +select count(*) from u3 force index(i2); +select i1 from u3 where i1=100; + +# cleanup +connection master; +drop table t1, t2, t3, u2, u3; +--source drop_stats_procedure.inc + +--source include/rpl_end.inc + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/rpl_row_not_found.cnf b/storage/rocksdb/mysql-test/rocksdb/t/rpl_row_not_found.cnf new file mode 100644 index 0000000000000..44100e59cc249 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/rpl_row_not_found.cnf @@ -0,0 +1,9 @@ +!include suite/rpl/my.cnf + +[mysqld.1] +binlog_format=row +[mysqld.2] +binlog_format=row +slave_parallel_workers=4 +slave_exec_mode=SEMI_STRICT +rocksdb_lock_wait_timeout=5 diff --git a/storage/rocksdb/mysql-test/rocksdb/t/rpl_row_not_found.inc b/storage/rocksdb/mysql-test/rocksdb/t/rpl_row_not_found.inc new file mode 100644 index 0000000000000..9575abb70190d --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/rpl_row_not_found.inc @@ -0,0 +1,98 @@ +--source include/have_rocksdb.inc +--source include/master-slave.inc +--source include/have_debug.inc +--source include/have_debug_sync.inc + +connection master; +--disable_warnings +drop table if exists t1; +--enable_warnings + +connection master; + +create table t0 (a int) engine=myisam; +insert into t0 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9); +create table t1(a int) engine=myisam; +insert into t1 select A.a + B.a* 10 + C.a * 100 from t0 A, t0 B, t0 C; +create table t2 ( + pk int primary key, + kp1 int, + kp2 int, + col1 int, + key (kp1,kp2) +) 
engine=rocksdb; +# Use RBR for next few statements to avoid the +# 'Unsafe statement written to the binary log' warnings. +set @tmp_binlog_format=@@binlog_format; +set @@binlog_format=ROW; +insert into t2 select a,a,a,a from t1; +create table t3 like t2; +insert into t3 select * from t2; +set binlog_format=@tmp_binlog_format; + + +# For GitHub issue#166 +# Slave is suspended at ha_rocksdb::read_range_first() -> index_read_map_impl() +# -> ha_rocksdb::get_row_by_rowid() -- which is after creating an iterator, +# Seek(), Next() (getting pk=1) +# and before GetForUpdate() and before creating a snapshot. +# Deletes remove pk=2 and pk=3, then resumes update on slave. +# The update resumes with GetForUpdate(pk=1), +# index_next() -> secondary_index_read() -> get_row_by_rowid(pk=2) +# then doesn't find a row. +# The slave should not stop with error (Can't find a record). + +--source include/sync_slave_sql_with_master.inc + +connection slave; +let $old_debug = `select @@global.debug`; +set global debug_dbug= 'd,dbug.rocksdb.get_row_by_rowid'; +--source include/stop_slave.inc +--source include/start_slave.inc + +connection master; +update t2 set col1=100 where kp1 between 1 and 3 and mod(kp2,2)=0; + +connection slave; +set debug_sync= 'now WAIT_FOR Reached'; +eval set global debug_dbug = '$old_debug'; +set sql_log_bin=0; +delete from t2 where pk=2; +delete from t2 where pk=3; +set debug_sync= 'now SIGNAL signal.rocksdb.get_row_by_rowid_let_running'; + +connection master; +--source include/sync_slave_sql_with_master.inc +connection slave; +select * from t2 where pk < 5; + +# For GitHub issue#162 (result file must be updated after fixing #162) +connection slave; +set global debug_dbug= 'd,dbug.rocksdb.get_row_by_rowid'; +--source include/stop_slave.inc +--source include/start_slave.inc + +connection master; +update t3 set col1=100 where kp1 between 1 and 4 and mod(kp2,2)=0; + +connection slave; +call mtr.add_suppression("Deadlock found when trying to get lock"); +set 
debug_sync= 'now WAIT_FOR Reached'; +eval set global debug_dbug = '$old_debug'; +set sql_log_bin=0; +delete from t3 where pk=2; +delete from t3 where pk=3; +set debug_sync= 'now SIGNAL signal.rocksdb.get_row_by_rowid_let_running'; + +connection master; +--source include/sync_slave_sql_with_master.inc +connection slave; +# col1 for pk=4 should be 100 +select * from t3 where pk < 5; + +set debug_sync='RESET'; +# Cleanup +connection master; +drop table t0, t1, t2, t3; +--source include/rpl_end.inc + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/rpl_row_not_found.test b/storage/rocksdb/mysql-test/rocksdb/t/rpl_row_not_found.test new file mode 100644 index 0000000000000..36188427585f5 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/rpl_row_not_found.test @@ -0,0 +1,4 @@ +--source include/have_binlog_format_row.inc + +--source rpl_row_not_found.inc + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/rpl_row_rocksdb.cnf b/storage/rocksdb/mysql-test/rocksdb/t/rpl_row_rocksdb.cnf new file mode 100644 index 0000000000000..09a1c853ffc8c --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/rpl_row_rocksdb.cnf @@ -0,0 +1 @@ +!include suite/rpl/my.cnf diff --git a/storage/rocksdb/mysql-test/rocksdb/t/rpl_row_rocksdb.test b/storage/rocksdb/mysql-test/rocksdb/t/rpl_row_rocksdb.test new file mode 100644 index 0000000000000..b103dfc3ef82e --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/rpl_row_rocksdb.test @@ -0,0 +1,48 @@ +--source include/have_rocksdb.inc + +source include/master-slave.inc; +source include/have_binlog_format_row.inc; + +connection master; +--disable_warnings +drop table if exists t1; +--enable_warnings + +connection master; + +select @@binlog_format; +create table t1 (pk int primary key) engine=rocksdb; +insert into t1 values (1),(2),(3); + +--source include/sync_slave_sql_with_master.inc +connection slave; + +select * from t1; + +connection master; +drop table t1; + +--echo # +--echo # Issue #18: slave crash on update with row based 
binary logging +--echo # +create table t1 (id int primary key, value int, value2 int, index(value)) engine=rocksdb; +insert into t1 values (1,1,1); +insert into t1 values (2,1,1); +insert into t1 values (3,1,1); +insert into t1 values (4,1,1); +insert into t1 values (5,1,1); +update t1 set value2=100 where id=1; +update t1 set value2=200 where id=2; +update t1 set value2=300 where id=3; + +--source include/sync_slave_sql_with_master.inc +connection slave; +select * from t1 where id=1; +select * from t1 where id=2; +select * from t1 where id=3; + +connection master; +drop table t1; + +--source include/rpl_end.inc + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/rpl_row_stats-slave.opt b/storage/rocksdb/mysql-test/rocksdb/t/rpl_row_stats-slave.opt new file mode 100644 index 0000000000000..039295e140df4 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/rpl_row_stats-slave.opt @@ -0,0 +1 @@ +--userstat=ON diff --git a/storage/rocksdb/mysql-test/rocksdb/t/rpl_row_stats.cnf b/storage/rocksdb/mysql-test/rocksdb/t/rpl_row_stats.cnf new file mode 100644 index 0000000000000..09a1c853ffc8c --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/rpl_row_stats.cnf @@ -0,0 +1 @@ +!include suite/rpl/my.cnf diff --git a/storage/rocksdb/mysql-test/rocksdb/t/rpl_row_stats.test b/storage/rocksdb/mysql-test/rocksdb/t/rpl_row_stats.test new file mode 100644 index 0000000000000..db4d1ca6f9e12 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/rpl_row_stats.test @@ -0,0 +1,47 @@ +--source include/have_rocksdb.inc + +source include/have_binlog_format_row.inc; +source include/master-slave.inc; + +connection master; +--disable_warnings +drop table if exists t1; +--enable_warnings + +# initialization/insert +connection master; +# creating save_read_stats() and get_read_stats() procedures +--source init_stats_procedure.inc + +create table t1 (id int primary key, value int); +insert into t1 values (1,1), (2,2), (3,3), (4,4), (5,5); +--source 
include/sync_slave_sql_with_master.inc + +connection slave; +call save_read_stats(); +connection master; +update t1 set value=value+1 where id=1; +update t1 set value=value+1 where id=3; +select * from t1; +--source include/sync_slave_sql_with_master.inc +connection slave; +call get_read_stats(); +select * from t1; +call save_read_stats(); + +connection master; +delete from t1 where id in (4,5); +select * from t1; +--source include/sync_slave_sql_with_master.inc +connection slave; +call get_read_stats(); +select * from t1; + + +# cleanup +connection master; +drop table t1; +--source drop_stats_procedure.inc + +--source include/rpl_end.inc + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/rpl_row_triggers.cnf b/storage/rocksdb/mysql-test/rocksdb/t/rpl_row_triggers.cnf new file mode 100644 index 0000000000000..d20d3396f0a6e --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/rpl_row_triggers.cnf @@ -0,0 +1,19 @@ +!include suite/rpl/my.cnf + +[mysqld.1] +binlog_format=row +gtid_mode=ON +enforce_gtid_consistency +log_slave_updates +binlog_row_image=FULL +rocksdb_read_free_rpl_tables=.* +rocksdb_strict_collation_check=0 +[mysqld.2] +binlog_format=row +gtid_mode=ON +enforce_gtid_consistency +log_slave_updates +binlog_row_image=FULL +rocksdb_read_free_rpl_tables=.* +rocksdb_strict_collation_check=0 + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/rpl_row_triggers.test b/storage/rocksdb/mysql-test/rocksdb/t/rpl_row_triggers.test new file mode 100644 index 0000000000000..4490353b7497d --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/rpl_row_triggers.test @@ -0,0 +1,262 @@ +-- source include/have_binlog_format_row.inc +-- source include/have_rbr_triggers.inc +-- source include/have_rocksdb.inc +-- source include/master-slave.inc + +-- echo # Test of row replication with triggers on the slave side +connection master; +CREATE TABLE t1 (C1 CHAR(1) primary key, C2 CHAR(1)); +SELECT * FROM t1; + +sync_slave_with_master; + +connection slave; +SET 
@old_slave_exec_mode= @@global.slave_exec_mode; +SET @old_slave_run_triggers_for_rbr= @@global.slave_run_triggers_for_rbr; +SET @@global.slave_exec_mode= IDEMPOTENT; +SET @@global.slave_run_triggers_for_rbr= YES; +SELECT * FROM t1; +create table t2 (id char(2) primary key, cnt int, o char(1), n char(1)); +insert into t2 values + ('u0', 0, ' ', ' '),('u1', 0, ' ', ' '), + ('d0', 0, ' ', ' '),('d1', 0, ' ', ' '), + ('i0', 0, ' ', ' '),('i1', 0, ' ', ' '); +create trigger t1_cnt_b before update on t1 for each row + update t2 set cnt=cnt+1, o=old.C1, n=new.C1 where id = 'u0'; +create trigger t1_cnt_db before delete on t1 for each row + update t2 set cnt=cnt+1, o=old.C1, n=' ' where id = 'd0'; +create trigger t1_cnt_ib before insert on t1 for each row + update t2 set cnt=cnt+1, n=new.C1, o=' ' where id = 'i0'; +create trigger t1_cnt_a after update on t1 for each row + update t2 set cnt=cnt+1, o=old.C1, n=new.C1 where id = 'u1'; +create trigger t1_cnt_da after delete on t1 for each row + update t2 set cnt=cnt+1, o=old.C1, n=' ' where id = 'd1'; +create trigger t1_cnt_ia after insert on t1 for each row + update t2 set cnt=cnt+1, n=new.C1, o=' ' where id = 'i1'; +SELECT * FROM t2 order by id; + +connection master; +--echo # INSERT triggers test +insert into t1 values ('a','b'); + +sync_slave_with_master; + +connection slave; +SELECT * FROM t2 order by id; + +connection master; +--echo # UPDATE triggers test +update t1 set C1= 'd'; +sync_slave_with_master; + +connection slave; +SELECT * FROM t2 order by id; + +connection master; +--echo # DELETE triggers test +delete from t1 where C1='d'; + +sync_slave_with_master; + +connection slave; +SELECT * FROM t2 order by id; +--echo # INSERT triggers which cause also UPDATE test (insert duplicate row) +insert into t1 values ('0','1'); +SELECT * FROM t2 order by id; + +connection master; +insert into t1 values ('0','1'); + +sync_slave_with_master; + +connection slave; +SELECT * FROM t2 order by id; +--echo # INSERT triggers which 
cause also DELETE test +--echo # (insert duplicate row in table referenced by foreign key) +insert into t1 values ('1','1'); + +connection master; +# Foreign key is not supported in MyRocks +#CREATE TABLE t3 (C1 CHAR(1) primary key, FOREIGN KEY (C1) REFERENCES t1(C1) ); +#insert into t1 values ('1','1'); + +#sync_slave_with_master; + +#connection slave; +#SELECT * FROM t2 order by id; + +#connection master; +#drop table t3,t1; +drop table if exists t1; + +sync_slave_with_master; + +connection slave; +SET @@global.slave_exec_mode= @old_slave_exec_mode; +SET @@global.slave_run_triggers_for_rbr= @old_slave_run_triggers_for_rbr; +drop table t2; + +--connection master +CREATE TABLE t1 (i INT); +CREATE TABLE t2 (i INT); + +--sync_slave_with_master +SET @old_slave_run_triggers_for_rbr= @@global.slave_run_triggers_for_rbr; +SET GLOBAL slave_run_triggers_for_rbr=YES; +CREATE TRIGGER tr AFTER INSERT ON t1 FOR EACH ROW + INSERT INTO t2 VALUES (new.i); + +--connection master +BEGIN; +INSERT INTO t1 VALUES (1); +INSERT INTO t1 VALUES (2); +COMMIT; +--sync_slave_with_master +select * from t2; +SET @@global.slave_run_triggers_for_rbr= @old_slave_run_triggers_for_rbr; +--connection master +drop tables t2,t1; + +--sync_slave_with_master + +-- echo # Triggers on slave do not work if master has some + +connection master; +CREATE TABLE t1 (C1 CHAR(1) primary key, C2 CHAR(1)); +SELECT * FROM t1; +create trigger t1_dummy before delete on t1 for each row + set @dummy= 1; + +sync_slave_with_master; + +connection slave; +SET @old_slave_exec_mode= @@global.slave_exec_mode; +SET @old_slave_run_triggers_for_rbr= @@global.slave_run_triggers_for_rbr; +SET @@global.slave_exec_mode= IDEMPOTENT; +SET @@global.slave_run_triggers_for_rbr= YES; +SELECT * FROM t1; +create table t2 (id char(2) primary key, cnt int, o char(1), n char(1)); +insert into t2 values + ('u0', 0, ' ', ' '),('u1', 0, ' ', ' '), + ('d0', 0, ' ', ' '),('d1', 0, ' ', ' '), + ('i0', 0, ' ', ' '),('i1', 0, ' ', ' '); +create trigger 
t1_cnt_b before update on t1 for each row + update t2 set cnt=cnt+1, o=old.C1, n=new.C1 where id = 'u0'; +create trigger t1_cnt_ib before insert on t1 for each row + update t2 set cnt=cnt+1, n=new.C1, o=' ' where id = 'i0'; +create trigger t1_cnt_a after update on t1 for each row + update t2 set cnt=cnt+1, o=old.C1, n=new.C1 where id = 'u1'; +create trigger t1_cnt_da after delete on t1 for each row + update t2 set cnt=cnt+1, o=old.C1, n=' ' where id = 'd1'; +create trigger t1_cnt_ia after insert on t1 for each row + update t2 set cnt=cnt+1, n=new.C1, o=' ' where id = 'i1'; +SELECT * FROM t2 order by id; + +connection master; +--echo # INSERT triggers test +insert into t1 values ('a','b'); + +sync_slave_with_master; + +connection slave; +SELECT * FROM t2 order by id; +connection master; +--echo # UPDATE triggers test +update t1 set C1= 'd'; + +sync_slave_with_master; + +connection slave; +SELECT * FROM t2 order by id; + +connection master; +--echo # DELETE triggers test +delete from t1 where C1='d'; + +sync_slave_with_master; + +connection slave; +SELECT * FROM t2 order by id; +--echo # INSERT triggers which cause also UPDATE test (insert duplicate row) +insert into t1 values ('0','1'); +SELECT * FROM t2 order by id; + + +connection master; +insert into t1 values ('0','1'); + +sync_slave_with_master; + +connection slave; +SELECT * FROM t2 order by id; +--echo # INSERT triggers which cause also DELETE test +--echo # (insert duplicate row in table referenced by foreign key) +insert into t1 values ('1','1'); + +connection master; + +# Foreign Key is not supported in MyRocks +#CREATE TABLE t3 (C1 CHAR(1) primary key, FOREIGN KEY (C1) REFERENCES t1(C1) ); +#insert into t1 values ('1','1'); + +#sync_slave_with_master; + +#connection slave; +#SELECT * FROM t2 order by id; + +#connection master; +#drop table t3,t1; +drop table if exists t1; + +sync_slave_with_master; + +connection slave; +SET @@global.slave_exec_mode= @old_slave_exec_mode; +SET 
@@global.slave_run_triggers_for_rbr= @old_slave_run_triggers_for_rbr; +drop table t2; + +--echo # +--echo # MDEV-5513: Trigger is applied to the rows after first one +--echo # + +--connection master +create table t1 (a int, b int); +create table tlog (a int auto_increment primary key); +set sql_log_bin=0; +create trigger tr1 after insert on t1 for each row insert into tlog values (null); +set sql_log_bin=1; + +sync_slave_with_master; +--connection slave + +set @slave_run_triggers_for_rbr.saved = @@slave_run_triggers_for_rbr; +set global slave_run_triggers_for_rbr=1; +create trigger tr2 before insert on t1 for each row set new.b = new.a; + +--connection master +insert into t1 values (1,10),(2,20),(3,30); + +--sync_slave_with_master +select * from t1; + +--echo # +--echo # Verify slave skips running triggers if master ran and logged the row events for triggers +--echo # +--connection master +create table t4(a int, b int); +delete from tlog; +create trigger tr4 before insert on t4 for each row insert into tlog values (null); +insert into t4 values (1, 10),(2, 20); +select * from tlog; + +--sync_slave_with_master +select * from t4; +select * from tlog; + +# Cleanup +set global slave_run_triggers_for_rbr = @slave_run_triggers_for_rbr.saved; +--connection master +drop table t1, tlog, t4; +sync_slave_with_master; + +--source include/rpl_end.inc diff --git a/storage/rocksdb/mysql-test/rocksdb/t/rpl_savepoint.cnf b/storage/rocksdb/mysql-test/rocksdb/t/rpl_savepoint.cnf new file mode 100644 index 0000000000000..09a1c853ffc8c --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/rpl_savepoint.cnf @@ -0,0 +1 @@ +!include suite/rpl/my.cnf diff --git a/storage/rocksdb/mysql-test/rocksdb/t/rpl_savepoint.test b/storage/rocksdb/mysql-test/rocksdb/t/rpl_savepoint.test new file mode 100644 index 0000000000000..13325cf2aa13a --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/rpl_savepoint.test @@ -0,0 +1,91 @@ +--source include/have_rocksdb.inc + +source 
include/have_binlog_format_row.inc; +source include/master-slave.inc; + +connection master; +--disable_warnings +drop table if exists t1; +--enable_warnings + +connection master; + +create table t1 (id int primary key, value int); +insert into t1 values (1,1), (2,2), (3,3); + +begin; +insert into t1 values (11, 1); +savepoint a; +insert into t1 values (12, 1); +--error ER_UNKNOWN_ERROR +rollback to savepoint a; +--error ER_UNKNOWN_ERROR +commit; +commit; +select * from t1; + +--source include/sync_slave_sql_with_master.inc +connection slave; + +select * from t1; + +connection master; +begin; +insert into t1 values (21, 1); +savepoint a; +insert into t1 values (22, 1); +--error ER_UNKNOWN_ERROR +rollback to savepoint a; +--error ER_UNKNOWN_ERROR +insert into t1 values (23, 1); +--error ER_UNKNOWN_ERROR +commit; +commit; +select * from t1; + +--source include/sync_slave_sql_with_master.inc +connection slave; +select * from t1; + + +connection master; +begin; +insert into t1 values (31, 1); +savepoint a; +insert into t1 values (32, 1); +savepoint b; +insert into t1 values (33, 1); +--error ER_UNKNOWN_ERROR +rollback to savepoint a; +--error ER_UNKNOWN_ERROR +insert into t1 values (34, 1); +rollback; +select * from t1; + +--source include/sync_slave_sql_with_master.inc +connection slave; +select * from t1; + +### GitHub Issue#195 +connection master; +SET autocommit=off; +select * from t1; +SAVEPOINT A; +select * from t1; +SAVEPOINT A; +insert into t1 values (35, 35); +--error ER_UNKNOWN_ERROR +ROLLBACK TO SAVEPOINT A; +--error ER_UNKNOWN_ERROR +START TRANSACTION; +select * from t1; +--source include/sync_slave_sql_with_master.inc +connection slave; +select * from t1; + + +connection master; +drop table t1; + +--source include/rpl_end.inc + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/rpl_statement.cnf b/storage/rocksdb/mysql-test/rocksdb/t/rpl_statement.cnf new file mode 100644 index 0000000000000..6e5130c1f0170 --- /dev/null +++ 
b/storage/rocksdb/mysql-test/rocksdb/t/rpl_statement.cnf @@ -0,0 +1,7 @@ +!include suite/rpl/my.cnf + +[mysqld.1] +binlog_format=statement +[mysqld.2] +binlog_format=mixed +rocksdb_lock_wait_timeout=5 diff --git a/storage/rocksdb/mysql-test/rocksdb/t/rpl_statement.test b/storage/rocksdb/mysql-test/rocksdb/t/rpl_statement.test new file mode 100644 index 0000000000000..39a21e67f056b --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/rpl_statement.test @@ -0,0 +1,59 @@ +--source include/have_rocksdb.inc +source include/master-slave.inc; + +source include/have_binlog_format_statement.inc; + +connection master; +--disable_warnings +drop table if exists t1; +--enable_warnings + +connection master; + +select @@binlog_format; +create table t1 (pk int primary key) engine=rocksdb; +--error ER_UNKNOWN_ERROR +insert into t1 values (1),(2),(3); + +set session rocksdb_unsafe_for_binlog=on; +insert into t1 values (1),(2),(3); +select * from t1; +delete from t1; +set session rocksdb_unsafe_for_binlog=off; + +--error ER_UNKNOWN_ERROR +insert into t1 values (1),(2),(3); + +set binlog_format=row; +insert into t1 values (1),(2),(3); + +--source include/sync_slave_sql_with_master.inc +connection slave; + +select * from t1; + +connection master; +drop table t1; + +create table t1 (id int primary key, value int, value2 int, index(value)) engine=rocksdb; +insert into t1 values (1,1,1); +insert into t1 values (2,1,1); +insert into t1 values (3,1,1); +insert into t1 values (4,1,1); +insert into t1 values (5,1,1); +update t1 set value2=100 where id=1; +update t1 set value2=200 where id=2; +update t1 set value2=300 where id=3; + +--source include/sync_slave_sql_with_master.inc +connection slave; +select * from t1 where id=1; +select * from t1 where id=2; +select * from t1 where id=3; + +connection master; +drop table t1; +set binlog_format=row; + +--source include/rpl_end.inc + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/rpl_statement_not_found.cnf 
b/storage/rocksdb/mysql-test/rocksdb/t/rpl_statement_not_found.cnf new file mode 100644 index 0000000000000..470b073d1857b --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/rpl_statement_not_found.cnf @@ -0,0 +1,9 @@ +!include suite/rpl/my.cnf + +[mysqld.1] +binlog_format=statement +rocksdb_unsafe_for_binlog=1 +[mysqld.2] +binlog_format=row +slave_parallel_workers=4 +rocksdb_lock_wait_timeout=5 diff --git a/storage/rocksdb/mysql-test/rocksdb/t/rpl_statement_not_found.test b/storage/rocksdb/mysql-test/rocksdb/t/rpl_statement_not_found.test new file mode 100644 index 0000000000000..019e83acf140a --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/rpl_statement_not_found.test @@ -0,0 +1,3 @@ +--source include/have_binlog_format_statement.inc +--source rpl_row_not_found.inc + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/rqg.inc b/storage/rocksdb/mysql-test/rocksdb/t/rqg.inc new file mode 100644 index 0000000000000..40154d9eaa72b --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/rqg.inc @@ -0,0 +1,44 @@ +# +# Random Query Generator tests +# +# Arguments needed to be set by the test when including this one: +# $TESTDIR : name of sub-directory in conf containing the data/grammar files +# $GRAMMAR_FILES: space separated list of grammar files +# $DATA_FILE: name of the data file +# + +let $MYSQL_BASEDIR = `SELECT @@BASEDIR`; +let RQG_BASE = $MYSQL_BASEDIR/rqg/rqg/common/mariadb-patches; +let MYSQL_SOCKET = `SELECT @@SOCKET`; +let GRAMMAR_FILES = $GRAMMAR_FILES; +let DATA_FILE = $DATA_FILE; +let TESTDIR = $TESTDIR; +let $TESTDB = rqg_$TESTDIR; +let TESTDB = $TESTDB; + +--eval CREATE DATABASE IF NOT EXISTS $TESTDB + +--perl + +$ENV{'RQG_HOME'}=$ENV{'RQG_BASE'}; +foreach $grammar_file (split(/ /, $ENV{'GRAMMAR_FILES'})) { + + # Errors from the gentest.pl file will be captured in the results file + my $cmd = "perl $ENV{'RQG_BASE'}/gentest.pl " . + "--dsn=dbi:mysql:host=:port=:user=root:database=$ENV{'TESTDB'}" . + ":mysql_socket=$ENV{'MYSQL_SOCKET'} " . 
+ "--gendata=$ENV{'RQG_BASE'}/conf/$ENV{'TESTDIR'}/$ENV{'DATA_FILE'} " . + "--grammar=$ENV{'RQG_BASE'}/conf/$ENV{'TESTDIR'}/$grammar_file " . + "--threads=5 --queries=10000 --duration=60 --sqltrace 2>&1 >> " . + "$ENV{'MYSQLTEST_VARDIR'}/tmp/$ENV{'TESTDB'}.log"; + + print "Running test with grammar file $grammar_file\n"; + system($cmd); + if ($? != 0) { + print ("Failure running test! Command executed: $cmd\n"); + } +} + +EOF + +--eval DROP DATABASE $TESTDB diff --git a/storage/rocksdb/mysql-test/rocksdb/t/rqg_examples-master.opt b/storage/rocksdb/mysql-test/rocksdb/t/rqg_examples-master.opt new file mode 100644 index 0000000000000..5b714857e1310 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/rqg_examples-master.opt @@ -0,0 +1 @@ +--rocksdb_strict_collation_check=0 diff --git a/storage/rocksdb/mysql-test/rocksdb/t/rqg_examples.test b/storage/rocksdb/mysql-test/rocksdb/t/rqg_examples.test new file mode 100644 index 0000000000000..4eb02ac648a2c --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/rqg_examples.test @@ -0,0 +1,8 @@ +--source include/have_rocksdb.inc + +# RQG's examples test +let $TESTDIR = examples; +let $GRAMMAR_FILES = example.yy; +let $DATA_FILE = example.zz; + +--source rqg.inc diff --git a/storage/rocksdb/mysql-test/rocksdb/t/rqg_runtime-master.opt b/storage/rocksdb/mysql-test/rocksdb/t/rqg_runtime-master.opt new file mode 100644 index 0000000000000..f494273892c6e --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/rqg_runtime-master.opt @@ -0,0 +1 @@ +--rocksdb_strict_collation_check=0 --secure-file-priv=/tmp diff --git a/storage/rocksdb/mysql-test/rocksdb/t/rqg_runtime.test b/storage/rocksdb/mysql-test/rocksdb/t/rqg_runtime.test new file mode 100644 index 0000000000000..d5914745219e5 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/rqg_runtime.test @@ -0,0 +1,53 @@ +--source include/have_rocksdb.inc + +call mtr.add_suppression("Did not write failed "); +call mtr.add_suppression("Can't open and lock privilege tables"); 
 + +SET @ORIG_EVENT_SCHEDULER = @@EVENT_SCHEDULER; + +# mysql.user and mysql.tables_priv are modified by the +# tests, so they need to be restored to the original +# state. +--disable_warnings +CREATE TABLE mysql.user_temp LIKE mysql.user; +INSERT mysql.user_temp SELECT * FROM mysql.user; +CREATE TABLE mysql.tables_priv_temp LIKE mysql.tables_priv; +INSERT mysql.tables_priv_temp SELECT * FROM mysql.tables_priv; +--enable_warnings + +# RQG's runtime test +let $TESTDIR = runtime; + +let $GRAMMAR_FILES = alter_online.yy; +let $DATA_FILE = alter_online.zz; + +--source rqg.inc + +let $GRAMMAR_FILES = concurrency_1.yy; +let $DATA_FILE = concurrency_1.zz; + +--source rqg.inc + +let $GRAMMAR_FILES = connect_kill_sql.yy; +let $DATA_FILE = connect_kill_data.zz; + +--source rqg.inc + +let $GRAMMAR_FILES = metadata_stability.yy; +let $DATA_FILE = metadata_stability.zz; + +--source rqg.inc + +--disable_warnings +DELETE FROM mysql.tables_priv; +DELETE FROM mysql.user; +INSERT mysql.user SELECT * FROM mysql.user_temp; +INSERT mysql.tables_priv SELECT * FROM mysql.tables_priv_temp; +DROP TABLE mysql.user_temp; +DROP TABLE mysql.tables_priv_temp; +DROP TABLE IF EXISTS test.executors; +DROP DATABASE IF EXISTS testdb_N; +DROP DATABASE IF EXISTS testdb_S; +--enable_warnings + +SET GLOBAL EVENT_SCHEDULER = @ORIG_EVENT_SCHEDULER; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/rqg_transactions-master.opt b/storage/rocksdb/mysql-test/rocksdb/t/rqg_transactions-master.opt new file mode 100644 index 0000000000000..5b714857e1310 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/rqg_transactions-master.opt @@ -0,0 +1 @@ +--rocksdb_strict_collation_check=0 diff --git a/storage/rocksdb/mysql-test/rocksdb/t/rqg_transactions.test b/storage/rocksdb/mysql-test/rocksdb/t/rqg_transactions.test new file mode 100644 index 0000000000000..f29ddcb8c8160 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/rqg_transactions.test @@ -0,0 +1,10 @@ +--source include/have_rocksdb.inc + +call
mtr.add_suppression("Deadlock found when trying to get lock"); + +# RQG's transactions test +let $TESTDIR = transactions; +let $GRAMMAR_FILES = transactions.yy repeatable_read.yy transaction_durability.yy transactions-flat.yy combinations.yy repeatable_read.yy transaction_durability.yy transactions-flat.yy; +let $DATA_FILE = transactions.zz; + +--source rqg.inc diff --git a/storage/rocksdb/mysql-test/rocksdb/t/se-innodb.out b/storage/rocksdb/mysql-test/rocksdb/t/se-innodb.out new file mode 100644 index 0000000000000..406e506613272 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/se-innodb.out @@ -0,0 +1 @@ +Can't open perl script "./mtr": No such file or directory diff --git a/storage/rocksdb/mysql-test/rocksdb/t/select.test b/storage/rocksdb/mysql-test/rocksdb/t/select.test new file mode 100644 index 0000000000000..c4e1ad464a39b --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/select.test @@ -0,0 +1,202 @@ +--source include/have_rocksdb.inc + +# +# Basic SELECT statements +# + +--disable_warnings +DROP TABLE IF EXISTS t1, t2; +--enable_warnings + +CREATE TABLE t1 (a INT, b CHAR(8), pk INT AUTO_INCREMENT PRIMARY KEY) ENGINE=rocksdb; +INSERT INTO t1 (a,b) VALUES (100,'foobar'),(1,'z'),(200,'bar'); + +CREATE TABLE t2 (a INT, b CHAR(8), pk INT AUTO_INCREMENT PRIMARY KEY) ENGINE=rocksdb; +INSERT INTO t2 (a,b) SELECT a, b FROM t1; +INSERT INTO t1 (a,b) SELECT a, b FROM t2; + +--sorted_result +SELECT * FROM t1; + +# Modifiers + +--sorted_result +SELECT DISTINCT a FROM t1; + +--sorted_result +SELECT ALL b, a FROM t1; + +# Optimizer and cache directives should not have any visible effect here, +# but we will add them for completness + +--sorted_result +SELECT STRAIGHT_JOIN SQL_CACHE t1.* FROM t2, t1 WHERE t1.a <> t2.a; + +--sorted_result +SELECT SQL_SMALL_RESULT SQL_NO_CACHE t1.a FROM t1, t2; + +--sorted_result +SELECT SQL_BIG_RESULT SQL_CALC_FOUND_ROWS DISTINCT(t2.a) + FROM t1 t1_1, t2, t1 t1_2; +SELECT FOUND_ROWS(); + +let $query_cache = `SELECT 
@@query_cache_size`; +SET GLOBAL query_cache_size = 1024*1024; +--sorted_result +SELECT SQL_CACHE * FROM t1, t2; +eval SET GLOBAL query_cache_size = $query_cache; + +# Combination of main clauses + +--sorted_result +SELECT a+10 AS field1, CONCAT(b,':',b) AS field2 FROM t1 +WHERE b > 'b' AND a IS NOT NULL +GROUP BY 2 DESC, field1 ASC +HAVING field1 < 1000 +ORDER BY field2, 1 DESC, field1*2 +LIMIT 5 OFFSET 1; + +# ROLLUP +--sorted_result +SELECT SUM(a), MAX(a), b FROM t1 GROUP BY b WITH ROLLUP; + +# Procedure + +--sorted_result +SELECT * FROM t2 WHERE a>0 PROCEDURE ANALYSE(); + +# SELECT INTO +let $datadir = `SELECT @@datadir`; + +--replace_result $datadir +eval +SELECT t1.a, t2.b FROM t2, t1 WHERE t1.a = t2.a ORDER BY t2.b, t1.a + INTO OUTFILE '$datadir/select.out' + CHARACTER SET utf8 + FIELDS TERMINATED BY ',' OPTIONALLY ENCLOSED BY ''''; +--cat_file $datadir/select.out +--remove_file $datadir/select.out + +--replace_result $datadir +--error ER_TOO_MANY_ROWS +eval +SELECT t1.a, t2.b FROM t2, t1 WHERE t1.a = t2.a ORDER BY t2.b, t1.a + INTO DUMPFILE '$datadir/select.dump'; +--remove_file $datadir/select.dump +--replace_result $datadir +eval +SELECT t1.*, t2.* FROM t1, t2 ORDER BY t2.b, t1.a, t2.a, t1.b, t1.pk, t2.pk LIMIT 1 + INTO DUMPFILE '$datadir/select.dump'; + +--cat_file $datadir/select.dump +--echo +--remove_file $datadir/select.dump + +SELECT MIN(a), MAX(a) FROM t1 INTO @min, @max; +SELECT @min, @max; + +# Joins + +--sorted_result +SELECT t1_1.*, t2.* FROM t2, t1 AS t1_1, t1 AS t1_2 + WHERE t1_1.a = t1_2.a AND t2.a = t1_1.a; + +--sorted_result +SELECT alias1.* FROM ( SELECT a,b FROM t1 ) alias1, t2 WHERE t2.a IN (100,200); + +--sorted_result +SELECT t1.a FROM { OJ t1 LEFT OUTER JOIN t2 ON t1.a = t2.a+10 }; + +--sorted_result +SELECT t1.* FROM t2 INNER JOIN t1; + +--sorted_result +SELECT t1_2.* FROM t1 t1_1 CROSS JOIN t1 t1_2 ON t1_1.b = t1_2.b; + +--sorted_result +SELECT t1.a, t2.b FROM t2 STRAIGHT_JOIN t1 WHERE t1.b > t2.b; + +SELECT t1.a, t2.b FROM t2 
STRAIGHT_JOIN t1 ON t1.b > t2.b ORDER BY t1.a, t2.b; + +SELECT t2.* FROM t1 LEFT JOIN t2 USING (a) ORDER BY t2.a, t2.b LIMIT 1; + +--sorted_result +SELECT t2.* FROM t2 LEFT OUTER JOIN t1 ON t1.a = t2.a WHERE t1.a IS NOT NULL; + +SELECT SUM(t2.a) FROM t1 RIGHT JOIN t2 ON t2.b = t1.b; + +SELECT MIN(t2.a) FROM t1 RIGHT OUTER JOIN t2 USING (b,a); + +--sorted_result +SELECT alias.b FROM t1 NATURAL JOIN ( SELECT a,b FROM t1 ) alias WHERE b > ''; + +--sorted_result +SELECT t2.b FROM ( SELECT a,b FROM t1 ) alias NATURAL LEFT JOIN t2 WHERE b IS NOT NULL; + +--sorted_result +SELECT t1.*, t2.* FROM t1 NATURAL LEFT OUTER JOIN t2; + +--sorted_result +SELECT t2_2.* FROM t2 t2_1 NATURAL RIGHT JOIN t2 t2_2 WHERE t2_1.a IN ( SELECT a FROM t1 ); + +--sorted_result +SELECT t1_2.b FROM t1 t1_1 NATURAL RIGHT OUTER JOIN t1 t1_2 INNER JOIN t2; + +# Subquery as scalar operand, subquery in the FROM clause + +--sorted_result +SELECT ( SELECT MIN(a) FROM ( SELECT a,b FROM t1 ) alias1 ) AS min_a FROM t2; + +# Comparison using subqueries + +--sorted_result +SELECT a,b FROM t2 WHERE a = ( SELECT MIN(a) FROM t1 ); + +--sorted_result +SELECT a,b FROM t2 WHERE b LIKE ( SELECT b FROM t1 ORDER BY b LIMIT 1 ); + +# Subquery with IN, correlated subquery + +--sorted_result +SELECT t2.* FROM t1 t1_outer, t2 WHERE ( t1_outer.a, t2.b ) IN ( SELECT a, b FROM t2 WHERE a = t1_outer.a ); + +# Subquery with ANY, ALL + +--sorted_result +SELECT a,b FROM t2 WHERE b = ANY ( SELECT b FROM t1 WHERE a > 1 ); + +--sorted_result +SELECT a,b FROM t2 WHERE b > ALL ( SELECT b FROM t1 WHERE b < 'foo' ); + +# Row subqueries + +--sorted_result +SELECT a,b FROM t1 WHERE ROW(a, b) = ( SELECT a, b FROM t2 ORDER BY a, b LIMIT 1 ); + +# Subquery with EXISTS + +--sorted_result +SELECT a,b FROM t1 WHERE EXISTS ( SELECT a,b FROM t2 WHERE t2.b > t1.b ); + +# Subquery in ORDER BY + +--sorted_result +SELECT t1.* FROM t1, t2 ORDER BY ( SELECT b FROM t1 WHERE a IS NULL ORDER BY b LIMIT 1 ) DESC; + +# Subquery in HAVING + +--sorted_result 
+SELECT a, b FROM t1 HAVING a IN ( SELECT a FROM t2 WHERE b = t1.b ); + +# Union + +--sorted_result +SELECT a,b FROM t1 UNION SELECT a,b FROM t2 UNION DISTINCT SELECT a,b FROM t1; + +--sorted_result +SELECT a,b FROM t1 UNION SELECT a,b FROM t2 UNION ALL SELECT a,b FROM t1; + + +# Cleanup +DROP TABLE t1, t2; + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/select_for_update.test b/storage/rocksdb/mysql-test/rocksdb/t/select_for_update.test new file mode 100644 index 0000000000000..14fdfb7896c2c --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/select_for_update.test @@ -0,0 +1,55 @@ +--source include/have_rocksdb.inc + +# +# SELECT .. FOR UPDATE +# + +--disable_warnings +DROP TABLE IF EXISTS t1; +--enable_warnings + +--enable_connect_log + +--source include/count_sessions.inc + +CREATE TABLE t1 (a INT, b CHAR(8), pk INT AUTO_INCREMENT PRIMARY KEY) ENGINE=rocksdb; +INSERT INTO t1 (a,b) VALUES (1,'a'),(2,'b'),(3,'a'); + +--connect (con1,localhost,root,,) +BEGIN; +--sorted_result +SELECT a,b FROM t1 WHERE b='a' FOR UPDATE; + +--connection default +SET lock_wait_timeout = 1; + +# Should still be able to select + +--sorted_result +SELECT a,b FROM t1 WHERE b='a'; + +# ... 
but not with LOCK IN SHARE MODE + +--sorted_result +--error ER_LOCK_WAIT_TIMEOUT +SELECT a,b FROM t1 WHERE b='a' LOCK IN SHARE MODE; + +--error ER_LOCK_WAIT_TIMEOUT +UPDATE t1 SET b='c' WHERE b='a'; + +--connection con1 +COMMIT; +--sorted_result +SELECT a,b FROM t1; + +--disconnect con1 +--connection default +# Now it can be updated all right +UPDATE t1 SET b='c' WHERE b='a'; +--sorted_result +SELECT a,b FROM t1; + +DROP TABLE t1; + +--source include/wait_until_count_sessions.inc + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/select_for_update_skip_locked_nowait.test b/storage/rocksdb/mysql-test/rocksdb/t/select_for_update_skip_locked_nowait.test new file mode 100644 index 0000000000000..c8548d968888a --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/select_for_update_skip_locked_nowait.test @@ -0,0 +1,46 @@ +############################################################################## +## SKIP LOCKED | NOWAIT are *not* supported for SELECT...FOR UPDATE in RocksDB + +--disable_warnings +drop table if exists t1; +--enable_warnings + +create table t1 (a int primary key) engine=rocksdb; + +insert into t1 values (1), (2), (3); + +### SKIP LOCKED + +--echo Should succeed since no table gets involved +select 1 for update skip locked; + +--error ER_NO_SUCH_TABLE +select * from nonexistence for update skip locked; + +--error ER_ILLEGAL_HA +select * from t1 for update skip locked; + +--error ER_ILLEGAL_HA +select * from t1 where a > 1 and a < 3 for update skip locked; + +--error ER_ILLEGAL_HA +insert into t1 select * from t1 for update skip locked; + +### NOWAIT + +--echo Should succeed since no table gets involved +select 1 for update nowait; + +--error ER_NO_SUCH_TABLE +select * from nonexistence for update nowait; + +--error ER_ILLEGAL_HA +select * from t1 for update nowait; + +--error ER_ILLEGAL_HA +select * from t1 where a > 1 and a < 3 for update nowait; + +--error ER_ILLEGAL_HA +insert into t1 select * from t1 for update nowait; + +drop table t1; diff 
--git a/storage/rocksdb/mysql-test/rocksdb/t/select_lock_in_share_mode.test b/storage/rocksdb/mysql-test/rocksdb/t/select_lock_in_share_mode.test new file mode 100644 index 0000000000000..23ce6d4523443 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/select_lock_in_share_mode.test @@ -0,0 +1,54 @@ +--source include/have_rocksdb.inc + +# +# SELECT .. LOCK IN SHARE MODE +# +# If the engine has its own lock timeouts, +# it makes sense to set them to minimum to decrease +# the duration of the test. + +--disable_warnings +DROP TABLE IF EXISTS t1; +--enable_warnings + +--enable_connect_log + +--source include/count_sessions.inc + +CREATE TABLE t1 (a INT, b CHAR(8), pk INT AUTO_INCREMENT PRIMARY KEY) ENGINE=rocksdb; +INSERT INTO t1 (a,b) VALUES (1,'a'),(2,'b'),(3,'a'); + +--connect (con1,localhost,root,,) +BEGIN; +--sorted_result +SELECT a,b FROM t1 WHERE b='a' LOCK IN SHARE MODE; + +--connection default +SET lock_wait_timeout = 1; + +# Should still be able to select + +--sorted_result +SELECT a,b FROM t1 WHERE b='a'; +--sorted_result +SELECT a,b FROM t1 WHERE b='a' LOCK IN SHARE MODE; + +--error ER_LOCK_WAIT_TIMEOUT +UPDATE t1 SET b='c' WHERE b='a'; + +--connection con1 +COMMIT; +--sorted_result +SELECT a,b FROM t1; + +--disconnect con1 +--connection default +# Now it can be updated all right +UPDATE t1 SET b='c' WHERE b='a'; +--sorted_result +SELECT a,b FROM t1; + +DROP TABLE t1; + +--source include/wait_until_count_sessions.inc + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/set_checkpoint.inc b/storage/rocksdb/mysql-test/rocksdb/t/set_checkpoint.inc new file mode 100644 index 0000000000000..283afd3d5f83d --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/set_checkpoint.inc @@ -0,0 +1,30 @@ +# Usage: +# let $checkpoint = ; +# let $succeeds = <1 if checkpoint creation should succeed, 0 otherwise>; +# --source set_checkpoint.inc + + +if ($succeeds) +{ + # Create checkpoint + --replace_result '$checkpoint' [CHECKPOINT] + eval SET GLOBAL 
ROCKSDB_CREATE_CHECKPOINT = '$checkpoint'; + + # Check checkpoint + --list_files $checkpoint CURRENT + + # Cleanup + --remove_files_wildcard $checkpoint * + --rmdir $checkpoint + --disable_abort_on_error + --enable_abort_on_error +} +if (!$succeeds) +{ + --disable_result_log + --disable_query_log + --error ER_UNKNOWN_ERROR + eval SET GLOBAL ROCKSDB_CREATE_CHECKPOINT = '$checkpoint'; + --enable_query_log + --enable_result_log +} diff --git a/storage/rocksdb/mysql-test/rocksdb/t/show_engine.test b/storage/rocksdb/mysql-test/rocksdb/t/show_engine.test new file mode 100644 index 0000000000000..914bc4b100215 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/show_engine.test @@ -0,0 +1,83 @@ +--source include/have_rocksdb.inc +--source include/have_partition.inc + +--source include/restart_mysqld.inc + +# +# SHOW ENGINE STATUS command +# Checking that the command doesn't produce an error. +# If it starts producing an actual result, the result file +# will need to be updated, and possibly masked. + +--disable_warnings +DROP TABLE IF EXISTS t1; +DROP TABLE IF EXISTS t2; +DROP TABLE IF EXISTS t3; +DROP TABLE IF EXISTS t4; +--enable_warnings + +CREATE TABLE t1 (i INT, PRIMARY KEY (i) COMMENT 'cf_t1') ENGINE = ROCKSDB; +CREATE TABLE t2 (j INT, PRIMARY KEY (j) COMMENT 'rev:cf_t2') ENGINE = ROCKSDB; +CREATE TABLE t3 (k INT, PRIMARY KEY (k) COMMENT 'cf_t1') ENGINE = ROCKSDB; + +# With partition based column family creation we now expect all the partitions +# to belong to a default column family because mapping wasn't specified in +# this case. 
+CREATE TABLE t4 (l INT, PRIMARY KEY (l) COMMENT 'cf_t4') ENGINE = ROCKSDB + PARTITION BY KEY(l) PARTITIONS 4; + +--replace_column 3 # +SHOW ENGINE rocksdb STATUS; + +INSERT INTO t1 VALUES (1), (2), (3); +SELECT COUNT(*) FROM t1; + +INSERT INTO t2 VALUES (1), (2), (3), (4); +SELECT COUNT(*) FROM t2; + +INSERT INTO t4 VALUES (1), (2), (3), (4), (5); +SELECT COUNT(*) FROM t4; + +# Fetch data from information schema as well +--replace_column 3 # +SELECT * FROM INFORMATION_SCHEMA.ROCKSDB_CFSTATS; + +--replace_column 2 # +SELECT * FROM INFORMATION_SCHEMA.ROCKSDB_DBSTATS; + +SELECT TABLE_SCHEMA, TABLE_NAME, PARTITION_NAME, COUNT(STAT_TYPE) +FROM INFORMATION_SCHEMA.ROCKSDB_PERF_CONTEXT +WHERE TABLE_SCHEMA = 'test' +GROUP BY TABLE_NAME, PARTITION_NAME; + +--replace_column 3 # +SELECT * FROM INFORMATION_SCHEMA.ROCKSDB_CF_OPTIONS; + +DROP TABLE t1; +DROP TABLE t2; +DROP TABLE t3; +DROP TABLE t4; + +SHOW ENGINE rocksdb MUTEX; +# For SHOW ALL MUTEX even the number of lines is volatile, so the result logging is disabled +--disable_result_log +SHOW ENGINE ALL MUTEX; +--enable_result_log + +# The output from SHOW ENGINE ROCKSDB TRANSACTION STATUS has some +# non-deterministic results. Replace the timestamp with 'TIMESTAMP', the +# number of seconds active with 'NUM', the thread id with 'TID' and the thread +# pointer with 'PTR'. This test may fail in the future if it is being run in +# parallel with other tests as the number of snapshots would then be greater +# than expected. We may need to turn off the result log if that is the case. 
+--replace_regex /[0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}:[0-9]{2}/TIMESTAMP/ /(ACTIVE) [0-9]+ /\1 NUM / /(thread id) [0-9]+/\1 TID/ /0x[0-9a-f]+/PTR/ +SHOW ENGINE rocksdb TRANSACTION STATUS; + +START TRANSACTION WITH CONSISTENT SNAPSHOT; + +#select sleep(10); +--replace_regex /[0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}:[0-9]{2}/TIMESTAMP/ /(ACTIVE) [0-9]+ /\1 NUM / /(thread id) [0-9]+/\1 TID/ /0x[0-9a-f]+/PTR/ /(query id) [0-9]+/\1 QID/ /(root) [a-z ]+/\1 ACTION/ +SHOW ENGINE rocksdb TRANSACTION STATUS; + +ROLLBACK; + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/show_table_status-master.opt b/storage/rocksdb/mysql-test/rocksdb/t/show_table_status-master.opt new file mode 100644 index 0000000000000..843f7012cfa3e --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/show_table_status-master.opt @@ -0,0 +1,3 @@ +--rocksdb_debug_optimizer_n_rows=1000 +--rocksdb_table_stats_sampling_pct=100 +--userstat=ON diff --git a/storage/rocksdb/mysql-test/rocksdb/t/show_table_status.test b/storage/rocksdb/mysql-test/rocksdb/t/show_table_status.test new file mode 100644 index 0000000000000..a293b9ee6b8fa --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/show_table_status.test @@ -0,0 +1,65 @@ +--source include/have_rocksdb.inc +--source include/not_windows.inc # decorated database names is too long, exceeded OS limits + +# +# SHOW TABLE STATUS statement +# + +################################### +# TODO: +# The result file is likely to change +# if MDEV-4197 is fixed +################################### + +--disable_warnings +DROP TABLE IF EXISTS t1, t2, t3; +--enable_warnings + +CREATE TABLE t1 (a INT, b CHAR(8) PRIMARY KEY) ENGINE=rocksdb; +INSERT INTO t1 (a,b) VALUES (100,'a'),(2,'foo'); + +CREATE TABLE t2 (a INT PRIMARY KEY, b CHAR(8)) ENGINE=rocksdb; +INSERT INTO t2 (a,b) VALUES (1,'bar'); + +set global rocksdb_force_flush_memtable_now = true; + +CREATE TABLE t3 (a INT, b CHAR(8), pk INT PRIMARY KEY) ENGINE=rocksdb CHARACTER SET utf8; + +--replace_column 
6 # 7 # +SHOW TABLE STATUS WHERE name IN ( 't1', 't2', 't3' ); + +# Some statistics don't get updated as quickly. The Data_length and +# Avg_row_length are trailing statistics, meaning they don't get updated +# for the current SST until the next SST is written. Insert a bunch of data, +# then flush, then insert a bit more and do another flush to get them to show +# up. + +--disable_query_log +let $count = 2; +let $max = 10000; +while ($count < $max) { + eval INSERT INTO t2 (a) VALUES ($count); + inc $count; +} + +set global rocksdb_force_flush_memtable_now = true; +eval INSERT INTO t2 (a) VALUES ($max); +set global rocksdb_force_flush_memtable_now = true; +--enable_query_log + +# We expect the number of rows to be 10000. Data_len and Avg_row_len +# may vary, depending on built-in compression library. +--replace_column 6 # 7 # +SHOW TABLE STATUS WHERE name LIKE 't2'; +DROP TABLE t1, t2, t3; + +# +# Confirm that long db and table names work. +# + +CREATE DATABASE `db_new..............................................end`; +USE `db_new..............................................end`; +CREATE TABLE `t1_new..............................................end`(a int) engine=rocksdb; +INSERT INTO `t1_new..............................................end` VALUES (1); +--query_vertical SELECT TABLE_SCHEMA, TABLE_NAME FROM information_schema.table_statistics WHERE TABLE_NAME = 't1_new..............................................end' +DROP DATABASE `db_new..............................................end`; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/shutdown-master.opt b/storage/rocksdb/mysql-test/rocksdb/t/shutdown-master.opt new file mode 100644 index 0000000000000..d6c7939eae670 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/shutdown-master.opt @@ -0,0 +1 @@ +--log-bin --binlog_format=row --rocksdb_default_cf_options=write_buffer_size=64k diff --git a/storage/rocksdb/mysql-test/rocksdb/t/shutdown.test b/storage/rocksdb/mysql-test/rocksdb/t/shutdown.test new 
file mode 100644 index 0000000000000..ba625deb5141b --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/shutdown.test @@ -0,0 +1,36 @@ +--source include/have_rocksdb.inc + +--disable_warnings +DROP TABLE IF EXISTS t1; +--enable_warnings + +# Ensure bin log is enabled. +SHOW GLOBAL VARIABLES LIKE "log_bin"; + +# Create the table and insert some keys +CREATE TABLE t1 (i INT, PRIMARY KEY (i) COMMENT 'cf_t1') ENGINE = ROCKSDB; + +--disable_query_log +let $max = 1000; +let $i = 1; +while ($i <= $max) { + let $insert = INSERT INTO t1 VALUES ($i); + inc $i; + eval $insert; +} +--enable_query_log + +# Restart the server +let $restart_file= $MYSQLTEST_VARDIR/tmp/mysqld.1.expect; +--exec echo "wait" > $restart_file +--shutdown_server 10 +--source include/wait_until_disconnected.inc +-- exec echo "restart" > $MYSQLTEST_VARDIR/tmp/mysqld.1.expect +-- enable_reconnect +-- source include/wait_until_connected_again.inc + +# Verify table has correct rows +SELECT COUNT(*) FROM t1; + +#cleanup +DROP TABLE t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/singledelete-master.opt b/storage/rocksdb/mysql-test/rocksdb/t/singledelete-master.opt new file mode 100644 index 0000000000000..72b3af6bcf739 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/singledelete-master.opt @@ -0,0 +1 @@ +--rocksdb_default_cf_options=write_buffer_size=16k diff --git a/storage/rocksdb/mysql-test/rocksdb/t/singledelete.test b/storage/rocksdb/mysql-test/rocksdb/t/singledelete.test new file mode 100644 index 0000000000000..718f6b7202eeb --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/singledelete.test @@ -0,0 +1,89 @@ +--source include/have_rocksdb.inc + +# only SingleDelete increases +CREATE TABLE t1 (id INT, value int, PRIMARY KEY (id), INDEX (value)) ENGINE=RocksDB; +INSERT INTO t1 VALUES (1,1); +select variable_value into @s from information_schema.global_status where variable_name='rocksdb_number_sst_entry_singledelete'; +select variable_value into @d from 
information_schema.global_status where variable_name='rocksdb_number_sst_entry_delete'; +--disable_query_log +let $i = 1; +while ($i <= 10000) { + let $update = UPDATE t1 SET value=value+1 WHERE value=$i; + inc $i; + eval $update; +} +--enable_query_log +optimize table t1; +select case when variable_value-@s > 5 and variable_value-@s < 100 then 'true' else 'false' end from information_schema.global_status where variable_name='rocksdb_number_sst_entry_singledelete'; +select case when variable_value-@d < 10 then 'true' else 'false' end from information_schema.global_status where variable_name='rocksdb_number_sst_entry_delete'; + + +# both SingleDelete and Delete increases +CREATE TABLE t2 (id INT, value int, PRIMARY KEY (id), INDEX (value)) ENGINE=RocksDB; +INSERT INTO t2 VALUES (1,1); +select variable_value into @s from information_schema.global_status where variable_name='rocksdb_number_sst_entry_singledelete'; +select variable_value into @d from information_schema.global_status where variable_name='rocksdb_number_sst_entry_delete'; +--disable_query_log +let $i = 1; +while ($i <= 10000) { + let $update = UPDATE t2 SET id=id+1 WHERE id=$i; + inc $i; + eval $update; +} +--enable_query_log +optimize table t2; +select case when variable_value-@s > 5 and variable_value-@s < 100 then 'true' else 'false' end from information_schema.global_status where variable_name='rocksdb_number_sst_entry_singledelete'; +select case when variable_value-@d > 9000 then 'true' else 'false' end from information_schema.global_status where variable_name='rocksdb_number_sst_entry_delete'; + +# only Delete increases +CREATE TABLE t3 (id INT, value int, PRIMARY KEY (id)) ENGINE=RocksDB; +INSERT INTO t3 VALUES (1,1); +select variable_value into @s from information_schema.global_status where variable_name='rocksdb_number_sst_entry_singledelete'; +select variable_value into @d from information_schema.global_status where variable_name='rocksdb_number_sst_entry_delete'; +--disable_query_log +let $i = 
1; +while ($i <= 10000) { + let $update = UPDATE t3 SET id=id+1 WHERE id=$i; + inc $i; + eval $update; +} +--enable_query_log +optimize table t3; +select case when variable_value-@s = 0 then 'true' else 'false' end from information_schema.global_status where variable_name='rocksdb_number_sst_entry_singledelete'; +select case when variable_value-@d > 9000 then 'true' else 'false' end from information_schema.global_status where variable_name='rocksdb_number_sst_entry_delete'; + +# only SingleDelete increases +CREATE TABLE t4 (id INT, PRIMARY KEY (id)) ENGINE=RocksDB; +INSERT INTO t4 VALUES (1); +select variable_value into @s from information_schema.global_status where variable_name='rocksdb_number_sst_entry_singledelete'; +select variable_value into @d from information_schema.global_status where variable_name='rocksdb_number_sst_entry_delete'; +--disable_query_log +let $i = 1; +while ($i <= 10000) { + let $update = UPDATE t4 SET id=id+1 WHERE id=$i; + inc $i; + eval $update; +} +--enable_query_log +optimize table t4; +select case when variable_value-@s > 5 and variable_value-@s < 100 then 'true' else 'false' end from information_schema.global_status where variable_name='rocksdb_number_sst_entry_singledelete'; +select case when variable_value-@d < 10 then 'true' else 'false' end from information_schema.global_status where variable_name='rocksdb_number_sst_entry_delete'; + +# only SingleDelete increases +CREATE TABLE t5 (id1 INT, id2 INT, PRIMARY KEY (id1, id2), INDEX(id2)) ENGINE=RocksDB; +INSERT INTO t5 VALUES (1, 1); +select variable_value into @s from information_schema.global_status where variable_name='rocksdb_number_sst_entry_singledelete'; +select variable_value into @d from information_schema.global_status where variable_name='rocksdb_number_sst_entry_delete'; +--disable_query_log +let $i = 1; +while ($i <= 10000) { + let $update = UPDATE t5 SET id1=id1+1 WHERE id1=$i; + inc $i; + eval $update; +} +--enable_query_log +optimize table t5; +select case when 
variable_value-@s > 5 and variable_value-@s < 100 then 'true' else 'false' end from information_schema.global_status where variable_name='rocksdb_number_sst_entry_singledelete'; +select case when variable_value-@d < 10 then 'true' else 'false' end from information_schema.global_status where variable_name='rocksdb_number_sst_entry_delete'; + +DROP TABLE t1, t2, t3, t4, t5; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/slow_query_log-master.opt b/storage/rocksdb/mysql-test/rocksdb/t/slow_query_log-master.opt new file mode 100644 index 0000000000000..fc5c3ed4c7afe --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/slow_query_log-master.opt @@ -0,0 +1 @@ +--log-slow-extra --rocksdb-perf-context-level=2 diff --git a/storage/rocksdb/mysql-test/rocksdb/t/slow_query_log.test b/storage/rocksdb/mysql-test/rocksdb/t/slow_query_log.test new file mode 100644 index 0000000000000..9f1694ab8bd2a --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/slow_query_log.test @@ -0,0 +1,37 @@ +--source include/have_rocksdb.inc +#Unixism (exec awk) +-- source include/not_windows.inc + +SET @cur_long_query_time = @@long_query_time; +# Set the long query time to something big so that nothing unexpected gets into it +SET @@long_query_time = 600; +# Test the slow query log feature + +--disable_warnings +DROP TABLE IF EXISTS t1; +--enable_warnings + +CREATE TABLE t1 (id INT PRIMARY KEY, value INT) ENGINE=ROCKSDB; + +--disable_query_log +let $max = 10000; +let $i = 1; +while ($i < $max) { + let $insert = INSERT INTO t1 VALUES ($i, $i); + inc $i; + eval $insert; +} + +DELETE FROM t1 WHERE id < 2500; +--enable_query_log + +SET @@long_query_time = 0; +# we expect this query to be reflected in the slow query log +SELECT COUNT(*) FROM t1; + +SET @@long_query_time = @cur_long_query_time; + +# Verify the output of the slow query log contains counts for the skipped keys +--exec awk -f suite/rocksdb/slow_query_log.awk $MYSQLTEST_VARDIR/mysqld.1/mysqld-slow.log + +DROP TABLE t1; diff --git 
a/storage/rocksdb/mysql-test/rocksdb/t/statistics-master.opt b/storage/rocksdb/mysql-test/rocksdb/t/statistics-master.opt new file mode 100644 index 0000000000000..8a56deb029907 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/statistics-master.opt @@ -0,0 +1,3 @@ +--rocksdb_default_cf_options=max_write_buffer_number_to_maintain=10 +--rocksdb_debug_optimizer_n_rows=1000 +--rocksdb_table_stats_sampling_pct=100 diff --git a/storage/rocksdb/mysql-test/rocksdb/t/statistics.test b/storage/rocksdb/mysql-test/rocksdb/t/statistics.test new file mode 100644 index 0000000000000..70fc2f72b7ec0 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/statistics.test @@ -0,0 +1,74 @@ +--source include/have_rocksdb.inc + +--disable_warnings +DROP TABLE IF EXISTS t1; +DROP TABLE IF EXISTS t2; +DROP TABLE IF EXISTS t3; +--enable_warnings + +# table with index in default CF +create table t1( + id bigint not null primary key auto_increment, + a varchar(255) not null, + b bigint, + index t1_1(b) +) engine=rocksdb; + +# a table with index in a different CF +create table t2( + id bigint not null primary key auto_increment, + a varchar(255) not null, + b bigint, + index t2_1(b) comment 'cf_t3' +) engine=rocksdb; + +# a table with index in a reverse CF +create table t3( + id bigint not null primary key auto_increment, + a varchar(255) not null, + b bigint, + index t3_1(b) comment 'rev:cf_t4' +) engine=rocksdb; + +--disable_query_log +let $i=0; +while ($i<100000) +{ + inc $i; + eval insert t1(a,b) values(concat('a',$i,'b',$i,'c',$i), $i); + if ($i<5000) + { + eval insert t2(a,b) values(concat('a',$i,'b',$i,'c',$i), $i); + eval insert t3(a,b) values(concat('a',$i,'b',$i,'c',$i), $i); + } +} +--enable_query_log + +# should have some statistics before the memtable flush +SELECT table_name, table_rows FROM information_schema.tables WHERE table_schema = DATABASE() and table_name <> 't1'; + +# due to inconsistencies in when the memtable is flushed, just verify t1 has fewer +# than the
expected number of rows. +SELECT CASE WHEN table_rows < 100000 then 'true' else 'false' end from information_schema.tables where table_name = 't1'; + +# flush and get even better statistics +set global rocksdb_force_flush_memtable_now = true; +SELECT table_name, table_rows FROM information_schema.tables WHERE table_schema = DATABASE(); +SELECT table_name, data_length>0, index_length>0 FROM information_schema.tables WHERE table_schema = DATABASE(); + +# restart the server, check the stats +--source include/restart_mysqld.inc + +# give the server a chance to load in statistics +--sleep 5 + +SELECT table_name, table_rows FROM information_schema.tables WHERE table_schema = DATABASE(); +SELECT table_name, data_length>0, index_length>0 FROM information_schema.tables WHERE table_schema = DATABASE(); + +analyze table t1,t2,t3,t4,t5; + +# make sure that stats do not change after calling analyze table +SELECT table_name, table_rows FROM information_schema.tables WHERE table_schema = DATABASE(); +SELECT table_name, data_length>0, index_length>0 FROM information_schema.tables WHERE table_schema = DATABASE(); + +drop table t1, t2, t3; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/table_stats-master.opt b/storage/rocksdb/mysql-test/rocksdb/t/table_stats-master.opt new file mode 100644 index 0000000000000..be8a06eacae4c --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/table_stats-master.opt @@ -0,0 +1 @@ +--userstat diff --git a/storage/rocksdb/mysql-test/rocksdb/t/table_stats.test b/storage/rocksdb/mysql-test/rocksdb/t/table_stats.test new file mode 100644 index 0000000000000..3eb58098372dd --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/table_stats.test @@ -0,0 +1,29 @@ +--source include/have_rocksdb.inc + +--disable_warnings +DROP TABLE IF EXISTS t1; +--enable_warnings + +# Create the table and insert some keys +CREATE TABLE t1 (i INT, PRIMARY KEY (i) COMMENT 'cf_t1') ENGINE = ROCKSDB; + +--disable_query_log +let $max = 1000; +let $i = 1; +while ($i <= 
$max) { + let $insert = INSERT INTO t1 VALUES ($i); + inc $i; + eval $insert; +} +--enable_query_log + +# Verify table has correct rows +SELECT COUNT(*) FROM t1; + +# Verify the table stats are returned +--vertical_results +SELECT * FROM INFORMATION_SCHEMA.TABLE_STATISTICS WHERE TABLE_NAME = "t1"; +--horizontal_results + +#cleanup +DROP TABLE t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/tbl_opt_ai.test b/storage/rocksdb/mysql-test/rocksdb/t/tbl_opt_ai.test new file mode 100644 index 0000000000000..8fb4539b4018a --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/tbl_opt_ai.test @@ -0,0 +1,29 @@ +--source include/have_rocksdb.inc + +# +# Check whether AUTO_INCREMENT option +# is supported in CREATE and ALTER TABLE +# + +--disable_warnings +DROP TABLE IF EXISTS t1; +--enable_warnings + +CREATE TABLE t1 (a INT AUTO_INCREMENT PRIMARY KEY) ENGINE=rocksdb AUTO_INCREMENT=10; +SHOW CREATE TABLE t1; +INSERT INTO t1 VALUES (NULL); +SELECT * FROM t1; + +ALTER TABLE t1 AUTO_INCREMENT=100; +SHOW CREATE TABLE t1; +INSERT INTO t1 VALUES (NULL); +SELECT * FROM t1 ORDER BY a; + +ALTER TABLE t1 AUTO_INCREMENT=50; +SHOW CREATE TABLE t1; +INSERT INTO t1 VALUES (NULL); +SELECT * FROM t1 ORDER BY a; + +DROP TABLE t1; + + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/tbl_opt_avg_row_length.test b/storage/rocksdb/mysql-test/rocksdb/t/tbl_opt_avg_row_length.test new file mode 100644 index 0000000000000..3e6797a8686a3 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/tbl_opt_avg_row_length.test @@ -0,0 +1,23 @@ +--source include/have_rocksdb.inc + +# +# Check whether AVG_ROW_LENGTH option +# is supported in CREATE and ALTER TABLE +# +# Note: the test does not check whether the option +# has any real effect on the table, only +# that it's accepted +# + +--disable_warnings +DROP TABLE IF EXISTS t1; +--enable_warnings + +CREATE TABLE t1 (a INT PRIMARY KEY, b CHAR(8)) ENGINE=rocksdb AVG_ROW_LENGTH=300; +SHOW CREATE TABLE t1; + +ALTER TABLE t1 AVG_ROW_LENGTH=30000000; 
+SHOW CREATE TABLE t1; + +DROP TABLE t1; + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/tbl_opt_checksum.test b/storage/rocksdb/mysql-test/rocksdb/t/tbl_opt_checksum.test new file mode 100644 index 0000000000000..3b49b967937d5 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/tbl_opt_checksum.test @@ -0,0 +1,19 @@ +--source include/have_rocksdb.inc + +# +# Check whether CHECKSUM option is supported +# in CREATE and ALTER TABLE. +# + +--disable_warnings +DROP TABLE IF EXISTS t1; +--enable_warnings + +CREATE TABLE t1 (a INT PRIMARY KEY, b CHAR(8)) ENGINE=rocksdb CHECKSUM=1; +SHOW CREATE TABLE t1; + +ALTER TABLE t1 CHECKSUM=0; +SHOW CREATE TABLE t1; + +DROP TABLE t1; + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/tbl_opt_connection.test b/storage/rocksdb/mysql-test/rocksdb/t/tbl_opt_connection.test new file mode 100644 index 0000000000000..b97b3dd9d4cbc --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/tbl_opt_connection.test @@ -0,0 +1,32 @@ +--source include/have_rocksdb.inc + +# +# Check whether CONNECTION option is supported +# is supported in CREATE and ALTER TABLE +# +# Note: the test does not check whether the option +# has any real effect on the table, only +# that it's accepted +# + +--disable_warnings +DROP TABLE IF EXISTS t1; +--enable_warnings + +CREATE DATABASE test_remote; +CREATE SERVER test_connection FOREIGN DATA WRAPPER mysql +OPTIONS (USER 'root', HOST 'localhost', DATABASE 'test_remote'); +CREATE SERVER test_connection2 FOREIGN DATA WRAPPER mysql +OPTIONS (USER 'root', HOST 'localhost', DATABASE 'test_remote'); + +CREATE TABLE t1 (a INT PRIMARY KEY, b CHAR(8)) ENGINE=rocksdb CONNECTION='test_connection'; +SHOW CREATE TABLE t1; +ALTER TABLE t1 CONNECTION='test_connection2'; +SHOW CREATE TABLE t1; + +DROP TABLE t1; + +DROP SERVER test_connection; +DROP SERVER test_connection2; +DROP DATABASE test_remote; + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/tbl_opt_data_index_dir.test 
b/storage/rocksdb/mysql-test/rocksdb/t/tbl_opt_data_index_dir.test new file mode 100644 index 0000000000000..99cb2253d94d0 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/tbl_opt_data_index_dir.test @@ -0,0 +1,46 @@ +--source include/have_rocksdb.inc + +--source include/have_partition.inc +--source include/not_windows.inc + +# +# Check that when either DATA DIRECTORY or INDEX DIRECTORY are specified +# then MyRocks returns an appropriate error. We don't support this +# functionality and therefore shouldn't just silently accept the values. +# + +--disable_warnings +DROP TABLE IF EXISTS t1; +--enable_warnings + +--error ER_CANT_CREATE_TABLE +CREATE TABLE t1 (a INT PRIMARY KEY, b CHAR(8)) ENGINE=rocksdb DATA DIRECTORY = '/foo/bar/data'; +show warnings; + +--error ER_CANT_CREATE_TABLE +CREATE TABLE t1 (a INT PRIMARY KEY, b CHAR(8)) ENGINE=rocksdb INDEX DIRECTORY = '/foo/bar/index'; +show warnings; + +# +# Verify that we'll get the same error codes when using the partitions. +# + +--error ER_CANT_CREATE_TABLE +CREATE TABLE t1 (id INT NOT NULL PRIMARY KEY) ENGINE=rocksdb PARTITION BY RANGE (id) +( + PARTITION P0 VALUES LESS THAN (1000) + DATA DIRECTORY = '/foo/bar/data/', + PARTITION P1 VALUES LESS THAN (2000) + DATA DIRECTORY = '/foo/bar/data/', + PARTITION P2 VALUES LESS THAN (MAXVALUE) +); + +--error ER_CANT_CREATE_TABLE +CREATE TABLE t1 (id int not null primary key) ENGINE=rocksdb PARTITION BY RANGE (id) +( + PARTITION P0 VALUES LESS THAN (1000) + INDEX DIRECTORY = '/foo/bar/data/', + PARTITION P1 VALUES LESS THAN (2000) + INDEX DIRECTORY = '/foo/bar/data/', + PARTITION P2 VALUES LESS THAN (MAXVALUE) +); diff --git a/storage/rocksdb/mysql-test/rocksdb/t/tbl_opt_delay_key_write.test b/storage/rocksdb/mysql-test/rocksdb/t/tbl_opt_delay_key_write.test new file mode 100644 index 0000000000000..85cd45e969d1e --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/tbl_opt_delay_key_write.test @@ -0,0 +1,23 @@ +--source include/have_rocksdb.inc + +# +# Check 
whether DELAY_KEY_WRITE option +# is supported in CREATE and ALTER TABLE +# +# Note: the test does not check whether the option +# has any real effect on the table, only +# that it's accepted +# + +--disable_warnings +DROP TABLE IF EXISTS t1; +--enable_warnings + +CREATE TABLE t1 (a INT PRIMARY KEY, b CHAR(8)) ENGINE=rocksdb DELAY_KEY_WRITE=1; +SHOW CREATE TABLE t1; + +ALTER TABLE t1 DELAY_KEY_WRITE=0; +SHOW CREATE TABLE t1; + +DROP TABLE t1; + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/tbl_opt_insert_method.test b/storage/rocksdb/mysql-test/rocksdb/t/tbl_opt_insert_method.test new file mode 100644 index 0000000000000..e289827ac7213 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/tbl_opt_insert_method.test @@ -0,0 +1,23 @@ +--source include/have_rocksdb.inc + +# +# Check whether INSERT_METHOD option +# is supported in CREATE and ALTER TABLE +# +# Note: the test does not check whether the option +# has any real effect on the table, only +# that it's accepted (and apparently ignored) +# + +--disable_warnings +DROP TABLE IF EXISTS t1; +--enable_warnings + +CREATE TABLE t1 (a INT PRIMARY KEY, b CHAR(8)) ENGINE=rocksdb INSERT_METHOD=FIRST; +SHOW CREATE TABLE t1; + +ALTER TABLE t1 INSERT_METHOD=NO; +SHOW CREATE TABLE t1; + +DROP TABLE t1; + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/tbl_opt_key_block_size.test b/storage/rocksdb/mysql-test/rocksdb/t/tbl_opt_key_block_size.test new file mode 100644 index 0000000000000..d927c785ae924 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/tbl_opt_key_block_size.test @@ -0,0 +1,23 @@ +--source include/have_rocksdb.inc + +# +# Check whether KEY_BLOCK_SIZE option +# is supported in CREATE and ALTER TABLE +# +# Note: the test does not check whether the option +# has any real effect on the table, only +# that it's accepted +# + +--disable_warnings +DROP TABLE IF EXISTS t1; +--enable_warnings + +CREATE TABLE t1 (a INT PRIMARY KEY, b CHAR(8)) ENGINE=rocksdb KEY_BLOCK_SIZE=8; +SHOW CREATE TABLE t1; + +ALTER 
TABLE t1 KEY_BLOCK_SIZE=1; +SHOW CREATE TABLE t1; + +DROP TABLE t1; + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/tbl_opt_max_rows.test b/storage/rocksdb/mysql-test/rocksdb/t/tbl_opt_max_rows.test new file mode 100644 index 0000000000000..35aa0f4dafaa0 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/tbl_opt_max_rows.test @@ -0,0 +1,23 @@ +--source include/have_rocksdb.inc + +# +# Check whether MAX_ROWS option +# is supported in CREATE and ALTER TABLE +# +# Note: the test does not check whether the option +# has any real effect on the table, only +# that it's accepted +# + +--disable_warnings +DROP TABLE IF EXISTS t1; +--enable_warnings + +CREATE TABLE t1 (a INT PRIMARY KEY, b CHAR(8)) ENGINE=rocksdb MAX_ROWS=10000000; +SHOW CREATE TABLE t1; + +ALTER TABLE t1 MAX_ROWS=30000000; +SHOW CREATE TABLE t1; + +DROP TABLE t1; + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/tbl_opt_min_rows.test b/storage/rocksdb/mysql-test/rocksdb/t/tbl_opt_min_rows.test new file mode 100644 index 0000000000000..d62a8771ea31f --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/tbl_opt_min_rows.test @@ -0,0 +1,23 @@ +--source include/have_rocksdb.inc + +# +# Check whether MIN_ROWS option +# is supported in CREATE and ALTER TABLE +# +# Note: the test does not check whether the option +# has any real effect on the table, only +# that it's accepted +# + +--disable_warnings +DROP TABLE IF EXISTS t1; +--enable_warnings + +CREATE TABLE t1 (a INT PRIMARY KEY, b CHAR(8)) ENGINE=rocksdb MIN_ROWS=1; +SHOW CREATE TABLE t1; + +ALTER TABLE t1 MIN_ROWS=10000; +SHOW CREATE TABLE t1; + +DROP TABLE t1; + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/tbl_opt_pack_keys.test b/storage/rocksdb/mysql-test/rocksdb/t/tbl_opt_pack_keys.test new file mode 100644 index 0000000000000..acdb612b4d4fe --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/tbl_opt_pack_keys.test @@ -0,0 +1,23 @@ +--source include/have_rocksdb.inc + +# +# Check whether PACK KEYS option +# is supported in CREATE 
and ALTER TABLE +# +# Note: the test does not check whether the option +# has any real effect on the table, only +# that it's accepted +# + +--disable_warnings +DROP TABLE IF EXISTS t1; +--enable_warnings + +CREATE TABLE t1 (a INT PRIMARY KEY, b CHAR(8)) ENGINE=rocksdb PACK_KEYS=1; +SHOW CREATE TABLE t1; + +ALTER TABLE t1 PACK_KEYS=0; +SHOW CREATE TABLE t1; + +DROP TABLE t1; + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/tbl_opt_password.test b/storage/rocksdb/mysql-test/rocksdb/t/tbl_opt_password.test new file mode 100644 index 0000000000000..e897992e93307 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/tbl_opt_password.test @@ -0,0 +1,27 @@ +--source include/have_rocksdb.inc + +# +# Check whether PASSWORD option +# is supported in CREATE and ALTER TABLE +# +# Note: the test does not check whether the option +# has any real effect on the table, only +# that it's accepted +# +# This option is not supported by any known engines, +# that's why the result file does not contain it; +# but it's syntactically acceptable. 
+# + +--disable_warnings +DROP TABLE IF EXISTS t1; +--enable_warnings + +CREATE TABLE t1 (a INT PRIMARY KEY, b CHAR(8)) ENGINE=rocksdb PASSWORD='password'; +SHOW CREATE TABLE t1; + +ALTER TABLE t1 PASSWORD='new_password'; +SHOW CREATE TABLE t1; + +DROP TABLE t1; + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/tbl_opt_row_format.test b/storage/rocksdb/mysql-test/rocksdb/t/tbl_opt_row_format.test new file mode 100644 index 0000000000000..de834d238efd9 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/tbl_opt_row_format.test @@ -0,0 +1,23 @@ +--source include/have_rocksdb.inc + +# +# Check whether ROW_FORMAT option +# is supported in CREATE and ALTER TABLE +# +# Note: the test does not check whether the option +# has any real effect on the table, only +# that it's accepted +# + +--disable_warnings +DROP TABLE IF EXISTS t1; +--enable_warnings + +CREATE TABLE t1 (a INT, b CHAR(8) PRIMARY KEY) ENGINE=rocksdb ROW_FORMAT=FIXED; +SHOW CREATE TABLE t1; + +ALTER TABLE t1 ROW_FORMAT=DYNAMIC; +SHOW CREATE TABLE t1; + +DROP TABLE t1; + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/tbl_opt_union.test b/storage/rocksdb/mysql-test/rocksdb/t/tbl_opt_union.test new file mode 100644 index 0000000000000..d3c371b18c7a9 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/tbl_opt_union.test @@ -0,0 +1,28 @@ +--source include/have_rocksdb.inc + +# +# Check whether UNION option +# is supported in CREATE and ALTER TABLE +# +# Note: the test does not check whether the option +# has any real effect on the table, only +# that it's accepted +# + +--disable_warnings +DROP TABLE IF EXISTS t1, child1, child2; +--enable_warnings + +--disable_query_log +CREATE TABLE child1 (a INT PRIMARY KEY) ENGINE=MyISAM; +CREATE TABLE child2 (a INT PRIMARY KEY) ENGINE=MyISAM; +--enable_query_log + +CREATE TABLE t1 (a INT PRIMARY KEY) ENGINE=rocksdb UNION(child1); +SHOW CREATE TABLE t1; + +ALTER TABLE t1 UNION = (child1,child2); +SHOW CREATE TABLE t1; + +DROP TABLE t1, child1, child2; + diff 
--git a/storage/rocksdb/mysql-test/rocksdb/t/tbl_standard_opts.test b/storage/rocksdb/mysql-test/rocksdb/t/tbl_standard_opts.test new file mode 100644 index 0000000000000..5d60c02a7e644 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/tbl_standard_opts.test @@ -0,0 +1,42 @@ +--source include/have_rocksdb.inc + +# +# Standard options in CREATE and ALTER TABLE +# +# Note: the test does not check whether the options +# have any real effect on the table, only +# that they are accepted +# + +--disable_warnings +DROP TABLE IF EXISTS t1; +--enable_warnings + +# Create table with standard options + +CREATE TABLE t1 (a INT PRIMARY KEY, b CHAR(8)) ENGINE=rocksdb + DEFAULT CHARACTER SET = utf8 + COLLATE = utf8_general_ci + COMMENT = 'standard table options' +; +SHOW CREATE TABLE t1; + +# Alter comment + +ALTER TABLE t1 COMMENT = 'table altered'; +SHOW CREATE TABLE t1; + +# Alter ENGINE value + +ALTER TABLE t1 ENGINE=MEMORY; +SHOW CREATE TABLE t1; +ALTER TABLE t1 ENGINE=rocksdb; +SHOW CREATE TABLE t1; + +# Alter character set and collation + +ALTER TABLE t1 CHARACTER SET = latin1 COLLATE = latin1_swedish_ci; +SHOW CREATE TABLE t1; + +DROP TABLE t1; + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/tmpdir.test b/storage/rocksdb/mysql-test/rocksdb/t/tmpdir.test new file mode 100644 index 0000000000000..28615264d35e2 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/tmpdir.test @@ -0,0 +1,35 @@ +--source include/have_rocksdb.inc + +--echo # If rocksdb_tmpdir is NULL or "", temporary file will be created in +--echo # server configuration variable location(--tmpdir) + +CREATE TABLE t1 (a INT PRIMARY KEY, b INT) ENGINE=RocksDB; +show session variables like 'rocksdb_tmpdir'; +--echo # Connection con1 +connect (con1,localhost,root); +show session variables like 'rocksdb_tmpdir'; + +ALTER TABLE t1 ADD INDEX kb(b), ALGORITHM=INPLACE; +connection default; +disconnect con1; +DROP TABLE t1; + +--echo # rocksdb_tmpdir with valid location. 
+ +CREATE TABLE t1 (a INT PRIMARY KEY, b INT) ENGINE=RocksDB; +set @tmpdir = @@global.tmpdir; +set global rocksdb_tmpdir = @tmpdir; +show session variables like 'rocksdb_tmpdir'; + +--echo # Connection con3 +connect (con2,localhost,root); +--replace_result $MYSQL_TMP_DIR MYSQL_TMP_DIR +show session variables like 'rocksdb_tmpdir'; + +ALTER TABLE t1 ADD INDEX kb(b), ALGORITHM=INPLACE; +disconnect con2; +connection default; +set global rocksdb_tmpdir=NULL; +DROP TABLE t1; + + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/transaction.test b/storage/rocksdb/mysql-test/rocksdb/t/transaction.test new file mode 100644 index 0000000000000..a76fa8f9871d6 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/transaction.test @@ -0,0 +1,105 @@ +--source include/have_rocksdb.inc + +create table t1 (id int primary key, value int, value2 varchar(100), index(value)) engine=rocksdb; + +insert into t1 values (1,1,1),(2,2,2),(3,3,3),(4,4,4),(5,5,5),(6,6,6),(8,8,8),(9,9,9),(10,10,10); + +# insert +begin; +insert into t1 values (11,11,11); +--source transaction_select.inc +rollback; + +# insert in the middle +begin; +insert into t1 values (7,7,7); +--source transaction_select.inc +rollback; + +# update non-index column by primary key +begin; +update t1 set value2=100 where id=1; +--source transaction_select.inc +rollback; + +# update secondary key by primary key +begin; +update t1 set value=100 where id=1; +--source transaction_select.inc +rollback; + +# update primary key by primary key +begin; +update t1 set id=100 where id=1; +--source transaction_select.inc +rollback; + +# update non-index column key by secondary key +begin; +update t1 set value2=100 where value=1; +--source transaction_select.inc +rollback; + +# update secondary key by secondary key +begin; +update t1 set value=100 where value=1; +--source transaction_select.inc +rollback; + +# update primary key by secondary key +begin; +update t1 set id=100 where value=1; +--source transaction_select.inc +rollback; + +# 
update non-index column by non-index column +begin; +update t1 set value2=100 where value2=1; +--source transaction_select.inc +rollback; + +# update secondary key by non-index column +begin; +update t1 set value=100 where value2=1; +--source transaction_select.inc +rollback; + +# update primary key column by non-index column +begin; +update t1 set id=100 where value2=1; +--source transaction_select.inc +rollback; + + +# delete by primary key +begin; +delete from t1 where id=1; +--source transaction_select.inc +rollback; + +# delete by secondary key +begin; +delete from t1 where value=1; +--source transaction_select.inc +rollback; + +# delete by non-index column +begin; +delete from t1 where value2=1; +--source transaction_select.inc +rollback; + +# mixed +begin; +insert into t1 values (11,11,11); +insert into t1 values (12,12,12); +insert into t1 values (13,13,13); +delete from t1 where id=9; +delete from t1 where value=8; +update t1 set id=100 where value2=5; +update t1 set value=103 where value=4; +update t1 set id=115 where id=3; +--source transaction_select.inc +rollback; + +drop table t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/transaction_isolation.inc b/storage/rocksdb/mysql-test/rocksdb/t/transaction_isolation.inc new file mode 100644 index 0000000000000..dbd1d90622fad --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/transaction_isolation.inc @@ -0,0 +1,150 @@ +# +# Basic check for transaction isolation. +# The results should be different depending on the isolation level. +# For some isolation levels, some statements will end with a timeout. +# If the engine has its own timeout parameters, reduce them to minimum, +# otherwise the test will take very long. +# If the timeout value is greater than the testcase-timeout the test is run with, +# it might fail due to the testcase timeout. 
+# + +--enable_connect_log + +# Save the initial number of concurrent sessions +--source include/count_sessions.inc + +--disable_warnings +DROP TABLE IF EXISTS t1; +--enable_warnings + +connect (con1,localhost,root,,); +eval SET SESSION TRANSACTION ISOLATION LEVEL $trx_isolation; +connect (con2,localhost,root,,); +eval SET SESSION TRANSACTION ISOLATION LEVEL $trx_isolation; + +connection con1; + +CREATE TABLE t1 (a INT, pk INT AUTO_INCREMENT PRIMARY KEY) ENGINE=rocksdb; + +START TRANSACTION; +--sorted_result +SELECT a FROM t1; # First snapshot + +connection con2; + +BEGIN; +--error 0,ER_LOCK_WAIT_TIMEOUT +INSERT INTO t1 (a) VALUES(1); + +connection con1; +--sorted_result +SELECT a FROM t1; # Second snapshot + +connection con2; +--error 0,ER_LOCK_WAIT_TIMEOUT +INSERT INTO t1 (a) VALUES (2); + +connection con1; +--sorted_result +SELECT a FROM t1; # Third snapshot + +--error 0,ER_LOCK_WAIT_TIMEOUT +INSERT INTO t1 (a) SELECT a+100 FROM t1; + +--sorted_result +SELECT a FROM t1; + +connection con2; +--sorted_result +SELECT a FROM t1; # Inside the transaction +COMMIT; +--sorted_result +SELECT a FROM t1; # Outside the transaction + +connection con1; +--sorted_result +SELECT a FROM t1; # Inside the transaction + +# Note: INSERT .. SELECT might be tricky, for example for InnoDB +# even with REPEATABLE-READ it works as if it is executed with READ COMMITTED. +# The test will have a 'logical' result for repeatable read, even although +# we currently don't have an engine which works this way. 
+ +--error 0,ER_LOCK_WAIT_TIMEOUT +INSERT INTO t1 (a) SELECT a+200 FROM t1; + +--sorted_result +SELECT a FROM t1; +COMMIT; +--sorted_result +SELECT a FROM t1; # Outside the transaction + +connection con2; +--sorted_result +SELECT a FROM t1; # After both transactions have committed + +# Now test with an error in one statement to make sure the snapshots are +# Held/released when expected +connection default; +CREATE TABLE t2 (a INT PRIMARY KEY) ENGINE=rocksdb; +INSERT INTO t2 (a) VALUES (1); +COMMIT; + +connection con1; +BEGIN; +--sorted_result +SELECT a from t2; +--error ER_DUP_ENTRY +INSERT INTO t2 (a) VALUES (1), (3); # failure + +connection con2; +--error 0,ER_LOCK_WAIT_TIMEOUT +INSERT INTO t2 (a) VALUES (2); +COMMIT; + +connection con1; +--sorted_result +SELECT a from t2; +COMMIT; + +connection default; +disconnect con1; +disconnect con2; +DROP TABLE t1; +DROP TABLE t2; + +CREATE TABLE t3 ( + pk int unsigned PRIMARY KEY, + count int unsigned DEFAULT '0' +) ENGINE=ROCKSDB; + +connect (con1,localhost,root,,); +eval SET SESSION TRANSACTION ISOLATION LEVEL $trx_isolation; +connect (con2,localhost,root,,); +eval SET SESSION TRANSACTION ISOLATION LEVEL $trx_isolation; + +connection con1; +BEGIN; +SELECT * FROM t3; + +connection con2; +BEGIN; +INSERT INTO t3 (pk) VALUES(1) ON DUPLICATE KEY UPDATE count=count+1; +COMMIT; + +connection con1; +--error 0,ER_LOCK_DEADLOCK +INSERT INTO t3 (pk) VALUES(1) ON DUPLICATE KEY UPDATE count=count+1; +COMMIT; + +# count will be 0 for repeatable read (because the last insert failed) +# and 1 for read committed +SELECT count FROM t3; + +connection default; +disconnect con1; +disconnect con2; +DROP TABLE t3; + +--source include/wait_until_count_sessions.inc + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/transaction_select.inc b/storage/rocksdb/mysql-test/rocksdb/t/transaction_select.inc new file mode 100644 index 0000000000000..932a450e7c248 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/transaction_select.inc @@ -0,0 
+1,14 @@ +select * from t1 where id=1; +select * from t1 where value=1; +select value from t1 where value=1; +select * from t1 where value2=1; +select * from t1 where id=5; +select * from t1 where value=5; +select value from t1 where value=5; +select * from t1 where value2=5; +select * from t1 where id < 3 order by id; +select * from t1 where value < 3 order by id; +select value from t1 where value < 3 order by id; +select * from t1 where value2 < 3 order by id; +select * from t1 order by id; +select value from t1 order by id; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/truncate_table.test b/storage/rocksdb/mysql-test/rocksdb/t/truncate_table.test new file mode 100644 index 0000000000000..a61488654a368 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/truncate_table.test @@ -0,0 +1,74 @@ +--source include/have_rocksdb.inc + +# +# TRUNCATE TABLE +# + +######################################## +# TODO: +# A part of the test is disabled because +# HANDLER is not supported. If it ever +# changes, the test will complain about +# NOT producing ER_ILLEGAL_HA +######################################## + +--disable_warnings +DROP TABLE IF EXISTS t1; +--enable_warnings + +CREATE TABLE t1 (a INT PRIMARY KEY, b CHAR(8)) ENGINE=rocksdb; +TRUNCATE TABLE t1; +INSERT INTO t1 (a,b) VALUES (1,'a'), (2,'b'), (3,'c'); +TRUNCATE TABLE t1; +SELECT a,b FROM t1; +DROP TABLE t1; + + +# Truncate resets auto-increment value on the table + +CREATE TABLE t1 (a INT KEY AUTO_INCREMENT, c CHAR(8)) ENGINE=rocksdb; + +#--replace_column 2 # 3 # 4 # 5 # 6 # 7 # 8 # 9 # 10 # 12 # 13 # 14 # 15 # 16 # 17 # 18 # +--replace_column 5 # 6 # 7 # +SHOW TABLE STATUS LIKE 't1'; + +INSERT INTO t1 (c) VALUES ('a'),('b'),('c'); +#--replace_column 2 # 3 # 4 # 5 # 6 # 7 # 8 # 9 # 10 # 12 # 13 # 14 # 15 # 16 # 17 # 18 # +--replace_column 5 # 6 # 7 # +SHOW TABLE STATUS LIKE 't1'; + +TRUNCATE TABLE t1; +#--replace_column 2 # 3 # 4 # 5 # 6 # 7 # 8 # 9 # 10 # 12 # 13 # 14 # 15 # 16 # 17 # 18 # 
+--replace_column 5 # 6 # 7 # +SHOW TABLE STATUS LIKE 't1'; + +INSERT INTO t1 (c) VALUES ('d'); +#--replace_column 2 # 3 # 4 # 5 # 6 # 7 # 8 # 9 # 10 # 12 # 13 # 14 # 15 # 16 # 17 # 18 # +--replace_column 5 # 6 # 7 # +SHOW TABLE STATUS LIKE 't1'; + +--sorted_result +SELECT a,c FROM t1; +DROP TABLE t1; + +# Truncate closes handlers + +CREATE TABLE t1 (a INT PRIMARY KEY, b CHAR(8)) ENGINE=rocksdb; +INSERT INTO t1 (a,b) VALUES (1,'a'),(2,'b'),(3,'c'); + +--error ER_ILLEGAL_HA +HANDLER t1 OPEN AS h1; + +--disable_parsing + +HANDLER h1 READ FIRST; +TRUNCATE TABLE t1; +--error ER_UNKNOWN_TABLE +HANDLER h1 READ NEXT; +HANDLER t1 OPEN AS h2; +HANDLER h2 READ FIRST; + +--enable_parsing + +DROP TABLE t1; + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/truncate_table3-master.opt b/storage/rocksdb/mysql-test/rocksdb/t/truncate_table3-master.opt new file mode 100644 index 0000000000000..a9ebc4ec20b96 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/truncate_table3-master.opt @@ -0,0 +1,2 @@ +--rocksdb_max_subcompactions=1 +--rocksdb_default_cf_options=write_buffer_size=16k;target_file_size_base=16k;level0_slowdown_writes_trigger=-1;level0_stop_writes_trigger=1000;compression_per_level=kNoCompression; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/truncate_table3.test b/storage/rocksdb/mysql-test/rocksdb/t/truncate_table3.test new file mode 100644 index 0000000000000..b3f95f812b348 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/truncate_table3.test @@ -0,0 +1,5 @@ +--source include/have_rocksdb.inc + +-- let $truncate_table = 1 +-- let $drop_table = 0 +-- source drop_table3.inc diff --git a/storage/rocksdb/mysql-test/rocksdb/t/trx_info.test b/storage/rocksdb/mysql-test/rocksdb/t/trx_info.test new file mode 100644 index 0000000000000..975bed6132c88 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/trx_info.test @@ -0,0 +1,17 @@ +--source include/have_rocksdb.inc + +--disable_warnings +DROP TABLE IF EXISTS t1; +--enable_warnings + +create table t1 
(a int) engine=rocksdb; +insert into t1 values (1); +insert into t1 values (2); + +set autocommit=0; +select * from t1 for update; + +--replace_column 1 _TRX_ID_ 3 _NAME_ 7 _KEY_ 14 _THREAD_ID_ +select * from information_schema.rocksdb_trx; + +DROP TABLE t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/trx_info_rpl.cnf b/storage/rocksdb/mysql-test/rocksdb/t/trx_info_rpl.cnf new file mode 100644 index 0000000000000..46771b5a67fce --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/trx_info_rpl.cnf @@ -0,0 +1,11 @@ +!include suite/rpl/my.cnf + +[mysqld.1] +binlog_format=row +loose-enable-rocksdb_trx + +[mysqld.2] +binlog_format=row +slave_parallel_workers=1 +rpl_skip_tx_api=ON +loose-enable-rocksdb_trx diff --git a/storage/rocksdb/mysql-test/rocksdb/t/trx_info_rpl.test b/storage/rocksdb/mysql-test/rocksdb/t/trx_info_rpl.test new file mode 100644 index 0000000000000..dc444f7d30809 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/trx_info_rpl.test @@ -0,0 +1,43 @@ +--source include/master-slave.inc +--source include/have_binlog_format_row.inc + +--disable_warnings +DROP TABLE IF EXISTS t1; +connection slave; +--source include/stop_slave.inc +--enable_warnings + +connection master; +create table t1 (a int) engine=rocksdb; +--disable_query_log +--let $aa= 0 +while ($aa < 1000) { + eval insert into t1 values ($aa); + --inc $aa +} +--enable_query_log + +connection slave; +show variables like 'rpl_skip_tx_api'; +--source include/start_slave.inc + +--let $it=0 +--let $stop=0 +while ($stop != 1) { +let $count= query_get_value(select count(*) as Value from information_schema.rocksdb_trx, Value, 1); + if ($count) { + --echo found + --let $stop=1 + } + + if ($it > 1000) { + --echo not found + --let $stop=1 + } + + --inc $it +} + +connection master; +DROP TABLE t1; +--source include/rpl_end.inc diff --git a/storage/rocksdb/mysql-test/rocksdb/t/type_binary.inc b/storage/rocksdb/mysql-test/rocksdb/t/type_binary.inc new file mode 100644 index 
0000000000000..8e2b0e8549b04 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/type_binary.inc @@ -0,0 +1,45 @@ +# +# BINARY column types +# + +# $extra_col_options is set by a calling test + +--disable_warnings +DROP TABLE IF EXISTS t1; +--enable_warnings + +eval CREATE TABLE t1 ( + b BINARY $extra_col_opts, + b0 BINARY(0) $extra_col_opts, + b1 BINARY(1) $extra_col_opts, + b20 BINARY(20) $extra_col_opts, + b255 BINARY(255) $extra_col_opts, + pk BINARY PRIMARY KEY +) ENGINE=rocksdb; + +SHOW COLUMNS IN t1; + +# Valid values + +INSERT INTO t1 VALUES ('','','','','',''); +INSERT INTO t1 VALUES ('a','','b','abcdefghi klmnopqrst', 'Creating an article for the Knowledgebase is similar to asking questions. First, navigate to the category where you feel the article should be. Once there, double check that an article doesn\'t already exist which would work.','a'); + +--sorted_result +SELECT HEX(b), HEX(b0), HEX(b1), HEX(b20), HEX(b255), HEX(pk) FROM t1 ORDER BY pk; + +# Invalid values + +INSERT INTO t1 VALUES ('abc', 'a', 'abc', REPEAT('a',21), REPEAT('x',256),'b'); +--error ER_DUP_ENTRY +INSERT INTO t1 SELECT b255, b255, b255, b255, CONCAT('a',b255,b255), 'c' FROM t1; + +--sorted_result +SELECT HEX(b), HEX(b0), HEX(b1), HEX(b20), HEX(b255), HEX(pk) FROM t1 ORDER BY pk; + +--error ER_TOO_BIG_FIELDLENGTH +eval ALTER TABLE t1 ADD COLUMN b257 BINARY(257) $extra_col_opts; + +SHOW COLUMNS IN t1; + +DROP TABLE t1; + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/type_binary.test b/storage/rocksdb/mysql-test/rocksdb/t/type_binary.test new file mode 100644 index 0000000000000..91749e36a2ec7 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/type_binary.test @@ -0,0 +1,8 @@ +--source include/have_rocksdb.inc + +# +# BINARY column types +# + +--source type_binary.inc + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/type_binary_indexes-master.opt b/storage/rocksdb/mysql-test/rocksdb/t/type_binary_indexes-master.opt new file mode 100644 index 
0000000000000..6ad42e58aa22c --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/type_binary_indexes-master.opt @@ -0,0 +1 @@ +--rocksdb_debug_optimizer_n_rows=1000 --rocksdb_records_in_range=50 diff --git a/storage/rocksdb/mysql-test/rocksdb/t/type_binary_indexes.test b/storage/rocksdb/mysql-test/rocksdb/t/type_binary_indexes.test new file mode 100644 index 0000000000000..f4360ed629bef --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/type_binary_indexes.test @@ -0,0 +1,99 @@ +--source include/have_rocksdb.inc + +# +# BINARY and VARBINARY columns with indexes +# + +####################################### +# TODO: +# A part of the test is disabled +# because unique keys are not supported +####################################### + +SET @ORIG_PAUSE_BACKGROUND_WORK = @@ROCKSDB_PAUSE_BACKGROUND_WORK; +SET GLOBAL ROCKSDB_PAUSE_BACKGROUND_WORK = 1; + +--disable_warnings +DROP TABLE IF EXISTS t1; +--enable_warnings + +CREATE TABLE t1 (b BINARY, + b20 BINARY(20) PRIMARY KEY, + v16 VARBINARY(16), + v128 VARBINARY(128) +) ENGINE=rocksdb; + +SHOW INDEX IN t1; + +INSERT INTO t1 (b,b20,v16,v128) VALUES ('a','char1','varchar1a','varchar1b'),('a','char2','varchar2a','varchar2b'),('b','char3','varchar1a','varchar1b'),('c','char4','varchar3a','varchar3b'); + +--replace_column 9 # +EXPLAIN SELECT HEX(b20) FROM t1 ORDER BY b20; +SELECT HEX(b20) FROM t1 ORDER BY b20; + +--replace_column 9 # +EXPLAIN SELECT HEX(b20) FROM t1 IGNORE INDEX (PRIMARY) ORDER BY b20 DESC; +SELECT HEX(b20) FROM t1 ORDER BY b20 DESC; + +DROP TABLE t1; + +--disable_parsing +--error ER_GET_ERRMSG +CREATE TABLE t1 (b BINARY, + b20 BINARY(20), + v16 VARBINARY(16), + v128 VARBINARY(128), + UNIQUE INDEX b_v (b,v128), + pk VARBINARY(10) PRIMARY KEY +) ENGINE=rocksdb; + + +SHOW INDEX IN t1; + +INSERT INTO t1 (b,b20,v16,v128) VALUES ('a','char1','varchar1a','varchar1b'),('a','char2','varchar2a','varchar2b'),('b','char3','varchar1a','varchar1b'),('c','char4','varchar3a','varchar3b'); + +--replace_column 9 # 
+EXPLAIN SELECT HEX(b), HEX(v128) FROM t1 WHERE b != 'a' AND v128 > 'varchar'; +--sorted_result +SELECT HEX(b), HEX(v128) FROM t1 WHERE b != 'a' AND v128 > 'varchar'; + +--replace_column 9 # +EXPLAIN SELECT HEX(b), HEX(v128) FROM t1 USE INDEX (b_v) WHERE b != 'a' AND v128 > 'varchar'; +--sorted_result +SELECT HEX(b), HEX(v128) FROM t1 USE INDEX (b_v) WHERE b != 'a' AND v128 > 'varchar'; + +--replace_column 9 # +EXPLAIN SELECT HEX(v128), COUNT(*) FROM t1 GROUP BY HEX(v128); +--sorted_result +SELECT HEX(v128), COUNT(*) FROM t1 GROUP BY HEX(v128); + +DROP TABLE t1; + +--enable_parsing + +CREATE TABLE t1 (b BINARY, + b20 BINARY(20), + v16 VARBINARY(16), + v128 VARBINARY(128), + pk VARBINARY(10) PRIMARY KEY, + INDEX (v16(10)) +) ENGINE=rocksdb; + +SHOW INDEX IN t1; + +INSERT INTO t1 (b,b20,v16,v128,pk) VALUES ('a','char1','varchar1a','varchar1b',1),('a','char2','varchar2a','varchar2b',2),('b','char3','varchar1a','varchar1b',3),('c','char4','varchar3a','varchar3b',4),('d','char5','varchar4a','varchar3b',5),('e','char6','varchar2a','varchar3b',6); +INSERT INTO t1 (b,b20,v16,v128,pk) SELECT b,b20,v16,v128,pk+100 FROM t1; + +--replace_column 9 # +EXPLAIN SELECT HEX(SUBSTRING(v16,0,3)) FROM t1 WHERE v16 LIKE 'varchar%'; +--sorted_result +SELECT HEX(SUBSTRING(v16,7,3)) FROM t1 WHERE v16 LIKE 'varchar%'; + +--replace_column 9 # +EXPLAIN SELECT HEX(SUBSTRING(v16,0,3)) FROM t1 FORCE INDEX (v16) WHERE v16 LIKE 'varchar%'; +--sorted_result +SELECT HEX(SUBSTRING(v16,7,3)) FROM t1 FORCE INDEX (v16) WHERE v16 LIKE 'varchar%'; + +DROP TABLE t1; + +SET GLOBAL ROCKSDB_PAUSE_BACKGROUND_WORK = @ORIG_PAUSE_BACKGROUND_WORK; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/type_bit.inc b/storage/rocksdb/mysql-test/rocksdb/t/type_bit.inc new file mode 100644 index 0000000000000..ba0c653740451 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/type_bit.inc @@ -0,0 +1,53 @@ +# +# BIT column type +# + +--disable_warnings +DROP TABLE IF EXISTS t1; +--enable_warnings + +# Valid values + 
+eval CREATE TABLE t1 ( + a BIT $extra_col_opts, + b BIT(20) $extra_col_opts, + c BIT(64) $extra_col_opts, + d BIT(1) $extra_col_opts, + PRIMARY KEY (c) +) ENGINE=rocksdb; + +SHOW COLUMNS IN t1; + +ALTER TABLE t1 DROP COLUMN d; +eval ALTER TABLE t1 ADD COLUMN d BIT(0) $extra_col_opts; +SHOW COLUMNS IN t1; + +INSERT INTO t1 (a,b,c,d) VALUES (0,POW(2,20)-1,b'1111111111111111111111111111111111111111111111111111111111111111',1); +SELECT BIN(a), HEX(b), c+0 FROM t1 WHERE d>0; + +INSERT INTO t1 (a,b,c,d) VALUES (1,0,-2,0); +--sorted_result +SELECT a+0, b+0, c+0 FROM t1 WHERE d<100; + +INSERT INTO t1 (a,b,c,d) VALUES (b'1', 'f', 0xFF, 0x0); +--sorted_result +SELECT a+0, b+0, c+0 FROM t1 WHERE d IN (0, 2); + +DELETE FROM t1; + +# Out of range values +# (should produce warnings) + +INSERT INTO t1 (a,b,c,d) VALUES (0x10,0,0,1); +SELECT a+0,b+0,c+0,d+0 FROM t1; + +INSERT INTO t1 (a,b,c,d) VALUES (0x01,0,0x10000000000000000,0); +--sorted_result +SELECT a+0,b+0,c+0,d+0 FROM t1; + +DROP TABLE t1; + +--error ER_TOO_BIG_DISPLAYWIDTH +eval CREATE TABLE t1 (pk INT PRIMARY KEY, a BIT(65) $extra_col_opts) ENGINE=rocksdb; + + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/type_bit.test b/storage/rocksdb/mysql-test/rocksdb/t/type_bit.test new file mode 100644 index 0000000000000..8d57cabffc8fc --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/type_bit.test @@ -0,0 +1,8 @@ +--source include/have_rocksdb.inc + +# +# BIT column type +# + +--source type_bit.inc + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/type_bit_indexes-master.opt b/storage/rocksdb/mysql-test/rocksdb/t/type_bit_indexes-master.opt new file mode 100644 index 0000000000000..ba9364e152376 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/type_bit_indexes-master.opt @@ -0,0 +1 @@ +--rocksdb_debug_optimizer_n_rows=1000 diff --git a/storage/rocksdb/mysql-test/rocksdb/t/type_bit_indexes.test b/storage/rocksdb/mysql-test/rocksdb/t/type_bit_indexes.test new file mode 100644 index 0000000000000..e4f4bb81819e7 
--- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/type_bit_indexes.test @@ -0,0 +1,113 @@ +--source include/have_rocksdb.inc + +# +# BIT columns with indexes +# + +SET @ORIG_PAUSE_BACKGROUND_WORK = @@ROCKSDB_PAUSE_BACKGROUND_WORK; +SET GLOBAL ROCKSDB_PAUSE_BACKGROUND_WORK = 1; + +--disable_warnings +DROP TABLE IF EXISTS t1; +--enable_warnings + +CREATE TABLE t1 ( + a BIT, + b BIT(20) PRIMARY KEY, + c BIT(32), + d BIT(64) +) ENGINE=rocksdb; + +SHOW INDEX IN t1; + +INSERT INTO t1 (a,b,c,d) VALUES +(0,0xFFFFF,0,1),(0,256,0xAAA,0x12345),(1,16,0,0xFFFFFFF),(0,11,12,13), +(1,100,101,102),(0,12,13,14),(1,13,14,15),(0,101,201,202),(1,1000,1001,1002), +(1,0xFFFF,0xFFFFFFFF,0xFFFFFFFFFFFFFFFF); + +--replace_column 9 # +EXPLAIN SELECT b+0 FROM t1 ORDER BY b; +SELECT b+0 FROM t1 ORDER BY b; + +DROP TABLE t1; + +--echo # TODO: Unique indexes are not enforced +--disable_parsing +--error ER_GET_ERRMSG +CREATE TABLE t1 ( + a BIT, + b BIT(20), + c BIT(32), + d BIT(64), + pk BIT(10) PRIMARY KEY, +UNIQUE INDEX b_c (b,c) +) ENGINE=rocksdb; + + +SHOW INDEX IN t1; + +INSERT INTO t1 (a,b,c,d,pk) VALUES +(0,0xFFFFF,0,1,1),(0,256,0xAAA,0x12345,2),(1,16,0,0xFFFFFFF,3),(0,11,12,13,4), +(1,100,101,102,5),(0,12,13,14,6),(1,13,14,15,7),(0,101,201,202,8),(1,1000,1001,1002,9), +(1,0xFFFF,0xFFFFFFFF,0xFFFFFFFFFFFFFFFF,10); + +--replace_column 9 # +EXPLAIN SELECT HEX(b+c) FROM t1 WHERE c > 1 OR HEX(b) < 0xFFFFFF; +--sorted_result +SELECT HEX(b+c) FROM t1 WHERE c > 1 OR HEX(b) < 0xFFFFFF; + +DROP TABLE t1; + +--enable_parsing + +CREATE TABLE t1 ( + a BIT, + b BIT(20), + c BIT(32), + d BIT(64), + pk BIT(10) PRIMARY KEY, + INDEX(a) +) ENGINE=rocksdb; + +SHOW INDEX IN t1; + +INSERT INTO t1 (a,b,c,d,pk) VALUES +(0,0xFFFFF,0,1,1),(0,256,0xAAA,0x12345,2),(1,16,0,0xFFFFFFF,3),(0,11,12,13,4), +(1,100,101,102,5),(0,12,13,14,6),(1,13,14,15,7),(0,101,201,202,8),(1,1000,1001,1002,9), +(1,0xFFFF,0xFFFFFFFF,0xFFFFFFFFFFFFFFFF,10); + +--replace_column 9 # +EXPLAIN SELECT DISTINCT a+0 FROM t1 ORDER BY a; 
+SELECT DISTINCT a+0 FROM t1 ORDER BY a; + +DROP TABLE t1; + +--disable_parsing +--error ER_GET_ERRMSG +CREATE TABLE t1 ( + a BIT, + b BIT(20), + c BIT(32), + d BIT(64), + pk BIT(10) PRIMARY KEY, + UNIQUE INDEX (d) +) ENGINE=rocksdb; + + +SHOW INDEX IN t1; + +INSERT INTO t1 (a,b,c,d,pk) VALUES +(0,0xFFFFF,0,1,1),(0,256,0xAAA,0x12345,2),(1,16,0,0xFFFFFFF,3),(0,11,12,13,4), +(1,100,101,102,5),(0,12,13,14,6),(1,13,14,15,7),(0,101,201,202,8),(1,1000,1001,1002,9), +(1,0xFFFF,0xFFFFFFFF,0xFFFFFFFFFFFFFFFF,10); + +--replace_column 9 # +EXPLAIN SELECT d FROM t1 WHERE d BETWEEN 1 AND 10000; +--sorted_result +SELECT d+0 FROM t1 WHERE d BETWEEN 1 AND 10000; + +DROP TABLE t1; + +--enable_parsing + +SET GLOBAL ROCKSDB_PAUSE_BACKGROUND_WORK = @ORIG_PAUSE_BACKGROUND_WORK; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/type_blob.inc b/storage/rocksdb/mysql-test/rocksdb/t/type_blob.inc new file mode 100644 index 0000000000000..723b3ee528cc0 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/type_blob.inc @@ -0,0 +1,49 @@ +# +# BLOB column types +# + +--disable_warnings +DROP TABLE IF EXISTS t1; +--enable_warnings + +eval CREATE TABLE t1 ( + pk INT AUTO_INCREMENT PRIMARY KEY, + b BLOB $extra_col_opts, + b0 BLOB(0) $extra_col_opts, + b1 BLOB(1) $extra_col_opts, + b300 BLOB(300) $extra_col_opts, + bm BLOB(65535) $extra_col_opts, + b70k BLOB(70000) $extra_col_opts, + b17m BLOB(17000000) $extra_col_opts, + t TINYBLOB $extra_col_opts, + m MEDIUMBLOB $extra_col_opts, + l LONGBLOB $extra_col_opts +) ENGINE=rocksdb; + +SHOW COLUMNS IN t1; + +# Valid values +# (cannot get MAX for all columns due to max_allowed_packet limitations) + +INSERT INTO t1 (b,b0,b1,b300,bm,b70k,b17m,t,m,l) VALUES +('','','','','','','','','',''), +('a','b','c','d','e','f','g','h','i','j'), +('test1','test2','test3','test4','test5','test6','test7','test8','test9','test10'), +( REPEAT('a',65535), REPEAT('b',65535), REPEAT('c',255), REPEAT('d',65535), REPEAT('e',65535), REPEAT('f',1048576), 
HEX(REPEAT('g',1048576)), REPEAT('h',255), REPEAT('i',1048576), HEX(REPEAT('j',1048576)) ); + +--sorted_result +SELECT LENGTH(b), LENGTH(b0), LENGTH(b1), LENGTH(b300), LENGTH(bm), LENGTH(b70k), LENGTH(b17m), LENGTH(t), LENGTH(m), LENGTH(l) FROM t1; + +# Invalid values (produce warnings, except for mediumblob and longblob columns for which the values are within limits) + +INSERT INTO t1 (b,b0,b1,b300,bm,b70k,b17m,t,m,l) VALUES +( REPEAT('a',65536), REPEAT('b',65536), REPEAT('c',256), REPEAT('d',65536), REPEAT('e',65536), REPEAT('f',1048576), REPEAT('g',1048576), REPEAT('h',256), REPEAT('i',1048576), REPEAT('j',1048576) ); + +--sorted_result +SELECT LENGTH(b), LENGTH(b0), LENGTH(b1), LENGTH(b300), LENGTH(bm), LENGTH(b70k), LENGTH(b17m), LENGTH(t), LENGTH(m), LENGTH(l) FROM t1; + +--error ER_TOO_BIG_DISPLAYWIDTH +ALTER TABLE t1 ADD COLUMN bbb BLOB(4294967296); + +DROP TABLE t1; + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/type_blob.test b/storage/rocksdb/mysql-test/rocksdb/t/type_blob.test new file mode 100644 index 0000000000000..54859707091e3 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/type_blob.test @@ -0,0 +1,8 @@ +--source include/have_rocksdb.inc + +# +# BLOB column types +# + +--source type_blob.inc + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/type_blob_indexes-master.opt b/storage/rocksdb/mysql-test/rocksdb/t/type_blob_indexes-master.opt new file mode 100644 index 0000000000000..6ad42e58aa22c --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/type_blob_indexes-master.opt @@ -0,0 +1 @@ +--rocksdb_debug_optimizer_n_rows=1000 --rocksdb_records_in_range=50 diff --git a/storage/rocksdb/mysql-test/rocksdb/t/type_blob_indexes.test b/storage/rocksdb/mysql-test/rocksdb/t/type_blob_indexes.test new file mode 100644 index 0000000000000..24c70e8e73345 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/type_blob_indexes.test @@ -0,0 +1,176 @@ +--source include/have_rocksdb.inc +--source include/have_debug.inc + +# +# BLOB columns with 
indexes +# + +SET @ORIG_PAUSE_BACKGROUND_WORK = @@ROCKSDB_PAUSE_BACKGROUND_WORK; +SET GLOBAL ROCKSDB_PAUSE_BACKGROUND_WORK = 1; + +--disable_warnings +DROP TABLE IF EXISTS t1; +--enable_warnings + +CREATE TABLE t1 ( + b BLOB, + t TINYBLOB, + m MEDIUMBLOB, + l LONGBLOB, + PRIMARY KEY b (b(32)) +) ENGINE=rocksdb; + +SHOW INDEX IN t1; + +INSERT INTO t1 (b,t,m,l) VALUES +('','','',''), +('a','b','c','d'), +('b','d','c','b'), +('test1','test2','test3','test4'), +(REPEAT('a',128),REPEAT('b',128),REPEAT('c',128),REPEAT('d',128)), +(HEX('abcd'),HEX('def'),HEX('a'),HEX('abc')), +('abc','def','ghi','jkl'), +('test2','test3','test4','test5'), +('test3','test4','test5','test6'), +(REPEAT('b',128),REPEAT('f',128),REPEAT('e',128),REPEAT('d',128)), +(REPEAT('c',128),REPEAT('b',128),REPEAT('c',128),REPEAT('e',128)); + +--replace_column 1 # 2 # 3 # 4 # 5 # 7 # 8 # 9 # 10 # +EXPLAIN SELECT SUBSTRING(b,16) AS f FROM t1 WHERE b IN ('test1','test2') ORDER BY f; +SELECT SUBSTRING(b,16) AS f FROM t1 WHERE b IN ('test1','test2') ORDER BY f; + +--replace_column 1 # 2 # 3 # 4 # 5 # 7 # 8 # 9 # 10 # +EXPLAIN SELECT SUBSTRING(b,16) AS f FROM t1 USE INDEX () WHERE b IN ('test1','test2') ORDER BY f; +SELECT SUBSTRING(b,16) AS f FROM t1 USE INDEX () WHERE b IN ('test1','test2') ORDER BY f; + +DROP TABLE t1; + + +CREATE TABLE t1 ( + b BLOB, + t TINYBLOB, + m MEDIUMBLOB, + l LONGBLOB, + pk INT AUTO_INCREMENT PRIMARY KEY, + UNIQUE INDEX l_t (l(256),t(64)) +) ENGINE=rocksdb; + +--replace_column 6 # 7 # 10 # 11 # +SHOW INDEX IN t1; + +INSERT INTO t1 (b,t,m,l) VALUES +('','','',''), +('a','b','c','d'), +('b','d','c','b'), +('test1','test2','test3','test4'), +(REPEAT('a',128),REPEAT('b',128),REPEAT('c',128),REPEAT('d',128)), +(HEX('abcd'),HEX('def'),HEX('a'),HEX('abc')), +('abc','def','ghi','jkl'), +('test2','test3','test4','test5'), +('test3','test4','test5','test6'), +(REPEAT('b',128),REPEAT('f',128),REPEAT('e',128),REPEAT('d',128)), +(REPEAT('c',128),REPEAT('b',128),REPEAT('c',128),REPEAT('e',128)); 
+ +# Here we are getting possible key l_t, but not the final key +--replace_column 9 # +EXPLAIN SELECT SUBSTRING(t,64), SUBSTRING(l,256) FROM t1 WHERE t!=l AND l NOT IN ('test1') ORDER BY t, l DESC; +SELECT SUBSTRING(t,64), SUBSTRING(l,256) FROM t1 WHERE t!=l AND l NOT IN ('test1') ORDER BY t, l DESC; + +--replace_column 9 # +EXPLAIN SELECT SUBSTRING(t,64), SUBSTRING(l,256) FROM t1 FORCE INDEX (l_t) WHERE t!=l AND l NOT IN ('test1') ORDER BY t, l DESC; +SELECT SUBSTRING(t,64), SUBSTRING(l,256) FROM t1 FORCE INDEX (l_t) WHERE t!=l AND l NOT IN ('test1') ORDER BY t, l DESC; + +DROP TABLE t1; + + +CREATE TABLE t1 ( + b BLOB, + t TINYBLOB, + m MEDIUMBLOB, + l LONGBLOB, + pk INT AUTO_INCREMENT PRIMARY KEY, + INDEX (m(128)) +) ENGINE=rocksdb; + +SHOW INDEX IN t1; + +INSERT INTO t1 (b,t,m,l) VALUES +('','','',''), +('a','b','c','d'), +('b','d','c','b'), +('test1','test2','test3','test4'), +(REPEAT('a',128),REPEAT('b',128),REPEAT('c',128),REPEAT('d',128)), +(HEX('abcd'),HEX('def'),HEX('a'),HEX('abc')), +('abc','def','ghi','jkl'), +('test2','test3','test4','test5'), +('test3','test4','test5','test6'), +(REPEAT('b',128),REPEAT('f',128),REPEAT('e',128),REPEAT('d',128)), +(REPEAT('c',128),REPEAT('b',128),REPEAT('c',128),REPEAT('e',128)); + +--replace_column 9 # +EXPLAIN SELECT SUBSTRING(m,128) AS f FROM t1 WHERE m = 'test1' ORDER BY f DESC; +SELECT SUBSTRING(m,128) AS f FROM t1 WHERE m = 'test1' ORDER BY f DESC; + +--replace_column 9 # +EXPLAIN SELECT SUBSTRING(m,128) AS f FROM t1 IGNORE INDEX FOR ORDER BY (m) WHERE m = 'test1' ORDER BY f DESC; +SELECT SUBSTRING(m,128) AS f FROM t1 IGNORE INDEX FOR ORDER BY (m) WHERE m = 'test1' ORDER BY f DESC; + +DROP TABLE t1; + +CREATE TABLE t1 ( + b BLOB, + PRIMARY KEY b (b(32)) +) ENGINE=rocksdb; + +INSERT INTO t1 (b) VALUES +('00000000000000000000000000000000'), +('00000000000000000000000000000001'), +('00000000000000000000000000000002'); + +SELECT b FROM t1; + +DROP TABLE t1; + +CREATE TABLE t1 ( + b TINYBLOB, + PRIMARY KEY b (b(32)) 
+) ENGINE=rocksdb; + +INSERT INTO t1 (b) VALUES +('00000000000000000000000000000000'), +('00000000000000000000000000000001'), +('00000000000000000000000000000002'); + +SELECT b FROM t1; + +DROP TABLE t1; + +CREATE TABLE t1 ( + b MEDIUMBLOB, + PRIMARY KEY b (b(32)) +) ENGINE=rocksdb; + +INSERT INTO t1 (b) VALUES +('00000000000000000000000000000000'), +('00000000000000000000000000000001'), +('00000000000000000000000000000002'); + +SELECT b FROM t1; + +DROP TABLE t1; + +CREATE TABLE t1 ( + b LONGBLOB, + PRIMARY KEY b (b(32)) +) ENGINE=rocksdb; + +INSERT INTO t1 (b) VALUES +('00000000000000000000000000000000'), +('00000000000000000000000000000001'), +('00000000000000000000000000000002'); + +SELECT b FROM t1; + +DROP TABLE t1; + +SET GLOBAL ROCKSDB_PAUSE_BACKGROUND_WORK = @ORIG_PAUSE_BACKGROUND_WORK; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/type_bool.inc b/storage/rocksdb/mysql-test/rocksdb/t/type_bool.inc new file mode 100644 index 0000000000000..cddc0822c4421 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/type_bool.inc @@ -0,0 +1,64 @@ +# +# BOOLEAN column type +# + +--disable_warnings +DROP TABLE IF EXISTS t1; +--enable_warnings + +eval CREATE TABLE t1 ( + pk INT AUTO_INCREMENT PRIMARY KEY, + b1 BOOL $extra_col_opts, + b2 BOOLEAN $extra_col_opts +) ENGINE=rocksdb; + +SHOW COLUMNS IN t1; + +# Valid values + +INSERT INTO t1 (b1,b2) VALUES (1,TRUE); +SELECT b1,b2 FROM t1; + +INSERT INTO t1 (b1,b2) VALUES (FALSE,0); +--sorted_result +SELECT b1,b2 FROM t1; + +INSERT INTO t1 (b1,b2) VALUES (2,3); +--sorted_result +SELECT b1,b2 FROM t1; + +INSERT INTO t1 (b1,b2) VALUES (-1,-2); +--sorted_result +SELECT b1,b2 FROM t1; + +--sorted_result +SELECT IF(b1,'true','false') AS a, IF(b2,'true','false') AS b FROM t1; + +--sorted_result +SELECT b1,b2 FROM t1 WHERE b1 = TRUE; + +--sorted_result +SELECT b1,b2 FROM t1 WHERE b2 = FALSE; + +# Invalid values + +INSERT INTO t1 (b1,b2) VALUES ('a','b'); +--sorted_result +SELECT b1,b2 FROM t1; + +INSERT INTO t1 (b1,b2) VALUES 
(128,-129); +--sorted_result +SELECT b1,b2 FROM t1; + +# This is why we don't have zerofill and unsigned tests +# for boolean columns: +--error ER_PARSE_ERROR +eval ALTER TABLE t1 ADD COLUMN b3 BOOLEAN UNSIGNED $extra_col_opts; + +--error ER_PARSE_ERROR +eval ALTER TABLE ADD COLUMN b3 BOOL ZEROFILL $extra_col_opts; + +DROP TABLE t1; + + + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/type_bool.test b/storage/rocksdb/mysql-test/rocksdb/t/type_bool.test new file mode 100644 index 0000000000000..d5a3b9be83de1 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/type_bool.test @@ -0,0 +1,8 @@ +--source include/have_rocksdb.inc + +# +# BOOLEAN column type +# + +--source type_bool.inc + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/type_char.inc b/storage/rocksdb/mysql-test/rocksdb/t/type_char.inc new file mode 100644 index 0000000000000..d770dc608fdfb --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/type_char.inc @@ -0,0 +1,45 @@ +# +# CHAR column types +# + +--disable_warnings +DROP TABLE IF EXISTS t1; +--enable_warnings + +eval CREATE TABLE t1 ( + c CHAR $extra_col_opts, + c0 CHAR(0) $extra_col_opts, + c1 CHAR(1) $extra_col_opts, + c20 CHAR(20) $extra_col_opts, + c255 CHAR(255) $extra_col_opts, + PRIMARY KEY (c255) +) ENGINE=rocksdb; + +SHOW COLUMNS IN t1; + +# Valid values + +INSERT INTO t1 (c,c0,c1,c20,c255) VALUES ('','','','',''); +INSERT INTO t1 (c,c0,c1,c20,c255) VALUES ('a','','b','abcdefghi klmnopqrst', 'Creating an article for the Knowledgebase is similar to asking questions. First, navigate to the category where you feel the article should be. 
Once there, double check that an article doesn\'t already exist which would work.'); + +--sorted_result +SELECT c,c0,c1,c20,c255 FROM t1; + +# Invalid values + +INSERT INTO t1 (c,c0,c1,c20,c255) VALUES ('abc', 'a', 'abc', REPEAT('a',21), REPEAT('x',256)); +INSERT INTO t1 (c,c0,c1,c20,c255) SELECT c255, c255, c255, c255, CONCAT('a',c255,c1) FROM t1; + +--sorted_result +SELECT c,c0,c1,c20,c255 FROM t1; + +--sorted_result +SELECT DISTINCT c20, REPEAT('a',LENGTH(c20)), COUNT(*) FROM t1 GROUP BY c1, c20; + +--error ER_TOO_BIG_FIELDLENGTH +eval ALTER TABLE t1 ADD COLUMN c257 CHAR(257) $extra_col_opts; + +DROP TABLE t1; + + + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/type_char.test b/storage/rocksdb/mysql-test/rocksdb/t/type_char.test new file mode 100644 index 0000000000000..347f83cad3348 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/type_char.test @@ -0,0 +1,19 @@ +--source include/have_rocksdb.inc + +# +# CHAR column types +# + +--source type_char.inc + +# Issue #226 +CREATE TABLE t1(c1 CHAR(0) NOT NULL); +INSERT INTO t1 VALUES('a'); +SELECT * FROM t1; +DROP TABLE t1; + +# Issue #259 +CREATE TABLE t1(a char(10) character set utf8 collate utf8_bin primary key); +INSERT INTO t1 VALUES ('one'),('two'),('three'),('four'),('five'); +(SELECT * FROM t1 LIMIT 1) UNION (SELECT * FROM t1); +DROP TABLE t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/type_char_indexes-master.opt b/storage/rocksdb/mysql-test/rocksdb/t/type_char_indexes-master.opt new file mode 100644 index 0000000000000..ba9364e152376 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/type_char_indexes-master.opt @@ -0,0 +1 @@ +--rocksdb_debug_optimizer_n_rows=1000 diff --git a/storage/rocksdb/mysql-test/rocksdb/t/type_char_indexes.test b/storage/rocksdb/mysql-test/rocksdb/t/type_char_indexes.test new file mode 100644 index 0000000000000..6ee2f03e74dc2 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/type_char_indexes.test @@ -0,0 +1,107 @@ +--source include/have_rocksdb.inc + 
+# +# CHAR and VARCHAR columns with indexes +# + +SET @ORIG_PAUSE_BACKGROUND_WORK = @@ROCKSDB_PAUSE_BACKGROUND_WORK; +SET GLOBAL ROCKSDB_PAUSE_BACKGROUND_WORK = 1; + +--disable_warnings +DROP TABLE IF EXISTS t1; +--enable_warnings + +CREATE TABLE t1 ( + c CHAR, + c20 CHAR(20) PRIMARY KEY, + v16 VARCHAR(16), + v128 VARCHAR(128) +) ENGINE=rocksdb; + +SHOW INDEX IN t1; + +INSERT INTO t1 (c,c20,v16,v128) VALUES ('a','char1','varchar1a','varchar1b'),('a','char2','varchar2a','varchar2b'),('b','char3','varchar1a','varchar1b'),('c','char4','varchar3a','varchar3b'); + +--replace_column 9 # +EXPLAIN SELECT c20 FROM t1 ORDER BY c20; +SELECT c20 FROM t1 ORDER BY c20; + +--replace_column 9 # +EXPLAIN SELECT c20 FROM t1 FORCE INDEX FOR ORDER BY (PRIMARY) ORDER BY c20; +SELECT c20 FROM t1 FORCE INDEX FOR ORDER BY (PRIMARY) ORDER BY c20; + +DROP TABLE t1; + +--disable_parsing +--error ER_GET_ERRMSG +CREATE TABLE t1 ( + c CHAR, + c20 CHAR(20), + v16 VARCHAR(16), + v128 VARCHAR(128), + pk CHAR(64) PRIMARY KEY, + UNIQUE INDEX c_v (c,v128) +) ENGINE=rocksdb; + + +SHOW INDEX IN t1; + +INSERT INTO t1 (c,c20,v16,v128) VALUES ('a','char1','varchar1a','varchar1b'),('a','char2','varchar2a','varchar2b'),('b','char3','varchar1a','varchar1b'),('c','char4','varchar3a','varchar3b'); + +--replace_column 9 # +EXPLAIN SELECT c, v128 FROM t1 WHERE c != 'a' AND v128 > 'varchar'; +--sorted_result +SELECT c, v128 FROM t1 WHERE c != 'a' AND v128 > 'varchar'; + +--replace_column 9 # +EXPLAIN SELECT v128, COUNT(*) FROM t1 GROUP BY v128; +--sorted_result +SELECT v128, COUNT(*) FROM t1 GROUP BY v128; + +--replace_column 9 # +EXPLAIN SELECT v128, COUNT(*) FROM t1 USE INDEX FOR GROUP BY (c_v) GROUP BY v128; +--sorted_result +SELECT v128, COUNT(*) FROM t1 USE INDEX FOR GROUP BY (c_v) GROUP BY v128; + +SET SESSION optimizer_switch = 'engine_condition_pushdown=on'; +--replace_column 9 # +EXPLAIN SELECT c,c20,v16,v128 FROM t1 WHERE c > 'a'; +--sorted_result +SELECT c,c20,v16,v128 FROM t1 WHERE c > 'a'; +SET 
SESSION optimizer_switch = @@global.optimizer_switch; + +DROP TABLE t1; + +--enable_parsing + +CREATE TABLE t1 ( + c CHAR, + c20 CHAR(20), + v16 VARCHAR(16), + v128 VARCHAR(128), + pk VARCHAR(64) PRIMARY KEY, + INDEX (v16) +) ENGINE=rocksdb; + +SHOW INDEX IN t1; + +INSERT INTO t1 (c,c20,v16,v128,pk) VALUES ('a','char1','varchar1a','varchar1b','1'),('a','char2','varchar2a','varchar2b','2'),('b','char3','varchar1a','varchar1b','3'),('c','char4','varchar3a','varchar3b','4'); + +--replace_column 9 # +EXPLAIN SELECT SUBSTRING(v16,0,3) FROM t1 WHERE v16 LIKE 'varchar%'; +--sorted_result +SELECT SUBSTRING(v16,7,3) FROM t1 WHERE v16 LIKE 'varchar%'; + +--replace_column 9 # +EXPLAIN SELECT SUBSTRING(v16,0,3) FROM t1 IGNORE INDEX (v16) WHERE v16 LIKE 'varchar%'; +--sorted_result +SELECT SUBSTRING(v16,7,3) FROM t1 IGNORE INDEX (v16) WHERE v16 LIKE 'varchar%'; + +--replace_column 9 # +EXPLAIN SELECT c,c20,v16,v128 FROM t1 WHERE v16 = 'varchar1a' OR v16 = 'varchar3a' ORDER BY v16; +--sorted_result +SELECT c,c20,v16,v128 FROM t1 WHERE v16 = 'varchar1a' OR v16 = 'varchar3a' ORDER BY v16; + +DROP TABLE t1; + +SET GLOBAL ROCKSDB_PAUSE_BACKGROUND_WORK = @ORIG_PAUSE_BACKGROUND_WORK; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/type_char_indexes_collation-master.opt b/storage/rocksdb/mysql-test/rocksdb/t/type_char_indexes_collation-master.opt new file mode 100644 index 0000000000000..ba9364e152376 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/type_char_indexes_collation-master.opt @@ -0,0 +1 @@ +--rocksdb_debug_optimizer_n_rows=1000 diff --git a/storage/rocksdb/mysql-test/rocksdb/t/type_char_indexes_collation.test b/storage/rocksdb/mysql-test/rocksdb/t/type_char_indexes_collation.test new file mode 100644 index 0000000000000..d231236bd9288 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/type_char_indexes_collation.test @@ -0,0 +1,126 @@ +--source include/have_rocksdb.inc +--source include/have_debug.inc + +# Test if unknown collation works. 
+set session debug_dbug= "+d,myrocks_enable_unknown_collation_index_only_scans"; +create table t (id int not null auto_increment primary key, + c varchar(8) CHARACTER SET utf8 COLLATE utf8_general_ci, + key sk (c)); +insert into t (c) values ('☀'), ('ß'); +--replace_column 9 # +explain select c from t; +select c from t; +drop table t; +set session debug_dbug= "-d,myrocks_enable_unknown_collation_index_only_scans"; + +# Testing if all characters in latin1 charset get restored correctly. This is +# done by comparing results from a PK scan. +create table t (id int not null auto_increment, + c1 varchar(1) CHARACTER SET latin1 COLLATE latin1_swedish_ci, + c2 char(1) CHARACTER SET latin1 COLLATE latin1_general_ci, + primary key (id), + key sk1 (c1), + key sk2 (c2)); + +let $i = 0; + +--disable_query_log +while ($i < 256) +{ + --eval insert into t (c1, c2) values (CHAR('$i'), CHAR('$i')) + inc $i; +} +--enable_query_log + +--replace_column 9 # +explain select hex(c1) from t order by c1; +--replace_column 9 # +explain select hex(c1) from t IGNORE INDEX (sk1) order by c1; + +--replace_column 9 # +explain select hex(c2) from t order by c2; +--replace_column 9 # +explain select hex(c2) from t IGNORE INDEX (sk1) order by c2; + +--let $file1=$MYSQLTEST_VARDIR/tmp/filesort_order +--let $file2=$MYSQLTEST_VARDIR/tmp/sk_order +--disable_query_log +--eval select hex(weight_string(c1)) INTO OUTFILE '$file1' from t order by c1 +--eval select hex(weight_string(c1)) INTO OUTFILE '$file2' from t IGNORE INDEX (sk1) order by c1 +--enable_query_log + +--diff_files $file1 $file2 +--remove_file $file1 +--remove_file $file2 + +--disable_query_log +--eval select hex(weight_string(c2)) INTO OUTFILE '$file1' from t order by c2 +--eval select hex(weight_string(c2)) INTO OUTFILE '$file2' from t IGNORE INDEX (sk1) order by c2 +--enable_query_log + +--diff_files $file1 $file2 +--remove_file $file1 +--remove_file $file2 + +truncate t; + +# Test handling of spaces at the end of fields. 
+insert into t (c1, c2) values ('Asdf ', 'Asdf '); +select char_length(c1), char_length(c2), c1, c2 from t; + +drop table t; + +create table t (id int not null auto_increment, + c2 char(255) CHARACTER SET latin1 COLLATE latin1_general_ci, + primary key (id), + unique key sk2 (c2)); + +insert into t (c2) values ('Asdf'); +--error ER_DUP_ENTRY +insert into t (c2) values ('asdf '); + +drop table t; + +create table t (id int not null auto_increment, + c1 varchar(256) CHARACTER SET latin1 COLLATE latin1_swedish_ci, + primary key (id), + unique key sk1 (c1)); + +insert into t (c1) values ('Asdf'); +--error ER_DUP_ENTRY +insert into t (c1) values ('asdf '); +--error ER_DUP_ENTRY +insert into t (c1) values ('asdf'); + +drop table t; + +create table t (id int not null auto_increment, + c1 varchar(256) CHARACTER SET latin1 COLLATE latin1_swedish_ci, + primary key (id), + unique key sk1 (c1(1))); + +insert into t (c1) values ('Asdf'); +insert into t (c1) values ('bbbb '); +--error ER_DUP_ENTRY +insert into t (c1) values ('a '); + +--replace_column 9 # +explain select c1 from t; +select c1 from t; + +drop table t; + +# Test varchar keyparts with key prefix +set session rocksdb_verify_row_debug_checksums = on; +create table t (id int primary key, email varchar(100), KEY email_i (email(30))) engine=rocksdb default charset=latin1; +insert into t values (1, ' a'); +--replace_column 9 # +explain select 'email_i' as index_name, count(*) AS count from t force index(email_i); +select 'email_i' as index_name, count(*) AS count from t force index(email_i); +drop table t; + +# Test varchar with length greater than 255 +create table t (id int primary key, email varchar(767), KEY email_i (email)) engine=rocksdb default charset=latin1; +insert into t values (1, REPEAT('a', 700)); +select 'email_i' as index_name, count(*) AS count from t force index(email_i); +drop table t; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/type_date_time.inc 
b/storage/rocksdb/mysql-test/rocksdb/t/type_date_time.inc new file mode 100644 index 0000000000000..18ed7436b6224 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/type_date_time.inc @@ -0,0 +1,47 @@ +# +# Date and time column types +# (DATE, DATETIME, TIMESTAMP, TIME, YEAR) +# + +--disable_warnings +DROP TABLE IF EXISTS t1; +--enable_warnings +set @save_time_zone=@@time_zone; +set time_zone='UTC'; +eval CREATE TABLE t1 ( + d DATE $extra_col_opts, + dt DATETIME $extra_col_opts, + ts TIMESTAMP $extra_col_opts, + t TIME $extra_col_opts, + y YEAR $extra_col_opts, + y4 YEAR(4) $extra_col_opts, + y2 YEAR(2) $extra_col_opts, + pk DATETIME PRIMARY KEY +) ENGINE=rocksdb; + +SHOW COLUMNS IN t1; + +SET @tm = '2012-04-09 05:27:00'; + +# Valid values +# '1970-01-01 00:00:01' +INSERT INTO t1 (d,dt,ts,t,y,y4,y2,pk) VALUES +('1000-01-01', '1000-01-01 00:00:00', FROM_UNIXTIME(1), '-838:59:59', '1901', '1901', '00','2012-12-12 12:12:12'), +('9999-12-31', '9999-12-31 23:59:59', FROM_UNIXTIME(2147483647), '838:59:59', '2155', '2155', '99','2012-12-12 12:12:13'), +('0000-00-00', '0000-00-00 00:00:00', '0000-00-00 00:00:00', '00:00:00', '0', '0', '0','2012-12-12 12:12:14'), +(DATE(@tm),@tm,TIMESTAMP(@tm),TIME(@tm),YEAR(@tm),YEAR(@tm),YEAR(@tm),'2012-12-12 12:12:15'); + +--sorted_result +SELECT d,dt,ts,t,y,y4,y2 FROM t1; + +# Invalid values + +INSERT INTO t1 (d,dt,ts,t,y,y4,y2,pk) VALUES +('999-13-32', '999-11-31 00:00:00', '0', '-839:00:00', '1900', '1900', '-1','2012-12-12 12:12:16'); + +SELECT d,dt,ts,t,y,y4,y2 FROM t1; +set time_zone=@save_time_zone; + +DROP TABLE t1; + + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/type_date_time.test b/storage/rocksdb/mysql-test/rocksdb/t/type_date_time.test new file mode 100644 index 0000000000000..af4e006c900da --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/type_date_time.test @@ -0,0 +1,9 @@ +--source include/have_rocksdb.inc + +# +# Date and time column types +# (DATE, DATETIME, TIMESTAMP, TIME, YEAR) +# + +--source 
type_date_time.inc + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/type_date_time_indexes-master.opt b/storage/rocksdb/mysql-test/rocksdb/t/type_date_time_indexes-master.opt new file mode 100644 index 0000000000000..ba9364e152376 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/type_date_time_indexes-master.opt @@ -0,0 +1 @@ +--rocksdb_debug_optimizer_n_rows=1000 diff --git a/storage/rocksdb/mysql-test/rocksdb/t/type_date_time_indexes.test b/storage/rocksdb/mysql-test/rocksdb/t/type_date_time_indexes.test new file mode 100644 index 0000000000000..06cf86b7661aa --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/type_date_time_indexes.test @@ -0,0 +1,157 @@ +--source include/have_rocksdb.inc + +# +# Date and time columns with indexes +# (DATE, DATETIME, TIMESTAMP, TIME, YEAR) +# + +SET @ORIG_PAUSE_BACKGROUND_WORK = @@ROCKSDB_PAUSE_BACKGROUND_WORK; +SET GLOBAL ROCKSDB_PAUSE_BACKGROUND_WORK = 1; + +--disable_warnings +DROP TABLE IF EXISTS t1; +--enable_warnings + + +CREATE TABLE t1 ( + d DATE, + dt DATETIME PRIMARY KEY, + ts TIMESTAMP, + t TIME, + y YEAR +) ENGINE=rocksdb; + +SHOW INDEX IN t1; +SET @tm = '2012-04-09 05:27:00'; + +INSERT INTO t1 (d,dt,ts,t,y) VALUES +('2012-01-12', '2010-11-22 12:33:54', '2011-11-14 21:45:55', '00:12:33', '2000'), +('2012-01-12', '2010-11-22 11:43:14', '2011-11-14 21:45:55', '00:12:32', '2001'), +('2012-03-31', '2011-08-28 21:33:56', '1999-04-30 19:11:08', '12:00:00', '1999'), +('2012-03-13', '2011-08-27 21:33:56', '1999-03-30 19:11:08', '12:10:00', '1998'), +('2011-03-31', '2011-08-28 20:33:56', '1997-01-31 11:54:01', '22:04:10', '1994'), +(DATE(@tm),@tm,TIMESTAMP(@tm),TIME(@tm),YEAR(@tm)); + +--replace_column 9 # +EXPLAIN SELECT dt FROM t1 ORDER BY dt LIMIT 3; +SELECT dt FROM t1 ORDER BY dt LIMIT 3; + +--replace_column 9 # +EXPLAIN SELECT dt FROM t1 FORCE INDEX FOR ORDER BY (PRIMARY) ORDER BY dt LIMIT 3; +SELECT dt FROM t1 FORCE INDEX FOR ORDER BY (PRIMARY) ORDER BY dt LIMIT 3; + +--error ER_DUP_ENTRY +INSERT INTO t1 
(d,dt,ts,t,y) VALUES +('2012-01-11', '2010-11-22 12:33:54', '2011-11-14 21:45:55', '00:12:33', '2000'); + +DROP TABLE t1; + +CREATE TABLE t1 ( + d DATE, + dt DATETIME, + ts TIMESTAMP, + t TIME, + y YEAR, + pk TIME PRIMARY KEY, + INDEX (ts) +) ENGINE=rocksdb; + +SHOW INDEX IN t1; +SET @tm = '2012-04-09 05:27:00'; + +INSERT INTO t1 (d,dt,ts,t,y,pk) VALUES +('2012-01-12', '2010-11-22 12:33:54', '2011-11-14 21:45:55', '00:12:33', '2000','12:00:00'), +('2012-01-12', '2010-11-22 11:43:14', '2011-11-14 21:45:55', '00:12:32', '2001','12:01:00'), +('2012-03-31', '2011-08-28 21:33:56', '1999-04-30 19:11:08', '12:00:00', '1999','12:02:00'), +('2012-03-13', '2011-08-27 21:33:56', '1999-03-30 19:11:08', '12:10:00', '1998','12:03:00'), +('2011-03-31', '2011-08-28 20:33:56', '1997-01-31 11:54:01', '22:04:10', '1994','12:04:00'), +(DATE(@tm),@tm,TIMESTAMP(@tm),TIME(@tm),YEAR(@tm),'12:05:00'); + +--replace_column 9 # +EXPLAIN SELECT ts FROM t1 WHERE ts > NOW(); +--sorted_result +SELECT ts FROM t1 WHERE ts > NOW(); + +--replace_column 9 # +EXPLAIN SELECT ts FROM t1 USE INDEX () WHERE ts > NOW(); +--sorted_result +SELECT ts FROM t1 USE INDEX () WHERE ts > NOW(); + +DROP TABLE t1; + +--disable_parsing +--error ER_GET_ERRMSG +CREATE TABLE t1 ( + d DATE, + dt DATETIME, + ts TIMESTAMP, + t TIME, + y YEAR, + pk YEAR PRIMARY KEY, + UNIQUE INDEX d_t (d,t) +) ENGINE=rocksdb; + + +SHOW INDEX IN t1; +SET @tm = '2012-04-09 05:27:00'; + +INSERT INTO t1 (d,dt,ts,t,y,pk) VALUES +('2012-01-12', '2010-11-22 12:33:54', '2011-11-14 21:45:55', '00:12:33', '2000','1990'), +('2012-01-12', '2010-11-22 11:43:14', '2011-11-14 21:45:55', '00:12:32', '2001','1991'), +('2012-03-31', '2011-08-28 21:33:56', '1999-04-30 19:11:08', '12:00:00', '1999','1992'), +('2012-03-13', '2011-08-27 21:33:56', '1999-03-30 19:11:08', '12:10:00', '1998','1993'), +('2011-03-31', '2011-08-28 20:33:56', '1997-01-31 11:54:01', '22:04:10', '1994','1994'), +(DATE(@tm),@tm,TIMESTAMP(@tm),TIME(@tm),YEAR(@tm),'1995'); + +--replace_column 
9 # +EXPLAIN SELECT d, t FROM t1 WHERE CONCAT(d,' ',t) != CURRENT_DATE(); +--sorted_result +SELECT d, t FROM t1 WHERE CONCAT(d,' ',t) != CURRENT_DATE(); + +--replace_column 9 # +EXPLAIN SELECT d, t FROM t1 IGNORE INDEX (d_t) WHERE CONCAT(d,' ',t) != CURRENT_DATE(); +--sorted_result +SELECT d, t FROM t1 IGNORE INDEX (d_t) WHERE CONCAT(d,' ',t) != CURRENT_DATE(); + +--error ER_DUP_ENTRY +INSERT INTO t1 (d,dt,ts,t,y) VALUES +('2012-01-12', '2010-11-22 12:33:53', '2011-11-14 21:45:55', '00:12:33', '2000'); + +DROP TABLE t1; + +--enable_parsing + +CREATE TABLE t1 ( + d DATE, + dt DATETIME, + ts TIMESTAMP, + t TIME, + y YEAR, + pk TIME PRIMARY KEY, + INDEX (y,t) +) ENGINE=rocksdb; + +SHOW INDEX IN t1; +SET @tm = '2012-04-09 05:27:00'; + +INSERT INTO t1 (d,dt,ts,t,y,pk) VALUES +('2012-01-12', '2010-11-22 12:33:54', '2011-11-14 21:45:55', '00:12:33', '2000','18:18:18'), +('2012-01-12', '2010-11-22 11:43:14', '2011-11-14 21:45:55', '00:12:32', '2001','19:18:18'), +('2012-03-31', '2011-08-28 21:33:56', '1999-04-30 19:11:08', '12:00:00', '1999','20:18:18'), +('2012-03-13', '2011-08-27 21:33:56', '1999-03-30 19:11:08', '12:10:00', '1998','21:18:18'), +('2011-03-31', '2011-08-28 20:33:56', '1997-01-31 11:54:01', '22:04:10', '1994','22:18:18'), +(DATE(@tm),@tm,TIMESTAMP(@tm),TIME(@tm),YEAR(@tm),'23:18:18'); + +--replace_column 9 # +EXPLAIN SELECT y, COUNT(*) FROM t1 GROUP BY y; +--sorted_result +SELECT y, COUNT(*) FROM t1 GROUP BY y; + +--replace_column 9 # +EXPLAIN SELECT y, COUNT(*) FROM t1 USE INDEX FOR GROUP BY () GROUP BY y; +--sorted_result +SELECT y, COUNT(*) FROM t1 USE INDEX FOR GROUP BY () GROUP BY y; + +DROP TABLE t1; + +SET GLOBAL ROCKSDB_PAUSE_BACKGROUND_WORK = @ORIG_PAUSE_BACKGROUND_WORK; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/type_decimal-master.opt b/storage/rocksdb/mysql-test/rocksdb/t/type_decimal-master.opt new file mode 100644 index 0000000000000..33e72265db2c0 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/type_decimal-master.opt @@ -0,0 
+1 @@ +--rocksdb_debug_optimizer_n_rows=10 diff --git a/storage/rocksdb/mysql-test/rocksdb/t/type_decimal.test b/storage/rocksdb/mysql-test/rocksdb/t/type_decimal.test new file mode 100644 index 0000000000000..ee325b34eff70 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/type_decimal.test @@ -0,0 +1,88 @@ +--source include/have_rocksdb.inc +--source include/have_debug.inc + +--disable_warnings +drop table if exists t1, t2; +--enable_warnings + +--echo # +--echo # Check that DECIMAL datatype supports 'index-only' scans and is decoded correctly. +--echo # (Decoding happens from the mem-comparable image in the index, regardless +--echo # of whether the value part has original value or not) +--echo # + +create table t1 ( + pk int not null primary key, + col1 decimal (2,1) signed, + col2 decimal (2,1) unsigned, + filler varchar(100), + key key1(col1, col2) +)engine=rocksdb; +insert into t1 values +(1,-9.1, 0.7, 'filler'), +(2,-8.2, 1.6, 'filler'), +(3, 0.3, 2.5, 'filler'), +(4, 1.4, 3.4, 'filler'), +(5, 2.5, 4.3, 'filler'), +(6, 3.3, 5.3, 'filler'); +insert into t1 select pk+100, 9.0, 9.0, 'extra-data' from t1; +insert into t1 select pk+200, 9.0, 9.0, 'extra-data' from t1; +insert into t1 select pk+1000, 9.0, 9.0, 'extra-data' from t1; +insert into t1 select pk+10000, 9.0, 9.0, 'extra-data' from t1; +insert into t1 select pk+100000, 9.0, 9.0, 'extra-data' from t1; +analyze table t1; + +--echo # The following can't use index-only: +--replace_column 9 # +explain select * from t1 where col1 between -8 and 8; + +--echo # This will use index-only: +--replace_column 9 # +explain +select col1, col2 from t1 where col1 between -8 and 8; +select col1, col2 from t1 where col1 between -8 and 8; + +insert into t1 values (11, NULL, 0.9, 'row1-with-null'); +insert into t1 values (10, -8.4, NULL, 'row2-with-null'); +--replace_column 9 # +explain +select col1, col2 from t1 force index(key1) where col1 is null or col1 < -7; +select col1, col2 from t1 force index(key1) where col1 
is null or col1 < -7; + +--echo # Try an UPDATE +select * from t1 where pk in (3,4); +update t1 set col2= col2+0.2 where pk in (3,4); +select * from t1 where pk in (3,4); + +drop table t1; + +--echo # +--echo # Try another DECIMAL-based type that takes more space +--echo # +create table t1 ( + pk int not null primary key, + col1 decimal (12,6) signed, + col2 decimal (12,6) unsigned, + filler varchar(100), + key key1(col1, col2) +)engine=rocksdb; +insert into t1 values +(1,-900.001, 000.007, 'filler'), +(2,-700.002, 100.006, 'filler'), +(3, 000.003, 200.005, 'filler'), +(4, 100.004, 300.004, 'filler'), +(5, 200.005, 400.003, 'filler'), +(6, 300.003, 500.003, 'filler'); +insert into t1 select pk+100, col1+20000, col2+20000, 'extra-data' from t1; +insert into t1 select pk+200, col1+20000, col2+20000, 'extra-data' from t1; +insert into t1 select pk+1000, col1+20000, col2+20000, 'extra-data' from t1; +insert into t1 select pk+10000, col1+20000, col2+20000, 'extra-data' from t1; +insert into t1 select pk+100000, col1+20000, col2+20000, 'extra-data' from t1; +analyze table t1; + +--replace_column 9 # +explain +select col1, col2 from t1 force index(key1) where col1 between -800 and 800; +select col1, col2 from t1 force index(key1) where col1 between -800 and 800; +drop table t1; + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/type_enum.inc b/storage/rocksdb/mysql-test/rocksdb/t/type_enum.inc new file mode 100644 index 0000000000000..8184f6261cccf --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/type_enum.inc @@ -0,0 +1,50 @@ +# +# ENUM column type +# + +--disable_warnings +DROP TABLE IF EXISTS t1; +--enable_warnings + +# Valid values. 
+# We cannot test the maximum of 65,536 here, +# because mysqltest has the standard limit of MAX_QUERY=256K; +# but we can at least try 257 + +eval CREATE TABLE t1 ( + a ENUM('') $extra_col_opts, + b ENUM('test1','test2','test3','test4','test5') $extra_col_opts, + c ENUM('1','2','3','4','5','6','7','8','9','a','b','c','d','e','f','g','h','i','j','k','l','m','n','o','p','q','r','s','t','u','v','w','x','y','z',' ','11','12','13','14','15','16','17','18','19','1a','1b','1c','1d','1e','1f','1g','1h','1i','1j','1k','1l','1m','1n','1o','1p','1q','1r','1s','1t','1u','1v','1w','1x','1y','1z','20','21','22','23','24','25','26','27','28','29','2a','2b','2c','2d','2e','2f','2g','2h','2i','2j','2k','2l','2m','2n','2o','2p','2q','2r','2s','2t','2u','2v','2w','2x','2y','2z','30','31','32','33','34','35','36','37','38','39','3a','3b','3c','3d','3e','3f','3g','3h','3i','3j','3k','3l','3m','3n','3o','3p','3q','3r','3s','3t','3u','3v','3w','3x','3y','3z','40','41','42','43','44','45','46','47','48','49','4a','4b','4c','4d','4e','4f','4g','4h','4i','4j','4k','4l','4m','4n','4o','4p','4q','4r','4s','4t','4u','4v','4w','4x','4y','4z','50','51','52','53','54','55','56','57','58','59','5a','5b','5c','5d','5e','5f','5g','5h','5i','5j','5k','5l','5m','5n','5o','5p','5q','5r','5s','5t','5u','5v','5w','5x','5y','5z','60','61','62','63','64','65','66','67','68','69','6a','6b','6c','6d','6e','6f','6g','6h','6i','6j','6k','6l','6m','6n','6o','6p','6q','6r','6s','6t','6u','6v','6w','6x','6y','6z','70','71','72','73','74','75') $extra_col_opts, + PRIMARY KEY (b) +) ENGINE=rocksdb; + +SHOW COLUMNS IN t1; + +INSERT INTO t1 (a,b,c) VALUES ('','test2','4'),('',5,2); +SELECT a,b,c FROM t1; + +# Out of range values +# (should produce warnings) + +INSERT INTO t1 (a,b,c) VALUES (0,'test6',-1); +--sorted_result +SELECT a,b,c FROM t1; + +# Non-unique values in enum +# (should produce a warning) +eval ALTER TABLE t1 ADD COLUMN e ENUM('a','A') $extra_col_opts; +SHOW COLUMNS IN t1; + +INSERT INTO t1 (a,b,c,e) 
VALUES ('','test3','75','A'); +--sorted_result +SELECT a,b,c,e FROM t1; + +# Simple comparison + +--sorted_result +SELECT a,b,c,e FROM t1 WHERE b='test2' OR a != ''; + +DROP TABLE t1; + + + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/type_enum.test b/storage/rocksdb/mysql-test/rocksdb/t/type_enum.test new file mode 100644 index 0000000000000..d79469b2fad25 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/type_enum.test @@ -0,0 +1,8 @@ +--source include/have_rocksdb.inc + +# +# ENUM column type +# + +--source type_enum.inc + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/type_enum_indexes-master.opt b/storage/rocksdb/mysql-test/rocksdb/t/type_enum_indexes-master.opt new file mode 100644 index 0000000000000..ba9364e152376 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/type_enum_indexes-master.opt @@ -0,0 +1 @@ +--rocksdb_debug_optimizer_n_rows=1000 diff --git a/storage/rocksdb/mysql-test/rocksdb/t/type_enum_indexes.test b/storage/rocksdb/mysql-test/rocksdb/t/type_enum_indexes.test new file mode 100644 index 0000000000000..d7086a45fe158 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/type_enum_indexes.test @@ -0,0 +1,93 @@ +--source include/have_rocksdb.inc + +# +# ENUM columns with indexes +# + +SET @ORIG_PAUSE_BACKGROUND_WORK = @@ROCKSDB_PAUSE_BACKGROUND_WORK; +SET GLOBAL ROCKSDB_PAUSE_BACKGROUND_WORK = 1; + +--disable_warnings +DROP TABLE IF EXISTS t1; +--enable_warnings + +--disable_parsing +--error ER_GET_ERRMSG +CREATE TABLE t1 ( + a ENUM('N.America','S.America','Africa','Europe','Australia','Asia','Antarctica'), + b ENUM('test1','test2','test3','test4','test5'), + c ENUM('1a','1b','1d','1j','4a','4z','5a','5b','6v','6z'), + pk ENUM('1','2','3','4','5','6','7','8','9') PRIMARY KEY, + UNIQUE KEY a_b (a,b) +) ENGINE=rocksdb; + + +INSERT INTO t1 (a,b,c,pk) VALUES +('N.America','test1','5a',1),('Europe','test1','5b',2),('Europe','test2','6v',3), +('Africa','test3','4z',4),('Africa','test4','1j',5),('Antarctica','test4','1d',6); + 
+SHOW INDEX IN t1; + +--replace_column 9 # +EXPLAIN SELECT a FROM t1 WHERE b > 'test2' ORDER BY a; +SELECT a FROM t1 WHERE b > 'test2' ORDER BY a; + +--replace_column 9 # +EXPLAIN SELECT a FROM t1 FORCE INDEX (a_b) WHERE b > 'test2' ORDER BY a; +SELECT a FROM t1 FORCE INDEX (a_b) WHERE b > 'test2' ORDER BY a; + +DROP TABLE t1; + +--enable_parsing + +CREATE TABLE t1 ( + a ENUM('N.America','S.America','Africa','Europe','Australia','Asia','Antarctica'), + b ENUM('test1','test2','test3','test4','test5'), + c ENUM('1a','1b','1d','1j','4a','4z','5a','5b','6v','6z') PRIMARY KEY +) ENGINE=rocksdb; + +INSERT INTO t1 (a,b,c) VALUES +('N.America','test1','5a'),('Europe','test1','5b'),('Europe','test2','6v'), +('Africa','test3','4z'),('Africa','test4','1j'),('Antarctica','test4','1d'); + +SHOW INDEX IN t1; + +--replace_column 9 # +EXPLAIN SELECT c FROM t1 WHERE c BETWEEN '1d' AND '6u'; +--sorted_result +SELECT c FROM t1 WHERE c BETWEEN '1d' AND '6u'; + +--replace_column 9 # +EXPLAIN SELECT c FROM t1 USE INDEX () WHERE c BETWEEN '1d' AND '6u'; +--sorted_result +SELECT c FROM t1 USE INDEX () WHERE c BETWEEN '1d' AND '6u'; + +DROP TABLE t1; + +CREATE TABLE t1 ( + a ENUM('N.America','S.America','Africa','Europe','Australia','Asia','Antarctica'), + b ENUM('test1','test2','test3','test4','test5'), + c ENUM('1a','1b','1d','1j','4a','4z','5a','5b','6v','6z'), + pk ENUM('1','2','3','4','5','6','7','8','9') PRIMARY KEY, + INDEX(b) +) ENGINE=rocksdb; + +INSERT INTO t1 (a,b,c,pk) VALUES +('N.America','test1','5a',1),('Europe','test1','5b',2),('Europe','test2','6v',3), +('Africa','test3','4z',4),('Africa','test4','1j',5),('Antarctica','test4','1d',6); + +SHOW INDEX IN t1; + +--replace_column 9 # +EXPLAIN SELECT DISTINCT b FROM t1; +--sorted_result +SELECT DISTINCT b FROM t1; + +--replace_column 9 # +EXPLAIN SELECT DISTINCT b FROM t1 IGNORE INDEX (b); +--sorted_result +SELECT DISTINCT b FROM t1 IGNORE INDEX (b); + +DROP TABLE t1; + +SET GLOBAL ROCKSDB_PAUSE_BACKGROUND_WORK = 
@ORIG_PAUSE_BACKGROUND_WORK; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/type_fixed.inc b/storage/rocksdb/mysql-test/rocksdb/t/type_fixed.inc new file mode 100644 index 0000000000000..424f7c4f4ac2c --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/type_fixed.inc @@ -0,0 +1,85 @@ +# +# Fixed point types +# + +--disable_warnings +DROP TABLE IF EXISTS t1; +--enable_warnings + +eval CREATE TABLE t1 ( + d DECIMAL $extra_col_opts, + d0 DECIMAL(0) $extra_col_opts, + d1_1 DECIMAL(1,1) $extra_col_opts, + d10_2 DECIMAL(10,2) $extra_col_opts, + d60_10 DECIMAL(60,10) $extra_col_opts, + n NUMERIC $extra_col_opts, + n0_0 NUMERIC(0,0) $extra_col_opts, + n1 NUMERIC(1) $extra_col_opts, + n20_4 NUMERIC(20,4) $extra_col_opts, + n65_4 NUMERIC(65,4) $extra_col_opts, + pk NUMERIC $extra_col_opts PRIMARY KEY +) ENGINE=rocksdb; + +SHOW COLUMNS IN t1; + +# Always valid values + +INSERT INTO t1 (d,d0,d1_1,d10_2,d60_10,n,n0_0,n1,n20_4,n65_4,pk) VALUES (100,123456,0.3,40000.25,123456789123456789.10001,1024,7000.0,8.0,999999.9,9223372036854775807,1); +INSERT INTO t1 (d,d0,d1_1,d10_2,d60_10,n,n0_0,n1,n20_4,n65_4,pk) VALUES (0,0,0,0,0,0,0,0,0,0,2); +INSERT INTO t1 (d,d0,d1_1,d10_2,d60_10,n,n0_0,n1,n20_4,n65_4,pk) VALUES (9999999999.0,9999999999.0,0.9,99999999.99,99999999999999999999999999999999999999999999999999.9999999999,9999999999.0,9999999999.0,9.0,9999999999999999.9999,9999999999999999999999999999999999999999999999999999999999999.9999,3); + +--sorted_result +SELECT d,d0,d1_1,d10_2,d60_10,n,n0_0,n1,n20_4,n65_4 FROM t1; + +# Values which can be valid or not, +# depending on whether columns are SIGNED or UNSIGNED +# (if not valid should produce warnings) + +INSERT INTO t1 (d,d0,d1_1,d10_2,d60_10,n,n0_0,n1,n20_4,n65_4,pk) VALUES (-100,-123456,-0.3,-40000.25,-123456789123456789.10001,-1024,-7000.0,-8.0,-999999.9,-9223372036854775807,4); +INSERT INTO t1 (d,d0,d1_1,d10_2,d60_10,n,n0_0,n1,n20_4,n65_4,pk) VALUES 
(-9999999999.0,-9999999999.0,-0.9,-99999999.99,-99999999999999999999999999999999999999999999999999.9999999999,-9999999999.0,-9999999999.0,-9.0,-9999999999999999.9999,-9999999999999999999999999999999999999999999999999999999999999.9999,5); + +--sorted_result +SELECT d,d0,d1_1,d10_2,d60_10,n,n0_0,n1,n20_4,n65_4 FROM t1; + +--sorted_result +SELECT d,d0,d1_1,d10_2,d60_10,n,n0_0,n1,n20_4,n65_4 FROM t1 WHERE n20_4 = 9999999999999999.9999 OR d < 100; + +# Invalid values + +INSERT INTO t1 (d,d0,d1_1,d10_2,d60_10,n,n0_0,n1,n20_4,n65_4,pk) VALUES ( + 9999999999999999999999999999999999999999999999999999999999999.9999, + 9999999999999999999999999999999999999999999999999999999999999.9999, + 9999999999999999999999999999999999999999999999999999999999999.9999, + 9999999999999999999999999999999999999999999999999999999999999.9999, + 9999999999999999999999999999999999999999999999999999999999999.9999, + 9999999999999999999999999999999999999999999999999999999999999.9999, + 9999999999999999999999999999999999999999999999999999999999999.9999, + 9999999999999999999999999999999999999999999999999999999999999.9999, + 9999999999999999999999999999999999999999999999999999999999999.9999, + 9999999999999999999999999999999999999999999999999999999999999.9999, + 6 +); + +--sorted_result +SELECT d,d0,d1_1,d10_2,d60_10,n,n0_0,n1,n20_4,n65_4 FROM t1; + +INSERT INTO t1 (d,d0,d1_1,d10_2,d60_10,n,n0_0,n1,n20_4,n65_4,pk) VALUES (10000000000.0,10000000000.0,1.1,100000000.99,100000000000000000000000000000000000000000000000000.0,10000000000.0,10000000000.0,10.0,10000000000000000.9999,10000000000000000000000000000000000000000000000000000000000000.9999,7); +--sorted_result +SELECT d,d0,d1_1,d10_2,d60_10,n,n0_0,n1,n20_4,n65_4 FROM t1; + +INSERT INTO t1 (d,d0,d1_1,d10_2,d60_10,n,n0_0,n1,n20_4,n65_4,pk) VALUES 
(9999999999.1,9999999999.1,1.9,99999999.001,99999999999999999999999999999999999999999999999999.99999999991,9999999999.1,9999999999.1,9.1,9999999999999999.00001,9999999999999999999999999999999999999999999999999999999999999.11111,8); +--sorted_result +SELECT d,d0,d1_1,d10_2,d60_10,n,n0_0,n1,n20_4,n65_4 FROM t1; + +--error ER_TOO_BIG_PRECISION +eval ALTER TABLE t1 ADD COLUMN n66 NUMERIC(66) $extra_col_opts; + +--error ER_TOO_BIG_PRECISION +eval ALTER TABLE t1 ADD COLUMN n66_6 DECIMAL(66,6) $extra_col_opts; + +--error ER_TOO_BIG_SCALE +eval ALTER TABLE t1 ADD COLUMN n66_66 DECIMAL(66,66) $extra_col_opts; + +DROP TABLE t1; + + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/type_fixed.test b/storage/rocksdb/mysql-test/rocksdb/t/type_fixed.test new file mode 100644 index 0000000000000..4f2c09d17c1b5 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/type_fixed.test @@ -0,0 +1,8 @@ +--source include/have_rocksdb.inc + +# +# Fixed point types +# + +--source type_fixed.inc + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/type_fixed_indexes-master.opt b/storage/rocksdb/mysql-test/rocksdb/t/type_fixed_indexes-master.opt new file mode 100644 index 0000000000000..ba9364e152376 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/type_fixed_indexes-master.opt @@ -0,0 +1 @@ +--rocksdb_debug_optimizer_n_rows=1000 diff --git a/storage/rocksdb/mysql-test/rocksdb/t/type_fixed_indexes.test b/storage/rocksdb/mysql-test/rocksdb/t/type_fixed_indexes.test new file mode 100644 index 0000000000000..e9e6df58d21ad --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/type_fixed_indexes.test @@ -0,0 +1,107 @@ +--source include/have_rocksdb.inc + +# +# Fixed point columns with indexes +# + +SET @ORIG_PAUSE_BACKGROUND_WORK = @@ROCKSDB_PAUSE_BACKGROUND_WORK; +SET GLOBAL ROCKSDB_PAUSE_BACKGROUND_WORK = 1; + +--disable_warnings +DROP TABLE IF EXISTS t1; +--enable_warnings + + +CREATE TABLE t1 ( + d1 DECIMAL(10,2) PRIMARY KEY, + d2 DECIMAL(60,10), + n1 NUMERIC, + n2 NUMERIC(65,4) 
+) ENGINE=rocksdb; + +SHOW INDEX IN t1; + +INSERT INTO t1 (d1,d2,n1,n2) VALUES +(10.22,60.12345,123456,14.3456), +(10.0,60.12345,123456,14), +(11.14,15,123456,13), +(100,100,1,2), +(0,0,0,0), +(4540424564.23,3343303441.0,12,13), +(15,17,23,100000); + +--replace_column 9 # +EXPLAIN SELECT d1 FROM t1 ORDER BY d1 DESC; +SELECT d1 FROM t1 ORDER BY d1 DESC; + +--replace_column 9 # +EXPLAIN SELECT d1 FROM t1 IGNORE INDEX FOR ORDER BY (PRIMARY) ORDER BY d1 DESC; +SELECT d1 FROM t1 IGNORE INDEX FOR ORDER BY (PRIMARY) ORDER BY d1 DESC; + +DROP TABLE t1; + +# --error ER_GET_ERRMSG +CREATE TABLE t1 ( + d1 DECIMAL(10,2), + d2 DECIMAL(60,10), + n1 NUMERIC, + n2 NUMERIC(65,4), + pk NUMERIC PRIMARY KEY, + UNIQUE INDEX n1_n2 (n1,n2) +) ENGINE=rocksdb; + +# --disable_parsing + +SHOW INDEX IN t1; + +INSERT INTO t1 (d1,d2,n1,n2,pk) VALUES +(10.22,60.12345,123456,14.3456,1), +(10.0,60.12345,123456,14,2), +(11.14,15,123456,13,3), +(100,100,1,2,4), +(0,0,0,0,5), +(4540424564.23,3343303441.0,12,13,6), +(15,17,23,100000,7); + +--replace_column 9 # +EXPLAIN SELECT DISTINCT n1+n2 FROM t1; +--sorted_result +SELECT DISTINCT n1+n2 FROM t1; + +DROP TABLE t1; + +#--enable_parsing + +CREATE TABLE t1 ( + d1 DECIMAL(10,2), + d2 DECIMAL(60,10), + n1 NUMERIC, + n2 NUMERIC(65,4), + pk DECIMAL(20,10) PRIMARY KEY, + INDEX (d2) +) ENGINE=rocksdb; + +SHOW INDEX IN t1; + +INSERT INTO t1 (d1,d2,n1,n2,pk) VALUES +(10.22,60.12345,123456,14.3456,1), +(10.0,60.12345,123456,14,2), +(11.14,15,123456,13,3), +(100,100,1,2,4), +(0,0,0,0,5), +(4540424564.23,3343303441.0,12,13,6), +(15,17,23,100000,7); + +--replace_column 9 # +EXPLAIN SELECT d2, COUNT(*) FROM t1 GROUP BY d2; +--sorted_result +SELECT d2, COUNT(*) FROM t1 GROUP BY d2; + +--replace_column 9 # +EXPLAIN SELECT d2, COUNT(*) FROM t1 IGNORE INDEX FOR GROUP BY (d2) GROUP BY d2; +--sorted_result +SELECT d2, COUNT(*) FROM t1 IGNORE INDEX FOR GROUP BY (d2) GROUP BY d2; + +DROP TABLE t1; + +SET GLOBAL ROCKSDB_PAUSE_BACKGROUND_WORK = @ORIG_PAUSE_BACKGROUND_WORK; 
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/type_float.inc b/storage/rocksdb/mysql-test/rocksdb/t/type_float.inc new file mode 100644 index 0000000000000..8403b088f9d38 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/type_float.inc @@ -0,0 +1,110 @@ +# +# Float types +# + +--disable_warnings +DROP TABLE IF EXISTS t1; +--enable_warnings + +eval CREATE TABLE t1 ( + f FLOAT $extra_col_opts, + f0 FLOAT(0) $extra_col_opts, + r1_1 REAL(1,1) $extra_col_opts, + f23_0 FLOAT(23) $extra_col_opts, + f20_3 FLOAT(20,3) $extra_col_opts, + d DOUBLE $extra_col_opts, + d1_0 DOUBLE(1,0) $extra_col_opts, + d10_10 DOUBLE PRECISION (10,10) $extra_col_opts, + d53 DOUBLE(53,0) $extra_col_opts, + d53_10 DOUBLE(53,10) $extra_col_opts, + pk DOUBLE $extra_col_opts PRIMARY KEY +) ENGINE=rocksdb; + +SHOW COLUMNS IN t1; + +# Always valid values + +INSERT INTO t1 (f,f0,r1_1,f23_0,f20_3,d,d1_0,d10_10,d53,d53_10,pk) VALUES (12345.12345,12345.12345,0.9,123456789.123,56789.987,11111111.111,8.0,0.0123456789,1234566789123456789,99999999999999999.99999999,1); + +--sorted_result +--query_vertical SELECT f,f0,r1_1,f23_0,f20_3,d,d1_0,d10_10,d53,d53_10 FROM t1 + +INSERT INTO t1 (f,f0,r1_1,f23_0,f20_3,d,d1_0,d10_10,d53,d53_10,pk) VALUES (0,0,0,0,0,0,0,0,0,0,2); +INSERT INTO t1 (f,f0,r1_1,f23_0,f20_3,d,d1_0,d10_10,d53,d53_10,pk) VALUES ( + 99999999999999999999999999999999999999, + 99999999999999999999999999999999999999.9999999999999999, + 0.9, + 99999999999999999999999999999999999999.9, + 99999999999999999.999, + 999999999999999999999999999999999999999999999999999999999999999999999999999999999, + 9, + 0.9999999999, + 1999999999999999999999999999999999999999999999999999999, + 19999999999999999999999999999999999999999999.9999999999, + 3 +); + +--sorted_result +--query_vertical SELECT f,f0,r1_1,f23_0,f20_3,d,d1_0,d10_10,d53,d53_10 FROM t1 + +# Values which can be valid or not, +# depending on whether columns are SIGNED or UNSIGNED +# (if not valid should produce warnings) + +INSERT INTO t1 
(f,f0,r1_1,f23_0,f20_3,d,d1_0,d10_10,d53,d53_10,pk) VALUES (-999999999999999999999999,-99999999999.999999999999,-0.9,-999.99999999999999999999,-99999999999999999.999,-999999999999999999999999999999999999999999999999999999999999-0.999,-9,-.9999999999,-999999999999999999999999999999.99999999999999999999999,-9999999999999999999999999999999999999999999.9999999999,4); + +--sorted_result +--query_vertical SELECT f,f0,r1_1,f23_0,f20_3,d,d1_0,d10_10,d53,d53_10 FROM t1 + +--sorted_result +--query_vertical SELECT MAX(f), MAX(f0), MAX(r1_1), MAX(f23_0), MAX(f20_3), MAX(d), MAX(d1_0), MAX(d10_10), MAX(d53), MAX(d53_10) FROM t1 + +# Invalid values + +INSERT INTO t1 (f,f0,r1_1,f23_0,f20_3,d,d1_0,d10_10,d53,d53_10,pk) VALUES ( + 9999999999999999999999999999999999999999999999999999999999999.9999, + 9999999999999999999999999999999999999999999999999999999999999.9999, + 9999999999999999999999999999999999999999999999999999999999999.9999, + 9999999999999999999999999999999999999999999999999999999999999.9999, + 9999999999999999999999999999999999999999999999999999999999999.9999, + 9999999999999999999999999999999999999999999999999999999999999.9999, + 9999999999999999999999999999999999999999999999999999999999999.9999, + 9999999999999999999999999999999999999999999999999999999999999.9999, + 9999999999999999999999999999999999999999999999999999999999999.9999, + 9999999999999999999999999999999999999999999999999999999999999.9999, + 5 +); + +--sorted_result +--query_vertical SELECT f,f0,r1_1,f23_0,f20_3,d,d1_0,d10_10,d53,d53_10 FROM t1 + +INSERT INTO t1 (f,f0,r1_1,f23_0,f20_3,d,d1_0,d10_10,d53,d53_10,pk) VALUES ( + 999999999999999999999999999999999999999, + 999999999999999999999999999999999999999.9999999999999999, + 1.9, + 999999999999999999999999999999999999999.9, + 999999999999999999.999, + 9999999999999999999999999999999999999999999999999999999999999999999999999999999999, + 99, + 1.9999999999, + 1999999999999999999999999999999999999999999999999999999, + 
19999999999999999999999999999999999999999999.9999999999, + 6 +); + +--sorted_result +--query_vertical SELECT f,f0,r1_1,f23_0,f20_3,d,d1_0,d10_10,d53,d53_10 FROM t1 + +# Error occurs on MySQL but not on MariaDB: +# --error ER_TOO_BIG_DISPLAYWIDTH +eval ALTER TABLE t1 ADD COLUMN d0_0 DOUBLE(0,0) $extra_col_opts; + +--error ER_TOO_BIG_PRECISION +eval ALTER TABLE t1 ADD COLUMN n66_6 DECIMAL(256,1) $extra_col_opts; + +# Error occurs on MySQL but not on MariaDB: +# --error ER_TOO_BIG_SCALE +eval ALTER TABLE t1 ADD COLUMN n66_66 DECIMAL(40,35) $extra_col_opts; + +DROP TABLE t1; + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/type_float.test b/storage/rocksdb/mysql-test/rocksdb/t/type_float.test new file mode 100644 index 0000000000000..1f1849992ca2f --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/type_float.test @@ -0,0 +1,8 @@ +--source include/have_rocksdb.inc + +# +# Float types +# + +--source type_float.inc + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/type_float_indexes-master.opt b/storage/rocksdb/mysql-test/rocksdb/t/type_float_indexes-master.opt new file mode 100644 index 0000000000000..ba9364e152376 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/type_float_indexes-master.opt @@ -0,0 +1 @@ +--rocksdb_debug_optimizer_n_rows=1000 diff --git a/storage/rocksdb/mysql-test/rocksdb/t/type_float_indexes.test b/storage/rocksdb/mysql-test/rocksdb/t/type_float_indexes.test new file mode 100644 index 0000000000000..907bc614d4999 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/type_float_indexes.test @@ -0,0 +1,175 @@ +--source include/have_rocksdb.inc + +# +# Float type columns with indexes +# + +SET @ORIG_PAUSE_BACKGROUND_WORK = @@ROCKSDB_PAUSE_BACKGROUND_WORK; +SET GLOBAL ROCKSDB_PAUSE_BACKGROUND_WORK = 1; + +--disable_warnings +DROP TABLE IF EXISTS t1; +--enable_warnings + +CREATE TABLE t1 ( + f FLOAT PRIMARY KEY, + r REAL(20,3), + d DOUBLE, + dp DOUBLE PRECISION (64,10) +) ENGINE=rocksdb; + +SHOW INDEX IN t1; + +INSERT INTO t1 
(f,r,d,dp) VALUES +(1.2345,1422.22,1.2345,1234567.89), +(0,0,0,0), +(-1,-1,-1,-1), +(17.5843,4953453454.44,29229114.0,1111111.23), +(4644,1422.22,466664.999,0.5); + +--replace_column 9 # +EXPLAIN SELECT f FROM t1 ORDER BY f; +SELECT f FROM t1 ORDER BY f; + +--replace_column 9 # +EXPLAIN SELECT f FROM t1 IGNORE INDEX (PRIMARY) ORDER BY f; +SELECT f FROM t1 IGNORE INDEX (PRIMARY) ORDER BY f; + +DROP TABLE t1; + +#--error ER_GET_ERRMSG +CREATE TABLE t1 ( + f FLOAT, + r REAL(20,3), + d DOUBLE, + dp DOUBLE PRECISION (64,10), + pk DOUBLE PRIMARY KEY, + UNIQUE KEY r_dp (r,dp) +) ENGINE=rocksdb; + +#--disable_parsing + +SHOW INDEX IN t1; + +INSERT INTO t1 (f,r,d,dp,pk) VALUES +(1.2345,1422.22,1.2345,1234567.89,1), +(0,0,0,0,2), +(-1,-1,-1,-1,3), +(17.5843,4953453454.44,29229114.0,1111111.23,4), +(4644,1422.22,466664.999,0.5,5); + +--replace_column 9 # +EXPLAIN SELECT r, dp FROM t1 WHERE r > 0 or dp > 0; +--sorted_result +SELECT r, dp FROM t1 WHERE r > 0 or dp > 0; + +DROP TABLE t1; + +CREATE TABLE t1 ( + f FLOAT, + r REAL(20,3), + d DOUBLE, + dp DOUBLE PRECISION (64,10), + pk FLOAT PRIMARY KEY, + UNIQUE KEY(d) +) ENGINE=rocksdb; + +SHOW INDEX IN t1; + +INSERT INTO t1 (f,r,d,dp,pk) VALUES +(1.2345,1422.22,1.2345,1234567.89,1), +(0,0,0,0,2), +(-1,-1,-1,-1,3), +(17.5843,4953453454.44,29229114.0,1111111.23,4), +(4644,1422.22,466664.999,0.5,5); + +--replace_column 9 # +EXPLAIN SELECT DISTINCT d FROM t1 ORDER BY d; +SELECT DISTINCT d FROM t1 ORDER BY d; + +DROP TABLE t1; + +#--enable_parsing + +CREATE TABLE t1 ( + f FLOAT, + r REAL(20,3), + d DOUBLE, + dp DOUBLE PRECISION (64,10), + pk FLOAT PRIMARY KEY, + KEY(d) +) ENGINE=rocksdb; + +SHOW INDEX IN t1; + +INSERT INTO t1 (f,r,d,dp,pk) VALUES +(1.2345,1422.22,1.2345,1234567.89,1), +(0,0,0,0,2), +(-1,-1,-1,-1,3), +(17.5843,4953453454.44,29229114.0,1111111.23,4), +(4644,1422.22,466664.999,0.5,5); + +--replace_column 9 # +EXPLAIN SELECT DISTINCT d FROM t1 ORDER BY d; +SELECT DISTINCT d FROM t1 ORDER BY d; + +DROP TABLE t1; + +CREATE 
TABLE t1 ( + f FLOAT, + r REAL(20,3), + d DOUBLE, + dp DOUBLE PRECISION (64,10), + pk FLOAT PRIMARY KEY, + UNIQUE KEY(f) +) ENGINE=rocksdb; + +SHOW INDEX IN t1; + +INSERT INTO t1 (f,r,d,dp,pk) VALUES +(1.2345,1422.22,1.2345,1234567.89,1), +(0,0,0,0,2), +(-1,-1,-1,-1,3), +(17.5843,4953453454.44,29229114.0,1111111.23,4), +(4644,1422.22,466664.999,0.5,5); + +# Should fail because of 'unique' constraint +--error ER_DUP_ENTRY +INSERT INTO t1 (f,r,d,dp,pk) VALUES +(1.2345,0,0,0,6); + +--replace_column 9 # +EXPLAIN SELECT DISTINCT f FROM t1 ORDER BY f; +SELECT DISTINCT f FROM t1 ORDER BY f; + +DROP TABLE t1; + +CREATE TABLE t1 ( + f FLOAT, + r REAL(20,3), + d DOUBLE, + dp DOUBLE PRECISION (64,10), + pk FLOAT PRIMARY KEY, + KEY(f) +) ENGINE=rocksdb; + +SHOW INDEX IN t1; + +INSERT INTO t1 (f,r,d,dp,pk) VALUES +(1.2345,1422.22,1.2345,1234567.89,1), +(0,0,0,0,2), +(-1,-1,-1,-1,3), +(17.5843,4953453454.44,29229114.0,1111111.23,4), +(4644,1422.22,466664.999,0.5,5); + +# Should succeed because of no 'unique' constraint +INSERT INTO t1 (f,r,d,dp,pk) VALUES +(1.2345,0,0,0,6); + +--replace_column 9 # +EXPLAIN SELECT DISTINCT f FROM t1 ORDER BY f; +SELECT DISTINCT f FROM t1 ORDER BY f; + +DROP TABLE t1; + +SET GLOBAL ROCKSDB_PAUSE_BACKGROUND_WORK = @ORIG_PAUSE_BACKGROUND_WORK; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/type_int.inc b/storage/rocksdb/mysql-test/rocksdb/t/type_int.inc new file mode 100644 index 0000000000000..dbcdfe4fbdd80 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/type_int.inc @@ -0,0 +1,68 @@ +# +# INT column types +# + +--disable_warnings +DROP TABLE IF EXISTS t1; +--enable_warnings + +eval CREATE TABLE t1 ( + i INT $extra_col_opts, + i0 INT(0) $extra_col_opts, + i1 INT(1) $extra_col_opts, + i20 INT(20) $extra_col_opts, + t TINYINT $extra_col_opts, + t0 TINYINT(0) $extra_col_opts, + t1 TINYINT(1) $extra_col_opts, + t20 TINYINT(20) $extra_col_opts, + s SMALLINT $extra_col_opts, + s0 SMALLINT(0) $extra_col_opts, + s1 SMALLINT(1) $extra_col_opts, 
+ s20 SMALLINT(20) $extra_col_opts, + m MEDIUMINT $extra_col_opts, + m0 MEDIUMINT(0) $extra_col_opts, + m1 MEDIUMINT(1) $extra_col_opts, + m20 MEDIUMINT(20) $extra_col_opts, + b BIGINT $extra_col_opts, + b0 BIGINT(0) $extra_col_opts, + b1 BIGINT(1) $extra_col_opts, + b20 BIGINT(20) $extra_col_opts, + pk INT AUTO_INCREMENT PRIMARY KEY +) ENGINE=rocksdb; + +SHOW COLUMNS IN t1; + +# Always valid values + +INSERT INTO t1 (i,i0,i1,i20,t,t0,t1,t20,s,s0,s1,s20,m,m0,m1,m20,b,b0,b1,b20) VALUES (1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20); +INSERT INTO t1 (i,i0,i1,i20,t,t0,t1,t20,s,s0,s1,s20,m,m0,m1,m20,b,b0,b1,b20) VALUES (0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0); +INSERT INTO t1 (i,i0,i1,i20,t,t0,t1,t20,s,s0,s1,s20,m,m0,m1,m20,b,b0,b1,b20) VALUES (2147483647,2147483647,2147483647,2147483647,127,127,127,127,32767,32767,32767,32767,8388607,8388607,8388607,8388607,9223372036854775807,9223372036854775807,9223372036854775807,9223372036854775807); +--sorted_result +SELECT i,i0,i1,i20,t,t0,t1,t20,s,s0,s1,s20,m,m0,m1,m20,b,b0,b1,b20 FROM t1; + +# Values which can be valid or not, +# depending on whether columns are SIGNED or UNSIGNED +# (if not valid should produce warnings) + +INSERT INTO t1 (i,i0,i1,i20,t,t0,t1,t20,s,s0,s1,s20,m,m0,m1,m20,b,b0,b1,b20) VALUES (-2147483648,-2147483648,-2147483648,-2147483648,-128,-128,-128,-128,-32768,-32768,-32768,-32768,-8388608,-8388608,-8388608,-8388608,-9223372036854775808,-9223372036854775808,-9223372036854775808,-9223372036854775808); +INSERT INTO t1 (i,i0,i1,i20,t,t0,t1,t20,s,s0,s1,s20,m,m0,m1,m20,b,b0,b1,b20) VALUES (4294967295,4294967295,4294967295,4294967295,255,255,255,255,65535,65535,65535,65535,16777215,16777215,16777215,16777215,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615); + +--sorted_result +SELECT i,i0,i1,i20,t,t0,t1,t20,s,s0,s1,s20,m,m0,m1,m20,b,b0,b1,b20 FROM t1; + +# Invalid values + +INSERT INTO t1 (i,i0,i1,i20,t,t0,t1,t20,s,s0,s1,s20,m,m0,m1,m20,b,b0,b1,b20) VALUES 
(-2147483649,-2147483649,-2147483649,-2147483649,-129,-129,-129,-129,-32769,-32769,-32769,-32769,-8388609,-8388609,-8388609,-8388609,-9223372036854775809,-9223372036854775809,-9223372036854775809,-9223372036854775809); + +INSERT INTO t1 (i,i0,i1,i20,t,t0,t1,t20,s,s0,s1,s20,m,m0,m1,m20,b,b0,b1,b20) VALUES (4294967296,4294967296,4294967296,4294967296,256,256,256,256,65536,65536,65536,65536,16777216,16777216,16777216,16777216,18446744073709551616,18446744073709551616,18446744073709551616,18446744073709551616); + +INSERT INTO t1 (i,i0,i1,i20,t,t0,t1,t20,s,s0,s1,s20,m,m0,m1,m20,b,b0,b1,b20) SELECT b,b,b,b,b,b,b,b,b,b,b,b,b,b,b,b,b,b,b,b FROM t1 WHERE b IN (-9223372036854775808,9223372036854775807,18446744073709551615); + +--sorted_result +SELECT i,i0,i1,i20,t,t0,t1,t20,s,s0,s1,s20,m,m0,m1,m20,b,b0,b1,b20 FROM t1; + +--error ER_TOO_BIG_DISPLAYWIDTH +eval ALTER TABLE t1 ADD COLUMN i257 INT(257) $extra_col_opts; + +DROP TABLE t1; + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/type_int.test b/storage/rocksdb/mysql-test/rocksdb/t/type_int.test new file mode 100644 index 0000000000000..e92f66921722a --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/type_int.test @@ -0,0 +1,8 @@ +--source include/have_rocksdb.inc + +# +# INT column types +# + +--source type_int.inc + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/type_int_indexes-master.opt b/storage/rocksdb/mysql-test/rocksdb/t/type_int_indexes-master.opt new file mode 100644 index 0000000000000..ba9364e152376 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/type_int_indexes-master.opt @@ -0,0 +1 @@ +--rocksdb_debug_optimizer_n_rows=1000 diff --git a/storage/rocksdb/mysql-test/rocksdb/t/type_int_indexes.test b/storage/rocksdb/mysql-test/rocksdb/t/type_int_indexes.test new file mode 100644 index 0000000000000..c95c3d88b8a05 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/type_int_indexes.test @@ -0,0 +1,75 @@ +--source include/have_rocksdb.inc + +# +# INT column types with indexes +# + +SET 
@ORIG_PAUSE_BACKGROUND_WORK = @@ROCKSDB_PAUSE_BACKGROUND_WORK; +SET GLOBAL ROCKSDB_PAUSE_BACKGROUND_WORK = 1; + +--disable_warnings +DROP TABLE IF EXISTS t1; +--enable_warnings + +CREATE TABLE t1 ( + i INT PRIMARY KEY, + t TINYINT, + s SMALLINT, + m MEDIUMINT, + b BIGINT +) ENGINE=rocksdb; + +INSERT INTO t1 (i,t,s,m,b) VALUES (1,2,3,4,5),(1000,100,10000,1000000,1000000000000000000),(5,100,10000,1000000,100000000000000000),(2,3,4,5,6),(3,4,5,6,7),(101,102,103,104,105),(10001,103,10002,10003,10004),(10,11,12,13,14),(11,12,13,14,15),(12,13,14,15,16); + +--replace_column 9 # +EXPLAIN SELECT i FROM t1 ORDER BY i; +SELECT i FROM t1 ORDER BY i; + +DROP TABLE t1; + +CREATE TABLE t1 ( + i INT, + t TINYINT, + s SMALLINT, + m MEDIUMINT, + b BIGINT, + pk SMALLINT AUTO_INCREMENT PRIMARY KEY, + INDEX s_m (s,m) +) ENGINE=rocksdb; + +INSERT INTO t1 (i,t,s,m,b) VALUES (1,2,3,4,5),(1000,100,10000,1000000,1000000000000000000),(5,100,10000,1000000,100000000000000000),(2,3,4,5,6),(3,4,5,6,7),(101,102,103,104,105),(10001,103,10002,10003,10004),(10,11,12,13,14),(11,12,13,14,15),(12,13,14,15,16); + +--replace_column 9 # +EXPLAIN SELECT s, m FROM t1 WHERE s != 10 AND m != 1; +--sorted_result +SELECT s, m FROM t1 WHERE s != 10 AND m != 1; + +DROP TABLE t1; + +--echo # RocksDB: unique indexes allowed +#--error ER_GET_ERRMSG +CREATE TABLE t1 ( + i INT, + t TINYINT, + s SMALLINT, + m MEDIUMINT, + b BIGINT, + pk MEDIUMINT AUTO_INCREMENT PRIMARY KEY, + UNIQUE KEY b_t (b,t) +) ENGINE=rocksdb; + +##--disable_parsing + +INSERT INTO t1 (i,t,s,m,b) VALUES (1,2,3,4,5),(1000,100,10000,1000000,1000000000000000000),(5,100,10000,1000000,100000000000000000),(2,3,4,5,6),(3,4,5,6,7),(101,102,103,104,105),(10001,103,10002,10003,10004),(10,11,12,13,14),(11,12,13,14,15),(12,13,14,15,16); + +# This query should use the index b_t, we just don't want to run EXPLAIN +# (to avoid mismatches due to different subquery-related plans) +SELECT b+t FROM t1 WHERE (b,t) IN ( SELECT b, t FROM t1 WHERE i>1 ) ORDER BY b+t; 
+SELECT b+t FROM t1 FORCE INDEX (b_t) WHERE (b,t) IN ( SELECT b, t FROM t1 WHERE i>1 ) ORDER BY b+t; +SELECT b+t FROM t1 IGNORE INDEX (b_t) WHERE (b,t) IN ( SELECT b, t FROM t1 WHERE i>1 ) ORDER BY b+t; + +DROP TABLE t1; + +##--enable_parsing + +SET GLOBAL ROCKSDB_PAUSE_BACKGROUND_WORK = @ORIG_PAUSE_BACKGROUND_WORK; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/type_set.inc b/storage/rocksdb/mysql-test/rocksdb/t/type_set.inc new file mode 100644 index 0000000000000..25ae58294f797 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/type_set.inc @@ -0,0 +1,49 @@ +# +# SET column type +# + +--disable_warnings +DROP TABLE IF EXISTS t1; +--enable_warnings + +# Valid values. + +eval CREATE TABLE t1 ( + a SET('') $extra_col_opts, + b SET('test1','test2','test3','test4','test5') $extra_col_opts, + c SET('01','02','03','04','05','06','07','08','09','10','11','12','13','14','15','16','17','18','19','20','21','22','23','24','25','26','27','28','29','30','31','32','33','34','35','36','37','38','39','40','41','42','43','44','45','46','47','48','49','50''51','52','53','54','55','56','57','58','59','60','61','62','63','64') $extra_col_opts, + PRIMARY KEY (c) +) ENGINE=rocksdb; + +SHOW COLUMNS IN t1; + +INSERT INTO t1 (a,b,c) VALUES +('','test2,test3','01,34,44,,23'), +('',5,2), +(',','test4,test2',''); +--sorted_result +SELECT a,b,c FROM t1; + +# Out of range values +# (should produce warnings) + +INSERT INTO t1 (a,b,c) VALUES (0,'test6',-1); +--sorted_result +SELECT a,b,c FROM t1; + +# Non-unique values in set +# (should produce a warning) +eval ALTER TABLE t1 ADD COLUMN e SET('a','A') $extra_col_opts; + +SHOW COLUMNS IN t1; + +--error ER_TOO_BIG_SET +eval ALTER TABLE t1 ADD COLUMN f SET('1','2','3','4','5','6','7','8','9','a','b','c','d','e','f','g','h','i','j','k','l','m','n','o','p','q','r','s','t','u','v','w','x','y','z',' 
','11','12','13','14','15','16','17','18','19','1a','1b','1c','1d','1e','1f','1g','1h','1i','1j','1k','1l','1m','1n','1o','1p','1q','1r','1s','1t','1u','1v','1w','1x','1y','1z','20','21','22','23','24','25','26','27','28','29','2a','2b','2c','2d','2e','2f','2g','2h','2i','2j','2k','2l','2m','2n','2o','2p','2q','2r','2s','2t','2u','2v','2w','2x','2y','2z','30','31','32','33','34','35','36','37','38','39','3a','3b','3c','3d','3e','3f','3g','3h','3i') $extra_col_opts; + +# Simple comparison + +--sorted_result +SELECT a,b,c,e FROM t1 WHERE FIND_IN_SET('test2',b)>0 OR a != ''; + +DROP TABLE t1; + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/type_set.test b/storage/rocksdb/mysql-test/rocksdb/t/type_set.test new file mode 100644 index 0000000000000..bd306668e6d4a --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/type_set.test @@ -0,0 +1,8 @@ +--source include/have_rocksdb.inc + +# +# SET column type +# + +--source type_set.inc + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/type_set_indexes-master.opt b/storage/rocksdb/mysql-test/rocksdb/t/type_set_indexes-master.opt new file mode 100644 index 0000000000000..ba9364e152376 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/type_set_indexes-master.opt @@ -0,0 +1 @@ +--rocksdb_debug_optimizer_n_rows=1000 diff --git a/storage/rocksdb/mysql-test/rocksdb/t/type_set_indexes.test b/storage/rocksdb/mysql-test/rocksdb/t/type_set_indexes.test new file mode 100644 index 0000000000000..79232255d2559 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/type_set_indexes.test @@ -0,0 +1,104 @@ +--source include/have_rocksdb.inc + +# +# SET columns with indexes +# + +SET @ORIG_PAUSE_BACKGROUND_WORK = @@ROCKSDB_PAUSE_BACKGROUND_WORK; +SET GLOBAL ROCKSDB_PAUSE_BACKGROUND_WORK = 1; + +--disable_warnings +DROP TABLE IF EXISTS t1; +--enable_warnings + +CREATE TABLE t1 ( + a SET('N.America','S.America','Africa','Antarctica','Australia','Europe','Asia'), + b SET('test1','test2','test3','test4','test5'), + c 
SET('01','22','23','33','34','39','40','44','50','63','64') PRIMARY KEY +) ENGINE=rocksdb; + +SHOW INDEX IN t1; + +INSERT INTO t1 (a,b,c) VALUES +('','test2,test3','01,34,44,23'), +('',5,2), +('N.America,Asia','test4,test2',''), +('Africa,Europe,Asia','test2,test3','01'), +('Antarctica','test3','34,44'), +('Asia','test5','50'), +('Europe,S.America','test1,','39'); + +--replace_column 9 # +EXPLAIN SELECT c FROM t1 ORDER BY c; +SELECT c FROM t1 ORDER BY c; + +--replace_column 9 # +EXPLAIN SELECT c FROM t1 IGNORE INDEX FOR ORDER BY (PRIMARY) ORDER BY c; +SELECT c FROM t1 IGNORE INDEX FOR ORDER BY (PRIMARY) ORDER BY c; + +--error ER_DUP_ENTRY +INSERT INTO t1 (a,b,c) VALUES ('Antarctica','test3','02'); + +--error ER_DUP_ENTRY +INSERT INTO t1 (a,b,c) VALUES ('','test1','34,44'); + +DROP TABLE t1; + +CREATE TABLE t1 ( + a SET('N.America','S.America','Africa','Antarctica','Australia','Europe','Asia'), + b SET('test1','test2','test3','test4','test5'), + c SET('01','22','23','33','34','39','40','44','50','63','64'), + pk SET('1','2','3','4','5','6','7','8','9') PRIMARY KEY, + INDEX(a) +) ENGINE=rocksdb; + +SHOW INDEX IN t1; + +INSERT INTO t1 (a,b,c,pk) VALUES +('','test2,test3','01,34,44,23',1), +('',5,2,2), +('N.America,Asia','test4,test2','',3), +('Africa,Europe,Asia','test2,test3','01',4), +('Antarctica','test3','34,44',5), +('Asia','test5','50',6), +('Europe,S.America','test1,','39',7); + +--replace_column 9 # +EXPLAIN SELECT a FROM t1 WHERE FIND_IN_SET('Europe',a) > 0; +--sorted_result +SELECT a FROM t1 WHERE FIND_IN_SET('Europe',a) > 0; + +DROP TABLE t1; + +--disable_parsing +#--error ER_GET_ERRMSG +CREATE TABLE t1 ( + a SET('N.America','S.America','Africa','Antarctica','Australia','Europe','Asia'), + b SET('test1','test2','test3','test4','test5'), + c SET('01','22','23','33','34','39','40','44','50','63','64'), + pk SET('1','2','3','4','5','6','7','8','9') PRIMARY KEY, + UNIQUE INDEX b_a (b,a) +) ENGINE=innodb; + + +SHOW INDEX IN t1; + +INSERT INTO t1 (a,b,c) VALUES 
+('','test2,test3','01,34,44,23'), +('',5,2), +('N.America,Asia','test4,test2',''), +('Africa,Europe,Asia','test2,test3','01'), +('Antarctica','test3','34,44'), +('Asia','test5','50'), +('Europe,S.America','test1,','39'); + +--replace_column 9 # +EXPLAIN SELECT DISTINCT a, b FROM t1 ORDER BY b DESC, a; +--sorted_result +SELECT DISTINCT a, b FROM t1 ORDER BY b DESC, a; + +DROP TABLE t1; + +--enable_parsing + +SET GLOBAL ROCKSDB_PAUSE_BACKGROUND_WORK = @ORIG_PAUSE_BACKGROUND_WORK; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/type_text.inc b/storage/rocksdb/mysql-test/rocksdb/t/type_text.inc new file mode 100644 index 0000000000000..f5c9c3b89785f --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/type_text.inc @@ -0,0 +1,49 @@ +# +# TEXT column types +# + +--disable_warnings +DROP TABLE IF EXISTS t1; +--enable_warnings + +eval CREATE TABLE t1 ( + pk INT AUTO_INCREMENT PRIMARY KEY, + t TEXT $extra_col_opts, + t0 TEXT(0) $extra_col_opts, + t1 TEXT(1) $extra_col_opts, + t300 TEXT(300) $extra_col_opts, + tm TEXT(65535) $extra_col_opts, + t70k TEXT(70000) $extra_col_opts, + t17m TEXT(17000000) $extra_col_opts, + tt TINYTEXT $extra_col_opts, + m MEDIUMTEXT $extra_col_opts, + l LONGTEXT $extra_col_opts +) ENGINE=rocksdb; + +SHOW COLUMNS IN t1; + +# Valid values +# (cannot get MAX for all columns due to max_allowed_packet limitations) + +INSERT INTO t1 (t,t0,t1,t300,tm,t70k,t17m,tt,m,l) VALUES +('','','','','','','','','',''), +('a','b','c','d','e','f','g','h','i','j'), +('test1','test2','test3','test4','test5','test6','test7','test8','test9','test10'), +( REPEAT('a',65535), REPEAT('b',65535), REPEAT('c',255), REPEAT('d',65535), REPEAT('e',65535), REPEAT('f',1048576), REPEAT('g',1048576), REPEAT('h',255), REPEAT('i',1048576), REPEAT('j',1048576) ); + +--sorted_result +SELECT LENGTH(t), LENGTH(t0), LENGTH(t1), LENGTH(t300), LENGTH(tm), LENGTH(t70k), LENGTH(t17m), LENGTH(tt), LENGTH(m), LENGTH(l) FROM t1; + +# Invalid values (produce warnings, except for mediumtext 
and longtext columns for which the values are within limits) + +INSERT INTO t1 (t,t0,t1,t300,tm,t70k,t17m,tt,m,l) VALUES +( REPEAT('a',65536), REPEAT('b',65536), REPEAT('c',256), REPEAT('d',65536), REPEAT('e',65536), REPEAT('f',1048576), REPEAT('g',1048576), REPEAT('h',256), REPEAT('i',1048576), REPEAT('j',1048576) ); + +--sorted_result +SELECT LENGTH(t), LENGTH(t0), LENGTH(t1), LENGTH(t300), LENGTH(tm), LENGTH(t70k), LENGTH(t17m), LENGTH(tt), LENGTH(m), LENGTH(l) FROM t1; + +--error ER_TOO_BIG_DISPLAYWIDTH +eval ALTER TABLE t1 ADD COLUMN ttt TEXT(4294967296) $extra_col_opts; + +DROP TABLE t1; + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/type_text.test b/storage/rocksdb/mysql-test/rocksdb/t/type_text.test new file mode 100644 index 0000000000000..c6dd24ff42ebd --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/type_text.test @@ -0,0 +1,8 @@ +--source include/have_rocksdb.inc + +# +# TEXT column types +# + +--source type_text.inc + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/type_text_indexes-master.opt b/storage/rocksdb/mysql-test/rocksdb/t/type_text_indexes-master.opt new file mode 100644 index 0000000000000..6ad42e58aa22c --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/type_text_indexes-master.opt @@ -0,0 +1 @@ +--rocksdb_debug_optimizer_n_rows=1000 --rocksdb_records_in_range=50 diff --git a/storage/rocksdb/mysql-test/rocksdb/t/type_text_indexes.test b/storage/rocksdb/mysql-test/rocksdb/t/type_text_indexes.test new file mode 100644 index 0000000000000..5fdc4bff39d5d --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/type_text_indexes.test @@ -0,0 +1,171 @@ +--source include/have_rocksdb.inc +--source include/have_debug.inc + +# +# TEXT columns with indexes +# + +SET @ORIG_PAUSE_BACKGROUND_WORK = @@ROCKSDB_PAUSE_BACKGROUND_WORK; +SET GLOBAL ROCKSDB_PAUSE_BACKGROUND_WORK = 1; + +--disable_warnings +DROP TABLE IF EXISTS t1; +--enable_warnings + + +CREATE TABLE t1 ( + t TEXT, + tt TINYTEXT, + m MEDIUMTEXT, + l LONGTEXT, + PRIMARY KEY 
t (t(32)) +) ENGINE=rocksdb; + +SHOW INDEX IN t1; + +INSERT INTO t1 (t,tt,m,l) VALUES +('','','',''), +('a','b','c','d'), +('b','d','c','b'), +('test1','test2','test3','test4'), +(REPEAT('a',128),REPEAT('b',128),REPEAT('c',128),REPEAT('d',128)), +('abc','def','ghi','jkl'), +('test2','test3','test4','test5'), +('test3','test4','test5','test6'), +(REPEAT('b',128),REPEAT('f',128),REPEAT('e',128),REPEAT('d',128)), +(REPEAT('c',128),REPEAT('b',128),REPEAT('c',128),REPEAT('e',128)); + +--replace_column 9 # +EXPLAIN SELECT SUBSTRING(t,16) AS f FROM t1 WHERE t IN ('test1','test2') ORDER BY f; +SELECT SUBSTRING(t,16) AS f FROM t1 WHERE t IN ('test1','test2') ORDER BY f; + +--replace_column 9 # +EXPLAIN SELECT SUBSTRING(t,16) AS f FROM t1 IGNORE INDEX (PRIMARY) WHERE t IN ('test1','test2') ORDER BY f; +SELECT SUBSTRING(t,16) AS f FROM t1 IGNORE INDEX (PRIMARY) WHERE t IN ('test1','test2') ORDER BY f; +DROP TABLE t1; + +--error ER_BLOB_KEY_WITHOUT_LENGTH +CREATE TABLE t1 ( + t TEXT, + tt TINYTEXT, + m MEDIUMTEXT, + l LONGTEXT, + pk TINYTEXT PRIMARY KEY, + UNIQUE INDEX l_tt (l(256),tt(64)) +) ENGINE=rocksdb; + +CREATE TABLE t1 ( + t TEXT, + tt TINYTEXT, + m MEDIUMTEXT, + l LONGTEXT, + pk MEDIUMTEXT, + PRIMARY KEY mt (pk(1)), + INDEX (m(128)) +) ENGINE=rocksdb; + +SHOW INDEX IN t1; + +INSERT INTO t1 (t,tt,m,l,pk) VALUES +('','','','','0'), +('a','b','c','d','1'), +('b','d','c','b','2'), +('test1','test2','test3','test4','3'), +(REPEAT('a',128),REPEAT('b',128),REPEAT('c',128),REPEAT('d',128),'4'), +('abc','def','ghi','jkl','5'), +('test2','test3','test4','test5','6'), +('test3','test4','test5','test6','7'), +(REPEAT('b',128),REPEAT('f',128),REPEAT('e',128),REPEAT('d',128),'8'), +(REPEAT('c',128),REPEAT('b',128),REPEAT('c',128),REPEAT('e',128),'9'); + +--replace_column 9 # +EXPLAIN SELECT SUBSTRING(m,128) AS f FROM t1 WHERE m = 'test1' ORDER BY f DESC; +SELECT SUBSTRING(m,128) AS f FROM t1 WHERE m = 'test1' ORDER BY f DESC; + +DROP TABLE t1; + +CREATE TABLE t1 ( + b TEXT, + 
PRIMARY KEY b (b(32)) +) ENGINE=rocksdb; + +INSERT INTO t1 (b) VALUES +('00000000000000000000000000000000'), +('00000000000000000000000000000001'), +('00000000000000000000000000000002'); + +SELECT b FROM t1; + +DROP TABLE t1; + +CREATE TABLE t1 ( + b TINYTEXT, + PRIMARY KEY b (b(32)) +) ENGINE=rocksdb; + +INSERT INTO t1 (b) VALUES +('00000000000000000000000000000000'), +('00000000000000000000000000000001'), +('00000000000000000000000000000002'); + +SELECT b FROM t1; + +DROP TABLE t1; + +CREATE TABLE t1 ( + b MEDIUMTEXT, + PRIMARY KEY b (b(32)) +) ENGINE=rocksdb; + +INSERT INTO t1 (b) VALUES +('00000000000000000000000000000000'), +('00000000000000000000000000000001'), +('00000000000000000000000000000002'); + +SELECT b FROM t1; + +DROP TABLE t1; + +CREATE TABLE t1 ( + b LONGTEXT, + PRIMARY KEY b (b(32)) +) ENGINE=rocksdb; + +INSERT INTO t1 (b) VALUES +('00000000000000000000000000000000'), +('00000000000000000000000000000001'), +('00000000000000000000000000000002'); + +SELECT b FROM t1; + +DROP TABLE t1; + +CREATE TABLE t1 ( + b LONGTEXT CHARACTER SET "binary" COLLATE "binary", + PRIMARY KEY b (b(32)) +) ENGINE=rocksdb; + +INSERT INTO t1 (b) VALUES +('00000000000000000000000000000000'), +('00000000000000000000000000000001'), +('00000000000000000000000000000002'); +INSERT INTO t1 (b) VALUES (''), (_binary 0x0), (' '); + +SELECT hex(b) FROM t1; + +DROP TABLE t1; + +CREATE TABLE t1 ( + b LONGTEXT CHARACTER SET "latin1" COLLATE "latin1_bin", + PRIMARY KEY b (b(32)) +) ENGINE=rocksdb; + +--error ER_DUP_ENTRY +INSERT INTO t1 (b) VALUES (''), (_binary 0x0), (' '); + +INSERT INTO t1 (b) VALUES (''), (_binary 0x0); +SELECT hex(b) FROM t1; + +DROP TABLE t1; + +SET GLOBAL ROCKSDB_PAUSE_BACKGROUND_WORK = @ORIG_PAUSE_BACKGROUND_WORK; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/type_varbinary.inc b/storage/rocksdb/mysql-test/rocksdb/t/type_varbinary.inc new file mode 100644 index 0000000000000..a9f9883198ce5 --- /dev/null +++ 
b/storage/rocksdb/mysql-test/rocksdb/t/type_varbinary.inc @@ -0,0 +1,75 @@ +# +# VARBINARY column types +# + +--disable_warnings +DROP TABLE IF EXISTS t1, t2; +--enable_warnings + +eval CREATE TABLE t1 ( + v0 VARBINARY(0) $extra_col_opts, + v1 VARBINARY(1) $extra_col_opts, + v64 VARBINARY(64) $extra_col_opts, + v65000 VARBINARY(65000) $extra_col_opts, + PRIMARY KEY (v64) +) ENGINE=rocksdb; + +SHOW COLUMNS IN t1; + +eval CREATE TABLE t2 (v VARBINARY(65532) $extra_col_opts, PRIMARY KEY(v(255))) ENGINE=rocksdb; +SHOW COLUMNS IN t2; + +INSERT INTO t1 (v0,v1,v64,v65000) VALUES ('','','',''); +INSERT INTO t1 (v0,v1,v64,v65000) VALUES ('','y','Once there, double check that an article doesn\'t already exist','Here is a list of recommended books on MariaDB and MySQL. We\'ve provided links to Amazon.com here for convenience, but they can be found at many other bookstores, both online and off. + + If you want to have your favorite MySQL / MariaDB book listed here, please leave a comment. + For developers who want to code on MariaDB or MySQL + + * Understanding MySQL Internals by Sasha Pachev, former MySQL developer at MySQL AB. + o This is the only book we know about that describes the internals of MariaDB / MySQL. A must have for anyone who wants to understand and develop on MariaDB! + o Not all topics are covered and some parts are slightly outdated, but still the best book on this topic. + * MySQL 5.1 Plugin Development by Sergei Golubchik and Andrew Hutchings + o A must read for anyone wanting to write a plugin for MariaDB, written by the Sergei who designed the plugin interface for MySQL and MariaDB! + + For MariaDB / MySQL end users + + * MariaDB Crash Course by Ben Forta + o First MariaDB book! + o For people who want to learn SQL and the basics of MariaDB. + o Now shipping. Purchase at Amazon.com or your favorite bookseller. + + * SQL-99 Complete, Really by Peter Gulutzan & Trudy Pelzer. + o Everything you wanted to know about the SQL 99 standard. 
Excellent reference book! + o Free to read in the Knowledgebase! + + * MySQL (4th Edition) by Paul DuBois + o The \'default\' book to read if you wont to learn to use MySQL / MariaDB. + + * MySQL Cookbook by Paul DuBois + o A lot of examples of how to use MySQL. As with all of Paul\'s books, it\'s worth its weight in gold and even enjoyable reading for such a \'dry\' subject. + + * High Performance MySQL, Second Edition, By Baron Schwartz, Peter Zaitsev, Vadim Tkachenko, Jeremy D. Zawodny, Arjen Lentz, Derek J. Balling, et al. + o \"High Performance MySQL is the definitive guide to building fast, reliable systems with MySQL. Written by noted experts with years of real-world experience building very large systems, this book covers every aspect of MySQL performance in detail, and focuses on robustness, security, and data integrity. Learn advanced techniques in depth so you can bring out MySQL\'s full power.\" (From the book description at O\'Reilly) + + * MySQL Admin Cookbook + o A quick step-by-step guide for MySQL users and database administrators to tackle real-world challenges with MySQL configuration and administration + + * MySQL 5.0 Certification Study Guide, By Paul DuBois, Stefan Hinz, Carsten Pedersen + o This is the official guide to cover the passing of the two MySQL Certification examinations. It is valid till version 5.0 of the server, so while it misses all the features available in MySQL 5.1 and greater (including MariaDB 5.1 and greater), it provides a good basic understanding of MySQL for the end-user. 
'); + +--sorted_result +SELECT HEX(v0), HEX(v1), HEX(v64), HEX(v65000) FROM t1; + +# Invalid values + +INSERT INTO t1 (v0,v1,v64,v65000) VALUES ('y', 'yy', REPEAT('c',65), REPEAT('abcdefghi ',6501)); +INSERT INTO t1 (v0,v1,v64,v65000) SELECT v65000, v65000, CONCAT('a',v65000), CONCAT(v65000,v1) FROM t1; + +--sorted_result +SELECT HEX(v0), HEX(v1), HEX(v64), LENGTH(HEX(v65000)) FROM t1; + +eval ALTER TABLE t1 ADD COLUMN v65536 VARBINARY(65536) $extra_col_opts; +SHOW COLUMNS IN t1; + +DROP TABLE t1, t2; + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/type_varbinary.test b/storage/rocksdb/mysql-test/rocksdb/t/type_varbinary.test new file mode 100644 index 0000000000000..2ef3da8a499f3 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/type_varbinary.test @@ -0,0 +1,8 @@ +--source include/have_rocksdb.inc + +# +# VARBINARY column types +# + +--source type_varbinary.inc + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/type_varchar.inc b/storage/rocksdb/mysql-test/rocksdb/t/type_varchar.inc new file mode 100644 index 0000000000000..ce5dbb444d3b3 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/type_varchar.inc @@ -0,0 +1,77 @@ +# +# VARCHAR column types +# + +--disable_warnings +DROP TABLE IF EXISTS t1, t2; +--enable_warnings + +eval CREATE TABLE t1 ( + v0 VARCHAR(0) $extra_col_opts, + v1 VARCHAR(1) $extra_col_opts, + v64 VARCHAR(64) $extra_col_opts, + v65000 VARCHAR(65000) $extra_col_opts, + PRIMARY KEY (v64) +) ENGINE=rocksdb; + +SHOW COLUMNS IN t1; + +CREATE TABLE t2 (v VARCHAR(65532), PRIMARY KEY (v(255))) ENGINE=rocksdb; +SHOW COLUMNS IN t2; + +# Valid values + +INSERT INTO t1 (v0,v1,v64,v65000) VALUES ('','','',''); +INSERT INTO t1 (v0,v1,v64,v65000) VALUES ('','y','Once there, double check that an article doesn\'t already exist','Here is a list of recommended books on MariaDB and MySQL. We\'ve provided links to Amazon.com here for convenience, but they can be found at many other bookstores, both online and off. 
+ + If you want to have your favorite MySQL / MariaDB book listed here, please leave a comment. + For developers who want to code on MariaDB or MySQL + + * Understanding MySQL Internals by Sasha Pachev, former MySQL developer at MySQL AB. + o This is the only book we know about that describes the internals of MariaDB / MySQL. A must have for anyone who wants to understand and develop on MariaDB! + o Not all topics are covered and some parts are slightly outdated, but still the best book on this topic. + * MySQL 5.1 Plugin Development by Sergei Golubchik and Andrew Hutchings + o A must read for anyone wanting to write a plugin for MariaDB, written by the Sergei who designed the plugin interface for MySQL and MariaDB! + + For MariaDB / MySQL end users + + * MariaDB Crash Course by Ben Forta + o First MariaDB book! + o For people who want to learn SQL and the basics of MariaDB. + o Now shipping. Purchase at Amazon.com or your favorite bookseller. + + * SQL-99 Complete, Really by Peter Gulutzan & Trudy Pelzer. + o Everything you wanted to know about the SQL 99 standard. Excellent reference book! + o Free to read in the Knowledgebase! + + * MySQL (4th Edition) by Paul DuBois + o The \'default\' book to read if you wont to learn to use MySQL / MariaDB. + + * MySQL Cookbook by Paul DuBois + o A lot of examples of how to use MySQL. As with all of Paul\'s books, it\'s worth its weight in gold and even enjoyable reading for such a \'dry\' subject. + + * High Performance MySQL, Second Edition, By Baron Schwartz, Peter Zaitsev, Vadim Tkachenko, Jeremy D. Zawodny, Arjen Lentz, Derek J. Balling, et al. + o \"High Performance MySQL is the definitive guide to building fast, reliable systems with MySQL. Written by noted experts with years of real-world experience building very large systems, this book covers every aspect of MySQL performance in detail, and focuses on robustness, security, and data integrity. 
Learn advanced techniques in depth so you can bring out MySQL\'s full power.\" (From the book description at O\'Reilly) + + * MySQL Admin Cookbook + o A quick step-by-step guide for MySQL users and database administrators to tackle real-world challenges with MySQL configuration and administration + + * MySQL 5.0 Certification Study Guide, By Paul DuBois, Stefan Hinz, Carsten Pedersen + o This is the official guide to cover the passing of the two MySQL Certification examinations. It is valid till version 5.0 of the server, so while it misses all the features available in MySQL 5.1 and greater (including MariaDB 5.1 and greater), it provides a good basic understanding of MySQL for the end-user. '); + +--sorted_result +SELECT v0,v1,v64,v65000 FROM t1; + +# Invalid values + +INSERT INTO t1 (v0,v1,v64,v65000) VALUES ('y', 'yy', REPEAT('c',65), REPEAT('abcdefghi ',6501)); +INSERT INTO t1 (v0,v1,v64,v65000) SELECT v65000, v65000, CONCAT('a',v65000), CONCAT(v65000,v1) FROM t1; + +--sorted_result +SELECT v0, v1, v64, LENGTH(v65000) FROM t1; + +eval ALTER TABLE t1 ADD COLUMN v65536 VARCHAR(65536) $extra_col_opts; +SHOW COLUMNS IN t1; + +DROP TABLE t1, t2; + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/type_varchar.test b/storage/rocksdb/mysql-test/rocksdb/t/type_varchar.test new file mode 100644 index 0000000000000..e45b6836f6739 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/type_varchar.test @@ -0,0 +1,75 @@ +--source include/have_rocksdb.inc + +--disable_warnings +drop table if exists t1,t2; +--enable_warnings + +# +# VARCHAR column types +# + +--echo # +--echo # A basic test whether endspace-aware variable length encoding +--echo # works when in PK +--echo # +create table t1 ( pk varchar(10) primary key, col1 int not null) ; +insert into t1 values ('a', 1); +select * from t1; +drop table t1; + +--source type_varchar.inc + + +--echo # +--echo # Endspace-comparison tests: +--echo # + +--let $character_set_collate=CHARACTER SET latin1 COLLATE latin1_bin 
+--source type_varchar_endspace.inc + +--let $character_set_collate=CHARACTER SET utf8 COLLATE utf8_bin +--source type_varchar_endspace.inc + +--let $character_set_collate=CHARACTER SET ucs2 COLLATE ucs2_bin +--source type_varchar_endspace.inc + +--let $character_set_collate=CHARACTER SET utf8mb4 COLLATE utf8mb4_bin +--source type_varchar_endspace.inc + +--let $character_set_collate=CHARACTER SET utf16 COLLATE utf16_bin +--source type_varchar_endspace.inc + +create table t1 ( + pk int primary key, + col1 varchar(10) collate utf8mb4_bin not null, + col2 varchar(20), + key(col1) +) engine=rocksdb; + +insert into t1 values (1, 'ab','ab'); +insert into t1 values (2, 'ab\0','ab0'); + +select pk, hex(col1), col2 from t1 force index(col1) order by col1; +select pk, hex(col1), col2 from t1 ignore index(col1) order by col1; +drop table t1; + +# Issue #306 - Do not store trailing spaces for prefixed keys. +create table t (id int primary key, email varchar(100), KEY email_i (email(30))); +insert into t values (1, 'abcabcabcabcabcabcabcabcabcabcabc '); +--replace_column 9 # +explain select 'email_i' as index_name, count(*) AS count from t force index(email_i); +select 'email_i' as index_name, count(*) AS count from t force index(email_i); +drop table t; + +set @save_rocksdb_checksums_pct = @@global.rocksdb_checksums_pct; +set @save_rocksdb_verify_row_debug_checksums = @@session.rocksdb_verify_row_debug_checksums; +set global rocksdb_checksums_pct = 100; +set session rocksdb_verify_row_debug_checksums = on; +create table t (id int primary key, email varchar(100), KEY email_i (email(30))); +insert into t values (1, 'a'); +--replace_column 9 # +explain select 'email_i' as index_name, count(*) AS count from t force index(email_i); +select 'email_i' as index_name, count(*) AS count from t force index(email_i); +drop table t; +set global rocksdb_checksums_pct = @save_rocksdb_checksums_pct; +set session rocksdb_verify_row_debug_checksums = @save_rocksdb_verify_row_debug_checksums; 
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/type_varchar_endspace.inc b/storage/rocksdb/mysql-test/rocksdb/t/type_varchar_endspace.inc new file mode 100644 index 0000000000000..f813b8871b155 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/type_varchar_endspace.inc @@ -0,0 +1,84 @@ +--echo # +--echo # Issue 257: Sort order for varchars is different between +--echo # MyISAM/InnoDB vs MyRocks +--echo # + +eval create table t1 ( + pk varchar(64) $character_set_collate, + col1 varchar(64), + primary key (pk) +); +insert into t1 values ('a','a'); +--error ER_DUP_ENTRY +insert into t1 values ('a ', 'a-space'); + +insert into t1 values('b ', 'b-2x-space'); +--error ER_DUP_ENTRY +insert into t1 values ('b', 'b'); + +select pk, hex(pk), col1 from t1; + +insert into t1 values ('a\t', 'a-tab'); +insert into t1 values ('a \t', 'a-space-tab'); +select pk, hex(pk), col1 from t1 order by pk; + +--echo # Try longer values +--error ER_DUP_ENTRY +insert into t1 values (concat('a', repeat(' ',10)), 'a-10-x-space'); + +insert into t1 values (concat('c', repeat(' ',10)), 'c-10-x-space'); +select * from t1; + +drop table t1; + +--echo # Secondary index +eval create table t1 ( + pk int not null primary key, + col1 varchar(64) $character_set_collate, + col2 varchar(64), + key (col1) +); +insert into t1 values (0, 'ab', 'a-b'); +insert into t1 values (1, 'a ', 'a-space'); +insert into t1 values (2, 'a', 'a'); +insert into t1 values (3, 'a \t', 'a-tab'); + +--echo # Must show 'using index' for latin1_bin and utf8_bin: +--replace_column 9 # +explain +select col1, hex(col1) from t1; +select col1, hex(col1) from t1; + +--echo # Must show 'using index' for latin1_bin and utf8_bin: +--replace_column 4 # 9 # +explain +select col1, hex(col1) from t1 where col1 < 'b'; +select col1, hex(col1) from t1 where col1 < 'b'; + +delete from t1; +insert into t1 values(10, '', 'empty'); +insert into t1 values(11, repeat(' ', 8), '8x-space'); +insert into t1 values(12, repeat(' ', 16), 
'16x-space'); +insert into t1 values(13, repeat(' ', 24), '24x-space'); +insert into t1 values(14, concat(repeat(' ', 16),'a'), '16x-space-a'); + +insert into t1 values(21, repeat(' ', 9), '9x-space'); +insert into t1 values(22, repeat(' ',17), '17x-space'); +insert into t1 values(23, repeat(' ',18), '18x-space'); + +--replace_column 4 # 9 # +explain +select pk, col1, hex(col1), length(col1) from t1; +select pk, col1, hex(col1), length(col1) from t1; +drop table t1; + +create table t1 (pk int primary key, a varchar(512), key(a)) engine=rocksdb; +insert into t1 values (1, concat('a', repeat(' ', 300))); +insert into t1 values (2, concat('b', repeat(' ', 300))); + +select pk,length(a) from t1 force index(a) where a < 'zz'; +select pk,length(a),rtrim(a) from t1 force index(a) where a < 'zz'; +select pk,length(a),rtrim(a) from t1 ignore index(a) where a < 'zz'; + +drop table t1; + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/unique_check.test b/storage/rocksdb/mysql-test/rocksdb/t/unique_check.test new file mode 100644 index 0000000000000..86ae15924cb85 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/unique_check.test @@ -0,0 +1,171 @@ +--source include/have_rocksdb.inc +--source include/have_debug_sync.inc + +# For GitHub issue#167 -- Unique key check doesn't work + +connect (con1, localhost, root,,); +connect (con2, localhost, root,,); +connect (con3, localhost, root,,); +connection default; + +--disable_warnings +set debug_sync='RESET'; +drop table if exists t1; +--enable_warnings + +create table t1 (id int, value int, primary key (id)) engine=rocksdb; +create table t2 (id int, id2 int, value int, primary key (id), unique key (id2)) engine=rocksdb; + +# 1) second insert should be blocked at GetForUpdate(), then returning duplicate key error +# after con1's commit +connection con1; +begin; +insert into t1 values (1,1); + +connection con2; +let $ID= `select connection_id()`; +set session rocksdb_lock_wait_timeout=50; +begin; +send insert into t1 values 
(1,2); + +connection con1; +let $wait_condition= select 1 from INFORMATION_SCHEMA.PROCESSLIST + where ID = $ID and STATE = "Waiting for row lock"; +--source include/wait_condition.inc +commit; + +connection con2; +--error ER_DUP_ENTRY +reap; +commit; +select * from t1; +truncate table t1; + +# 2) same as 1) but using secondary unique key constraint +connection con1; +begin; +insert into t2 values (1,1,1); + +connection con2; +begin; +send insert into t2 values (2,1,2); + +connection con1; +--source include/wait_condition.inc +commit; + +connection con2; +--error ER_DUP_ENTRY +reap; +commit; +select * from t2; +truncate table t2; + +# 3) similar to 1),2) but rolled back +connection con1; +begin; +insert into t1 values (1,1); + +connection con2; +begin; +send insert into t1 values (1,2); + +connection con1; +--source include/wait_condition.inc +rollback; + +connection con2; +reap; +commit; +select * from t1; +truncate table t1; + +connection con1; +begin; +insert into t2 values (1,1,1); + +connection con2; +begin; +send insert into t2 values (2,1,2); + +connection con1; +--source include/wait_condition.inc +rollback; + +connection con2; +reap; +commit; +select * from t2; +truncate table t2; + + +# 4) simulating T1 GetForUpdate() -> T2 GetForUpdate(). T2 should fail with lock wait timeout. 
+connection con1; +set debug_sync='rocksdb.update_write_row_after_unique_check SIGNAL parked1 WAIT_FOR go1'; +send insert into t1 values (1,1); + +connection con2; +set debug_sync='rocksdb.update_write_row_after_unique_check SIGNAL parked2 WAIT_FOR go2'; +send insert into t2 values (1,1,1); + +connection default; +set debug_sync='now WAIT_FOR parked1'; +set debug_sync='now WAIT_FOR parked2'; + +connection con3; +set session rocksdb_lock_wait_timeout=1; +--error ER_LOCK_WAIT_TIMEOUT +insert into t1 values (1,2); +--error ER_LOCK_WAIT_TIMEOUT +insert into t2 values (2,1,2); + +connection default; +set debug_sync='now SIGNAL go1'; +set debug_sync='now SIGNAL go2'; + +connection con1; +reap; + +connection con2; +reap; + +connection default; +--error ER_DUP_ENTRY +insert into t1 values (1,2); +--error ER_DUP_ENTRY +insert into t2 values (2,1,2); +select * from t1; +select * from t2; +# Cleanup +connection default; +set debug_sync='RESET'; +disconnect con1; +disconnect con2; +disconnect con3; +drop table t1, t2; + +# skip_unique checks should skip checks only for tables that don't have +# secondary indexes +connection default; +--disable_warnings +drop table if exists t1,t2,t3; +--enable_warnings + +# table with PK only +create table t1 (id int, value int, primary key (id)) engine=rocksdb; +# table with PK and SK +create table t2 (id int, id2 int, value int, primary key (id), unique key (id2)) engine=rocksdb; +# table with hidden PK +create table t3 (id int, value int) engine=rocksdb; + +SET @old_val = @@session.unique_checks; +set @@session.unique_checks = FALSE; + +insert into t1 values (1, 1), (1, 2); +--error ER_DUP_ENTRY +insert into t2 values (1, 1, 1), (1, 2, 1); +insert into t3 values (1, 1), (1, 1); + +set @@session.unique_checks = @old_val; +# cleanup +drop table t1, t2, t3; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/unique_sec.inc b/storage/rocksdb/mysql-test/rocksdb/t/unique_sec.inc new file mode 100644 index 0000000000000..ecfc0d8f73407 --- /dev/null 
+++ b/storage/rocksdb/mysql-test/rocksdb/t/unique_sec.inc @@ -0,0 +1,198 @@ +# Save the initial number of concurrent sessions +--source include/count_sessions.inc + +--disable_warnings +DROP TABLE IF EXISTS t1; +--enable_warnings + +connect (con1,localhost,root,,); +connect (con2,localhost,root,,); + +connection con1; + +eval CREATE TABLE t1 (id1 INT NOT NULL, id2 INT NOT NULL, id3 VARCHAR(32), + id4 INT, id5 VARCHAR(32), + value1 INT, value2 INT, value3 VARCHAR(32), + PRIMARY KEY (id1, id2) $CF, + UNIQUE INDEX (id2, id1) $CF, + UNIQUE INDEX (id2, id3, id4) $CF, + INDEX (id1) $CF, + INDEX (id3, id1) $CF, + UNIQUE INDEX(id5) $CF, + INDEX (id2, id5)) ENGINE=ROCKSDB; + +--disable_query_log +let $max = 10; +let $i = 1; +while ($i <= $max) { + let $insert = INSERT INTO t1 VALUES ($i, $i, $i, $i, $i, $i, $i, $i); + inc $i; + eval $insert; +} +--enable_query_log + +SELECT COUNT(*) FROM t1; + +--echo # Test inserting a key that returns duplicate error +--error ER_DUP_ENTRY +INSERT INTO t1 VALUES (1, 1, 11, 11, 11, 11, 11, 11); +--error ER_DUP_ENTRY +INSERT INTO t1 VALUES (5, 5, 11, 11, 11, 11, 11, 11); +--error ER_DUP_ENTRY +INSERT INTO t1 VALUES (10, 10, 11, 11, 11, 11, 11, 11); +--error ER_DUP_ENTRY +INSERT INTO t1 VALUES (11, 1, 1, 1, 11, 11, 11, 11); +--error ER_DUP_ENTRY +INSERT INTO t1 VALUES (11, 5, 5, 5, 11, 11, 11, 11); +--error ER_DUP_ENTRY +INSERT INTO t1 VALUES (11, 10, 10, 10, 11, 11, 11, 11); +--error ER_DUP_ENTRY +INSERT INTO t1 VALUES (11, 11, 11, 11, 1, 11, 11, 11); +--error ER_DUP_ENTRY +INSERT INTO t1 VALUES (11, 11, 11, 11, 5, 11, 11, 11); +--error ER_DUP_ENTRY +INSERT INTO t1 VALUES (11, 11, 11, 11, 10, 11, 11, 11); + +--echo # Test updating a key that returns duplicate error +--error ER_DUP_ENTRY +UPDATE t1 SET id2=1, id3=1, id4=1 WHERE id1=2; +--error ER_DUP_ENTRY +UPDATE t1 SET id2=1, id3=1, id4=1; + +SELECT COUNT(*) FROM t1; + +--echo # Test updating a key to itself +UPDATE t1 set id2=id4; +UPDATE t1 set id5=id3, value1=value2; +UPDATE t1 set 
value3=value1; + +--echo # Test modifying values should not cause duplicates +UPDATE t1 SET value1=value3+1; +UPDATE t1 SET value3=value3 div 2; +UPDATE t1 SET value2=value3; + +SELECT COUNT(*) FROM t1; + +--echo # Test NULL values are considered unique +INSERT INTO t1 VALUES (20, 20, 20, NULL, NULL, 20, 20, 20); +INSERT INTO t1 VALUES (21, 20, 20, NULL, NULL, 20, 20, 20); +INSERT INTO t1 VALUES (22, 20, 20, NULL, NULL, 20, 20, 20); + +SELECT COUNT(*) FROM t1; + +--echo # Adding multiple rows where one of the rows fail the duplicate +--echo # check should fail the whole statement +--error ER_DUP_ENTRY +INSERT INTO t1 VALUES (23, 23, 23, 23, 23, 23, 23, 23), + (24, 24, 24, 24, 24, 24, 24, 24), + (25, 10, 10, 10, 25, 25, 25, 25), + (26, 26, 26, 26, 26, 26, 26, 26); +SELECT COUNT(*) FROM t1; + +# Test open transactions can prevent duplicate keys +connection con1; +BEGIN; +INSERT INTO t1 VALUES (30, 31, 32, 33, 34, 30, 30, 30); + +connection con2; +BEGIN; + +SELECT COUNT(*) FROM t1; + +--echo # Primary key should prevent duplicate on insert +--error ER_LOCK_WAIT_TIMEOUT +INSERT INTO t1 VALUES (30, 31, 30, 30, 30, 30, 30, 30); + +--echo # Primary key should prevent duplicate on update +--error ER_LOCK_WAIT_TIMEOUT +UPDATE t1 SET id1=30, id2=31 WHERE id2=10; + +--echo # Unique secondary key should prevent duplicate on insert +--error ER_LOCK_WAIT_TIMEOUT +INSERT INTO t1 VALUES (31, 31, 32, 33, 30, 30, 30, 30); +--error ER_LOCK_WAIT_TIMEOUT +INSERT INTO t1 VALUES (32, 32, 32, 32, 34, 32, 32, 32); + +--echo # Unique secondary key should prevent duplicate on update +--error ER_LOCK_WAIT_TIMEOUT +UPDATE t1 SET id2=31, id3=32, id4=33 WHERE id2=8; +--error ER_LOCK_WAIT_TIMEOUT +UPDATE t1 SET id5=34 WHERE id2=8; + +--echo # Adding multiple rows where one of the rows fail the duplicate +--echo # check should fail the whole statement +--error ER_LOCK_WAIT_TIMEOUT +INSERT INTO t1 VALUES (35, 35, 35, 35, 35, 35, 35, 35), + (36, 36, 36, 36, 36, 36, 36, 36), + (37, 31, 32, 33, 37, 
37, 37, 37), + (38, 38, 38, 38, 38, 38, 38, 38); +--error ER_LOCK_WAIT_TIMEOUT +INSERT INTO t1 VALUES (35, 35, 35, 35, 35, 35, 35, 35), + (36, 36, 36, 36, 36, 36, 36, 36), + (37, 37, 37, 37, 34, 37, 37, 37), + (38, 38, 38, 38, 38, 38, 38, 38); + +--echo # NULL values are unique and duplicates in value fields are ignored +INSERT INTO t1 VALUES (37, 31, 32, NULL, 37, 37, 37, 37), + (38, 31, 32, NULL, 38, 37, 37, 37), + (39, 31, 32, NULL, 39, 37, 37, 37); + +SELECT COUNT(*) FROM t1; + +--echo # Fail on duplicate key update for row added in our transaction +--error ER_DUP_ENTRY +UPDATE t1 SET id5=37 WHERE id1=38; + +--echo # Fail on lock timeout for row modified in another transaction +--error ER_LOCK_WAIT_TIMEOUT +UPDATE t1 SET id5=34 WHERE id1=38; + +--echo # NULL values are unique +UPDATE t1 SET id5=NULL WHERE value1 > 37; + +connection con1; +COMMIT; + +connection con2; +COMMIT; + +# Test transaction is reading from latest data +connection con2; +BEGIN; +SELECT COUNT(*) FROM t1; + +connection con1; +BEGIN; +INSERT INTO t1 VALUES (40, 40, 40, 40, 40, 40, 40, 40); + +connection con2; +--echo # When transaction is pending, fail on lock acquisition +--error ER_LOCK_WAIT_TIMEOUT +INSERT INTO t1 VALUES (40, 40, 40, 40, 40, 40, 40, 40); +--error ER_LOCK_WAIT_TIMEOUT +INSERT INTO t1 VALUES (41, 40, 40, 40, 40, 40, 40, 40); + +SELECT COUNT(*) FROM t1; + +connection con1; +COMMIT; + +connection con2; +--echo # When transaction is committed, fail on duplicate key +--error ER_DUP_ENTRY,ER_LOCK_DEADLOCK +INSERT INTO t1 VALUES (40, 40, 40, 40, 40, 40, 40, 40); +--error ER_DUP_ENTRY +INSERT INTO t1 VALUES (41, 40, 40, 40, 40, 40, 40, 40); + +ROLLBACK; + +SELECT * FROM t1; + +disconnect con1; +disconnect con2; + +connection default; +DROP TABLE t1; + +# Wait till we reached the initial number of concurrent sessions +--source include/wait_until_count_sessions.inc diff --git a/storage/rocksdb/mysql-test/rocksdb/t/unique_sec.test b/storage/rocksdb/mysql-test/rocksdb/t/unique_sec.test 
new file mode 100644 index 0000000000000..1dedd75f561b6 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/unique_sec.test @@ -0,0 +1,51 @@ +--source include/have_rocksdb.inc +--source include/have_partition.inc + +--let $CF= +--source unique_sec.inc + +--echo # +--echo # Issue #88: Creating unique index over column with duplicate values succeeds +--echo # +create table t1 (pk int primary key, a int) engine=rocksdb; + +insert into t1 values +(1, 1), +(2, 2), +(3, 3), +(4, 1), +(5, 5); + +--error ER_DUP_ENTRY +alter table t1 add unique(a); +drop table t1; + +--echo # +--echo # Issue #111 +--echo # +CREATE TABLE t2 (pk int, a int, PRIMARY KEY (pk, a), UNIQUE KEY (a)) ENGINE=ROCKSDB PARTITION BY KEY (a) PARTITIONS 16; + +INSERT INTO t2 VALUES (1,1); +--error ER_DUP_ENTRY +INSERT INTO t2 VALUES (1,1); +--error ER_DUP_ENTRY +INSERT INTO t2 VALUES (2,1); +DROP TABLE t2; + +--echo # +--echo # Issue #491 (https://github.com/facebook/mysql-5.6/issues/491) +--echo # +CREATE TABLE t (a BLOB, PRIMARY KEY(a(2)), UNIQUE KEY (a(1))) engine=rocksdb; +INSERT INTO t VALUES('a'); +CHECK TABLE t EXTENDED; +DROP TABLE t; + +CREATE TABLE t (a VARCHAR(255), PRIMARY KEY(a), UNIQUE KEY (a(1))) engine=rocksdb; +INSERT INTO t VALUES('a'); +CHECK TABLE t EXTENDED; +DROP TABLE t; + +CREATE TABLE t (a VARCHAR(255), PRIMARY KEY(a(2)), UNIQUE KEY (a(1))) engine=rocksdb; +INSERT INTO t VALUES('a'); +CHECK TABLE t EXTENDED; +DROP TABLE t; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/unique_sec_rev_cf.test b/storage/rocksdb/mysql-test/rocksdb/t/unique_sec_rev_cf.test new file mode 100644 index 0000000000000..dc0bbd8ec8d60 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/unique_sec_rev_cf.test @@ -0,0 +1,4 @@ +--source include/have_rocksdb.inc + +let $CF=COMMENT 'rev:cf'; +--source unique_sec.inc diff --git a/storage/rocksdb/mysql-test/rocksdb/t/unsupported_tx_isolations.test b/storage/rocksdb/mysql-test/rocksdb/t/unsupported_tx_isolations.test new file mode 100644 index 
0000000000000..0c7df020ef5e3 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/unsupported_tx_isolations.test @@ -0,0 +1,25 @@ +--source include/have_rocksdb.inc + +--disable_warnings +DROP TABLE IF EXISTS t1; +--enable_warnings +create table t1 (id int primary key, value int) engine=rocksdb; +insert into t1 values (1,1); + +SET session transaction isolation level read uncommitted; +begin; +--error ER_UNKNOWN_ERROR +insert into t1 values (2,1); +--error ER_UNKNOWN_ERROR +select * from t1 where id=1; +rollback; + +SET session transaction isolation level serializable; +begin; +--error ER_UNKNOWN_ERROR +insert into t1 values (2,1); +--error ER_UNKNOWN_ERROR +select * from t1 where id=1; +rollback; + +DROP TABLE t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/update.test b/storage/rocksdb/mysql-test/rocksdb/t/update.test new file mode 100644 index 0000000000000..e0a49ee8ca50d --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/update.test @@ -0,0 +1,72 @@ +--source include/have_rocksdb.inc + +# +# Basic UPDATE statements. 
+# UPDATE LOW_PRIORITY is covered in update_low_prio test +# UPDATE IGNORE is covered in update_ignore test +# Multi-table update is covered in update_multi test +# + +######################################## +# TODO: +# The results of the transactional part +# are unusual due to "can't-see-own-changes" +######################################## + +--disable_warnings +DROP TABLE IF EXISTS t1; +--enable_warnings + +CREATE TABLE t1 (a INT, b CHAR(8), pk INT AUTO_INCREMENT PRIMARY KEY) ENGINE=rocksdb; +INSERT INTO t1 (a,b) VALUES (1,'a'),(2,'b'),(3,'c'),(4,'d'),(5,'e'),(10000,'foobar'); +INSERT INTO t1 (a,b) SELECT a, b FROM t1; + +UPDATE t1 SET a=a+100; +--sorted_result +SELECT a,b FROM t1; + +UPDATE t1 SET a=a-100, b=DEFAULT WHERE a>100; +--sorted_result +SELECT a,b FROM t1; + +# ORDER BY and LIMIT +UPDATE t1 SET b = 'update' WHERE a <= 4 ORDER BY b DESC, a ASC LIMIT 1; +--sorted_result +SELECT a,b FROM t1; + +DROP TABLE t1; + +# +# Transactional UPDATE +# + +CREATE TABLE t1 (a INT, b CHAR(8), pk INT AUTO_INCREMENT PRIMARY KEY) ENGINE=rocksdb; +INSERT INTO t1 (a,b) VALUES (1,'a'),(2,'b'),(3,'c'),(4,'d'),(5,'e'),(10000,'foobar'); +INSERT INTO t1 (a,b) SELECT a, b FROM t1; + +BEGIN; +UPDATE t1 SET a=a+100; +UPDATE t1 SET a=a-50, b=DEFAULT WHERE a>100; +COMMIT; +--sorted_result +SELECT * FROM t1 ORDER BY pk; + +BEGIN; +UPDATE t1 SET b = 'update' WHERE a <= 4 ORDER BY a DESC, b ASC LIMIT 3; +UPDATE t1 SET b = ''; +ROLLBACK; +SELECT * FROM t1 ORDER BY pk; + +BEGIN; +UPDATE t1 SET b = 'update2' WHERE a <= 100; +SAVEPOINT spt1; +UPDATE t1 SET b = ''; +--error ER_UNKNOWN_ERROR +ROLLBACK TO SAVEPOINT spt1; +UPDATE t1 SET b = 'upd' WHERE a = 10050; +--error ER_UNKNOWN_ERROR +COMMIT; +SELECT * FROM t1 ORDER BY pk; + +DROP TABLE t1; + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/update_ignore-master.opt b/storage/rocksdb/mysql-test/rocksdb/t/update_ignore-master.opt new file mode 100644 index 0000000000000..ba9364e152376 --- /dev/null +++ 
b/storage/rocksdb/mysql-test/rocksdb/t/update_ignore-master.opt @@ -0,0 +1 @@ +--rocksdb_debug_optimizer_n_rows=1000 diff --git a/storage/rocksdb/mysql-test/rocksdb/t/update_ignore.test b/storage/rocksdb/mysql-test/rocksdb/t/update_ignore.test new file mode 100644 index 0000000000000..0208e1b3563c4 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/update_ignore.test @@ -0,0 +1,35 @@ +--source include/have_rocksdb.inc + +# +# UPDATE IGNORE +# + +--disable_warnings +DROP TABLE IF EXISTS t1,t2; +--enable_warnings + +CREATE TABLE t1 (a INT, b CHAR(8), pk INT AUTO_INCREMENT PRIMARY KEY) ENGINE=rocksdb; + +INSERT INTO t1 (a,b) VALUES (1,'a'),(2,'b'),(3,'c'),(4,'d'),(5,'e'),(10000,'foobar'); +INSERT INTO t1 (a,b) SELECT a, b FROM t1; + +CREATE TABLE t2 (c CHAR(8), d INT, pk INT AUTO_INCREMENT PRIMARY KEY) ENGINE=rocksdb; + +INSERT INTO t2 (c,d) SELECT b, a FROM t1; + +UPDATE IGNORE t1 SET b = 'upd1' WHERE b IS NOT NULL ORDER BY a LIMIT 1; +SELECT a,b FROM t1 ORDER BY pk; + +--error ER_SUBQUERY_NO_1_ROW +UPDATE t1, t2 SET b = 'upd2a', c = 'upd2b' + WHERE c < b OR a != ( SELECT 1 UNION SELECT 2 ); + +UPDATE IGNORE t1, t2 SET b = 'upd2a', c = 'upd2b' + WHERE c < b OR a != ( SELECT 1 UNION SELECT 2 ); + +SELECT a,b FROM t1 ORDER BY pk; +SELECT c,d FROM t2 ORDER BY pk; + +# Cleanup +DROP TABLE t1, t2; + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/update_multi.test b/storage/rocksdb/mysql-test/rocksdb/t/update_multi.test new file mode 100644 index 0000000000000..1cb2f12804d1e --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/update_multi.test @@ -0,0 +1,15 @@ +--source include/have_rocksdb.inc + +# +# Multi-table UPDATE statements. 
+# + +--disable_warnings +DROP TABLE IF EXISTS t1,t2; +--enable_warnings + +let $i= 0; +while ($i < 10) { + --source update_multi_exec.inc + inc $i; +} diff --git a/storage/rocksdb/mysql-test/rocksdb/t/update_multi_exec.inc b/storage/rocksdb/mysql-test/rocksdb/t/update_multi_exec.inc new file mode 100644 index 0000000000000..40d736693c648 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/update_multi_exec.inc @@ -0,0 +1,27 @@ +CREATE TABLE t1 (a INT, b CHAR(8), pk INT AUTO_INCREMENT +PRIMARY KEY) ENGINE=rocksdb; +INSERT INTO t1 (a,b) VALUES +(1,'a'),(2,'b'),(3,'c'),(4,'d'),(5,'e'),(10000,'foobar'); +INSERT INTO t1 (a,b) SELECT a, b FROM t1; + +CREATE TABLE t2 (c CHAR(8), d INT, pk INT AUTO_INCREMENT +PRIMARY KEY) ENGINE=rocksdb; +INSERT INTO t2 (c,d) SELECT b, a FROM t1; + +UPDATE t1 STRAIGHT_JOIN t2 SET t1.a = t2.d+100, t2.c = 'multi' +WHERE c < b AND a + d != 1; +--sorted_result +SELECT a,b FROM t1; +--sorted_result +SELECT c,d FROM t2; + +UPDATE t2 STRAIGHT_JOIN t1 SET t2.d = DEFAULT +WHERE c = 'foobar' and b = c; +--sorted_result +SELECT a,b FROM t1; +--sorted_result +SELECT c,d FROM t2; + +# Cleanup +DROP TABLE t1, t2; + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/update_with_keys.test b/storage/rocksdb/mysql-test/rocksdb/t/update_with_keys.test new file mode 100644 index 0000000000000..c53eb9be85c1c --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/update_with_keys.test @@ -0,0 +1,78 @@ +--source include/have_rocksdb.inc + +# +# UPDATE statements for tables with keys +# + +############################################# +# TODO: +# The test doesn't work quite as expected, +# apparently due to "can't see own changes" +############################################# + +--disable_warnings +DROP TABLE IF EXISTS t1; +--enable_warnings + +CREATE TABLE t1 (a INT, b CHAR(8), pk INT AUTO_INCREMENT PRIMARY KEY, INDEX(b)) ENGINE=rocksdb; +INSERT INTO t1 (a,b) VALUES (1,'a'),(2,'b'),(3,'c'),(4,'d'),(5,'e'),(6,'x'),(7,'y'),(8,'z'); +UPDATE t1 SET a=100, 
b='f' WHERE b IN ('b','c'); +UPDATE t1 SET b='m' WHERE b = 'f'; +UPDATE t1 SET b='z' WHERE a < 2; +UPDATE t1 SET b=''; +--sorted_result +SELECT a,b FROM t1; +DROP TABLE t1; + +--echo # RocksDB: skip the test for secondary UNIQUE keys. +--disable_parsing +--error ER_GET_ERRMSG +CREATE TABLE t1 (a INT, b CHAR(8), pk INT AUTO_INCREMENT PRIMARY KEY, UNIQUE INDEX(a)) ENGINE=innodb; + + +INSERT INTO t1 (a,b) VALUES (1,'a'),(2,'b'),(3,'c'),(4,'d'),(5,'e'),(0,'f'),(100,'a'); +UPDATE t1 SET a=a+200; +UPDATE t1 SET a=0 WHERE a > 250; +--error ER_DUP_ENTRY +UPDATE t1 SET a=205 WHERE a=200; +UPDATE t1 SET a=12345 ORDER BY a, b LIMIT 1; +--sorted_result +SELECT a,b FROM t1; + +--error ER_DUP_ENTRY +UPDATE t1 SET a=80 WHERE a IN (202,203); +--sorted_result +SELECT a,b FROM t1; +DROP TABLE t1; + +CREATE TABLE t1 (a INT, b CHAR(8), pk INT AUTO_INCREMENT PRIMARY KEY, UNIQUE INDEX(a,b)) ENGINE=rocksdb; +INSERT INTO t1 (a,b) VALUES (1,'a'),(2,'b'),(3,'c'),(4,'d'),(5,'e'),(100,'a'),(6,'f'); +UPDATE t1 SET a=6 WHERE a=3; +--error ER_DUP_ENTRY +UPDATE t1 SET a=100 WHERE a=1; +--error ER_DUP_ENTRY +UPDATE t1 SET a=4, b='d' WHERE b='f'; +UPDATE t1 SET a=a+1; +--sorted_result +SELECT a,b FROM t1; +--error ER_DUP_ENTRY +UPDATE t1 SET b='z'; +DROP TABLE t1; + +--enable_parsing + +CREATE TABLE t1 (a INT PRIMARY KEY, b CHAR(8)) ENGINE=rocksdb; +INSERT INTO t1 (a,b) VALUES (1,'a'),(2,'b'),(3,'c'),(4,'d'),(5,'e'),(0,'f'),(100,'a'); +UPDATE t1 SET a=a+200; +UPDATE t1 SET a=0 WHERE a > 250; +--error ER_DUP_ENTRY +UPDATE t1 SET a=205 WHERE a=200; +UPDATE t1 SET a=12345 ORDER BY a DESC, b LIMIT 1; + +--sorted_result +SELECT a,b FROM t1; + +--error ER_DUP_ENTRY +UPDATE t1 SET a=80 WHERE a IN (202,203); +DROP TABLE t1; + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/use_direct_reads_writes.test b/storage/rocksdb/mysql-test/rocksdb/t/use_direct_reads_writes.test new file mode 100644 index 0000000000000..87d31d2e2d9d8 --- /dev/null +++ 
b/storage/rocksdb/mysql-test/rocksdb/t/use_direct_reads_writes.test @@ -0,0 +1,48 @@ +--source include/have_rocksdb.inc + +call mtr.add_suppression("rocksdb"); +call mtr.add_suppression("Aborting"); + +# Issue221 +# Turning on both --rocksdb-allow-mmap-reads and --rocksdb-use-direct-reads +# caused an assertion in RocksDB. Now it should not be allowed and the +# server will not start with that configuration + +# Write file to make mysql-test-run.pl expect the "crash", but don't restart +# the server until it is told to +--let $_server_id= `SELECT @@server_id` +--let $_expect_file_name= $MYSQLTEST_VARDIR/tmp/mysqld.$_server_id.expect +--exec echo "wait" >$_expect_file_name +shutdown_server 10; + +# Attempt to restart the server with invalid options +--exec echo "restart:--rocksdb_use_direct_reads=1 --rocksdb_allow_mmap_reads=1" >$_expect_file_name +--sleep 0.1 # Wait 100ms - that is how long the sleep is in check_expected_crash_and_restart +--exec echo "restart:" >$_expect_file_name + +# Cleanup +--enable_reconnect +--source include/wait_until_connected_again.inc +--disable_reconnect + +# We should now have an error message +let SEARCH_FILE=$MYSQLTEST_VARDIR/log/mysqld.1.err; +let SEARCH_PATTERN=enable both use_direct_reads; +source include/search_pattern_in_file.inc; + +# Repeat with --rocksdb-use-direct-writes +--let $_server_id= `SELECT @@server_id` +--let $_expect_file_name= $MYSQLTEST_VARDIR/tmp/mysqld.$_server_id.expect +--exec echo "wait" >$_expect_file_name +shutdown_server 10; + +--exec echo "restart:--rocksdb_use_direct_writes=1 --rocksdb_allow_mmap_writes=1" >$_expect_file_name +--sleep 0.1 +--exec echo "restart:" >$_expect_file_name + +--enable_reconnect +--source include/wait_until_connected_again.inc +--disable_reconnect + +let SEARCH_PATTERN=enable both use_direct_writes; +source include/search_pattern_in_file.inc; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/validate_datadic.test b/storage/rocksdb/mysql-test/rocksdb/t/validate_datadic.test new 
file mode 100644 index 0000000000000..e9dcc60415527 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/validate_datadic.test @@ -0,0 +1,95 @@ +--source include/have_rocksdb.inc +--source include/have_partition.inc + +# +# Validate that the server starts when everything is okay, but detects errors +# if a table exists in the data dictionary but not as an .frm or vice versa. +# The default mode causes these failures to keep the server from starting, but +# this is problematic for the test as a server start failure is not easily +# trappable. Instead use the mode where it will detect the problem and report +# it in the log bug still start: --rocksdb_validate_tables=2 +# + +call mtr.add_suppression('RocksDB: Schema mismatch'); + +CREATE TABLE t1 (pk int primary key) ENGINE=ROCKSDB; +CREATE TABLE t2 (pk int primary key) ENGINE=ROCKSDB PARTITION BY KEY(pk) PARTITIONS 4; + +# Write file to make mysql-test-run.pl expect the "crash", but don't restart the +# server until it is told to +--let $_server_id= `SELECT @@server_id` +--let $_expect_file_name= $MYSQLTEST_VARDIR/tmp/mysqld.$_server_id.expect +--exec echo "wait" >$_expect_file_name + +# Send shutdown to the connected server and give it 10 seconds to die before +# zapping it +shutdown_server 10; + +# Write file to make mysql-test-run.pl start up the server again +--exec echo "restart" >$_expect_file_name +--sleep 5 + +# Turn on reconnect +--enable_reconnect + +# Call script that will poll the server waiting for it to be back online again +--source include/wait_until_connected_again.inc + +# Turn off reconnect again +--disable_reconnect + +# Now shut down again and rename one of the .frm files +--exec echo "wait" >$_expect_file_name +shutdown_server 10; + +# Rename the file +--move_file $MYSQLTEST_VARDIR/mysqld.1/data/test/t1.frm $MYSQLTEST_VARDIR/mysqld.1/data/test/t1.frm.tmp +--move_file $MYSQLTEST_VARDIR/mysqld.1/data/test/t2.frm $MYSQLTEST_VARDIR/mysqld.1/data/test/t2.frm.tmp + +# Attempt to restart the server 
+--exec echo "restart:--rocksdb_validate_tables=2" >$_expect_file_name + +--enable_reconnect +--source include/wait_until_connected_again.inc +--disable_reconnect + +# We should now have an error message +--echo "Expect errors that we are missing two .frm files" +--let SEARCH_FILE=$MYSQLTEST_VARDIR/log/mysqld.1.err +--let SEARCH_PATTERN=Schema mismatch +--source include/search_pattern_in_file.inc + +# Now shut down again and rename one the .frm file back and make a copy of it +--exec echo "wait" >$_expect_file_name +shutdown_server 10; + +# Rename the file +--move_file $MYSQLTEST_VARDIR/mysqld.1/data/test/t1.frm.tmp $MYSQLTEST_VARDIR/mysqld.1/data/test/t1.frm +--move_file $MYSQLTEST_VARDIR/mysqld.1/data/test/t2.frm.tmp $MYSQLTEST_VARDIR/mysqld.1/data/test/t2.frm +--copy_file $MYSQLTEST_VARDIR/mysqld.1/data/test/t1.frm $MYSQLTEST_VARDIR/mysqld.1/data/test/t1_dummy.frm + +# Attempt to restart the server +--exec echo "restart:--rocksdb_validate_tables=2" >$_expect_file_name + +--enable_reconnect +--source include/wait_until_connected_again.inc +--disable_reconnect + +# We should now have an error message for the second case +--echo "Expect an error that we have an extra .frm file" +--let SEARCH_PATTERN=Schema mismatch +--source include/search_pattern_in_file.inc + +# Shut down an clean up +--exec echo "wait" >$_expect_file_name +shutdown_server 10; +--remove_file $MYSQLTEST_VARDIR/mysqld.1/data/test/t1_dummy.frm +--exec echo "restart" >$_expect_file_name +--enable_reconnect +--source include/wait_until_connected_again.inc +--disable_reconnect + +--disable_warnings +DROP TABLE t1, t2; +--enable_warnings + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/write_sync.test b/storage/rocksdb/mysql-test/rocksdb/t/write_sync.test new file mode 100644 index 0000000000000..804889dcec909 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/write_sync.test @@ -0,0 +1,43 @@ +--source include/have_rocksdb.inc +--source include/not_windows.inc # MDEV-12427 + +SET GLOBAL 
rocksdb_write_disable_wal=false; +SET GLOBAL rocksdb_write_ignore_missing_column_families=true; + +create table aaa (id int primary key, i int) engine rocksdb; +set @save_rocksdb_flush_log_at_trx_commit= @@global.rocksdb_flush_log_at_trx_commit; +SET LOCAL rocksdb_flush_log_at_trx_commit=0; +sleep 30; +select variable_value into @a from information_schema.global_status where variable_name='rocksdb_wal_synced'; +insert aaa(id, i) values(1,1); +select variable_value-@a from information_schema.global_status where variable_name='rocksdb_wal_synced'; +insert aaa(id, i) values(2,1); +select variable_value-@a from information_schema.global_status where variable_name='rocksdb_wal_synced'; +insert aaa(id, i) values(3,1); +select variable_value-@a from information_schema.global_status where variable_name='rocksdb_wal_synced'; + +SET LOCAL rocksdb_flush_log_at_trx_commit=1; +insert aaa(id, i) values(4,1); +select variable_value-@a from information_schema.global_status where variable_name='rocksdb_wal_synced'; +insert aaa(id, i) values(5,1); +select variable_value-@a from information_schema.global_status where variable_name='rocksdb_wal_synced'; +insert aaa(id, i) values(6,1); +select variable_value-@a from information_schema.global_status where variable_name='rocksdb_wal_synced'; + +SET GLOBAL rocksdb_background_sync=on; +SET LOCAL rocksdb_flush_log_at_trx_commit=0; +insert aaa(id, i) values(7,1); + +let $status_var=rocksdb_wal_synced; +let $status_var_value=`select @a+4`; +source include/wait_for_status_var.inc; + +truncate table aaa; + +# Cleanup +drop table aaa; +SET GLOBAL rocksdb_flush_log_at_trx_commit=@save_rocksdb_flush_log_at_trx_commit; +SET GLOBAL rocksdb_write_disable_wal=false; +SET GLOBAL rocksdb_write_ignore_missing_column_families=false; +SET GLOBAL rocksdb_background_sync=off; + diff --git a/storage/rocksdb/mysql-test/rocksdb_hotbackup/base.cnf b/storage/rocksdb/mysql-test/rocksdb_hotbackup/base.cnf new file mode 100644 index 0000000000000..101dbce2385a7 --- 
/dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_hotbackup/base.cnf @@ -0,0 +1,25 @@ +# Use default setting for mysqld processes +!include include/default_mysqld.cnf +!include include/default_client.cnf + +[mysqld.1] +rocksdb +default-storage-engine=rocksdb +skip-innodb +default-tmp-storage-engine=MyISAM +binlog_format=row + +[mysqld.2] +rocksdb +default-storage-engine=rocksdb +skip-innodb +default-tmp-storage-engine=MyISAM +binlog_format=row + +[ENV] +MASTER_MYPORT= @mysqld.1.port +MASTER_MYSOCK= @mysqld.1.socket + +SLAVE_MYPORT= @mysqld.2.port +SLAVE_MYSOCK= @mysqld.2.socket + diff --git a/storage/rocksdb/mysql-test/rocksdb_hotbackup/include/cleanup.inc b/storage/rocksdb/mysql-test/rocksdb_hotbackup/include/cleanup.inc new file mode 100644 index 0000000000000..947bf0270e289 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_hotbackup/include/cleanup.inc @@ -0,0 +1,3 @@ + +--source include/rpl_end.inc + diff --git a/storage/rocksdb/mysql-test/rocksdb_hotbackup/include/create_slocket_socket.sh b/storage/rocksdb/mysql-test/rocksdb_hotbackup/include/create_slocket_socket.sh new file mode 100755 index 0000000000000..6174e5d1864af --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_hotbackup/include/create_slocket_socket.sh @@ -0,0 +1,2 @@ +src_data_dir="${MYSQLTEST_VARDIR}/mysqld.1/data/" +python -c "import socket as s; sock = s.socket(s.AF_UNIX); sock.bind('${src_data_dir}/slocket')" diff --git a/storage/rocksdb/mysql-test/rocksdb_hotbackup/include/load_data.sh b/storage/rocksdb/mysql-test/rocksdb_hotbackup/include/load_data.sh new file mode 100755 index 0000000000000..f3836ab75e58f --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_hotbackup/include/load_data.sh @@ -0,0 +1,43 @@ +set -e + +# Insert 100 batches of 100 records each to a table with following schema: +# create table db1.t1 ( +# `id` int(10) not null auto_increment, +# `k` int(10), +# `data` varchar(2048), +# primary key (`id`), +# key (`k`) +# ) engine=innodb; + +MAX_INSERTS=100 
+MAX_ROWS_PER_INSERT=100 + +insertData() { + for ((i=1; i<=$MAX_INSERTS; i++)); + do + stmt='INSERT INTO db1.t1 values' + for ((j=1; j<=$MAX_ROWS_PER_INSERT; j++)); + do + k=$RANDOM + data=$(head -c 2048 /dev/urandom|tr -cd 'a-zA-Z0-9') + stmt=$stmt' (NULL, '$k', "'$data'")' + if [ $j -lt $MAX_ROWS_PER_INSERT ]; then + stmt=$stmt',' + fi + done + stmt=$stmt';' + $MYSQL --defaults-group-suffix=.1 -e "$stmt" + done +} + +NUM_PARALLEL_INSERTS=25 +pids=() +for ((k=1; k<=$NUM_PARALLEL_INSERTS; k++)); +do + insertData & + pids+=($!) +done +for ((k=1; k<=$NUM_PARALLEL_INSERTS; k++)); +do + wait ${pids[k]} +done diff --git a/storage/rocksdb/mysql-test/rocksdb_hotbackup/include/load_data_and_run.sh b/storage/rocksdb/mysql-test/rocksdb_hotbackup/include/load_data_and_run.sh new file mode 100755 index 0000000000000..a4e4afab9d4cc --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_hotbackup/include/load_data_and_run.sh @@ -0,0 +1,9 @@ +set -e + +# Initially loads a chunk of data. +# Then start loading another chunk of data, +# while simultaneously running a backup + +suite/rocksdb_hotbackup/include/load_data.sh 2>&1 +suite/rocksdb_hotbackup/include/load_data.sh 2>&1 & +suite/rocksdb_hotbackup/include/stream_run.sh 2>&1 diff --git a/storage/rocksdb/mysql-test/rocksdb_hotbackup/include/load_data_slocket.sh b/storage/rocksdb/mysql-test/rocksdb_hotbackup/include/load_data_slocket.sh new file mode 100755 index 0000000000000..ed0b3cb5c1cda --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_hotbackup/include/load_data_slocket.sh @@ -0,0 +1,43 @@ +set -e + +# Insert 10 batches of 10 records each to a table with following schema: +# create table slocket.t1 ( +# `id` int(10) not null auto_increment, +# `k` int(10), +# `data` varchar(2048), +# primary key (`id`), +# key (`k`) +# ) engine=innodb; + +MAX_INSERTS=10 +MAX_ROWS_PER_INSERT=10 + +insertData() { + for ((i=1; i<=$MAX_INSERTS; i++)); + do + stmt='INSERT INTO slocket.t1 values' + for ((j=1; j<=$MAX_ROWS_PER_INSERT; j++)); + 
do + k=$RANDOM + data=$(head -c 2048 /dev/urandom|tr -cd 'a-zA-Z0-9') + stmt=$stmt' (NULL, '$k', "'$data'")' + if [ $j -lt $MAX_ROWS_PER_INSERT ]; then + stmt=$stmt',' + fi + done + stmt=$stmt';' + $MYSQL --defaults-group-suffix=.1 -e "$stmt" + done +} + +NUM_PARALLEL_INSERTS=25 +pids=() +for ((k=1; k<=$NUM_PARALLEL_INSERTS; k++)); +do + insertData & + pids+=($!) +done +for ((k=1; k<=$NUM_PARALLEL_INSERTS; k++)); +do + wait ${pids[k]} +done diff --git a/storage/rocksdb/mysql-test/rocksdb_hotbackup/include/remove_slocket_socket.sh b/storage/rocksdb/mysql-test/rocksdb_hotbackup/include/remove_slocket_socket.sh new file mode 100755 index 0000000000000..0c2c71aad68c4 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_hotbackup/include/remove_slocket_socket.sh @@ -0,0 +1,2 @@ +src_data_dir="${MYSQLTEST_VARDIR}/mysqld.1/data/" +rm "${src_data_dir}/slocket" diff --git a/storage/rocksdb/mysql-test/rocksdb_hotbackup/include/setup.inc b/storage/rocksdb/mysql-test/rocksdb_hotbackup/include/setup.inc new file mode 100644 index 0000000000000..26c3f2ce7f18b --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_hotbackup/include/setup.inc @@ -0,0 +1,16 @@ +--let $rpl_server_count= 2 +--let $rpl_topology= none +--source include/rpl_init.inc +--source include/rpl_default_connections.inc + +connection server_1; +create database db1; + +create table db1.t1 ( + `id` int(10) not null auto_increment, + `k` int(10), + `data` varchar(2048), + primary key (`id`), + key (`k`) +) engine=rocksdb; + diff --git a/storage/rocksdb/mysql-test/rocksdb_hotbackup/include/setup_replication_gtid.sh b/storage/rocksdb/mysql-test/rocksdb_hotbackup/include/setup_replication_gtid.sh new file mode 100755 index 0000000000000..18e1feeda96b0 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_hotbackup/include/setup_replication_gtid.sh @@ -0,0 +1,20 @@ +set -e + +binlog_line=($(grep -o "Last binlog file position [0-9]*, file name .*\.[0-9]*" ${MYSQLTEST_VARDIR}/log/mysqld.2.err | tail -1)) 
+binlog_pos=${binlog_line[4]%?} +binlog_file=${binlog_line[7]} + +sql="show gtid_executed in '$binlog_file' from $binlog_pos" +result=($($MYSQL --defaults-group-suffix=.1 -e "$sql")) +gtid_executed=${result[1]} + +sql="reset master;" +sql="$sql reset slave;" +sql="$sql change master to master_host='127.0.0.1', master_port=${MASTER_MYPORT}, master_user='root', master_auto_position=1, master_connect_retry=1;" +sql="$sql set global gtid_purged='$gtid_executed';" +sql="$sql start slave;" +sql="$sql stop slave;" +sql="$sql change master to master_auto_position=0;" +sql="$sql start slave;" +$MYSQL --defaults-group-suffix=.2 -e "$sql" +echo "$sql" > ${MYSQL_TMP_DIR}/gtid_stmt diff --git a/storage/rocksdb/mysql-test/rocksdb_hotbackup/include/setup_replication_gtid_and_sync.inc b/storage/rocksdb/mysql-test/rocksdb_hotbackup/include/setup_replication_gtid_and_sync.inc new file mode 100644 index 0000000000000..75dc31964da8f --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_hotbackup/include/setup_replication_gtid_and_sync.inc @@ -0,0 +1,4 @@ +--exec suite/rocksdb_hotbackup/include/setup_replication_gtid.sh + +let $slave_sync_timeout = 1800; +source include/wait_for_slave_to_sync_with_master.inc; diff --git a/storage/rocksdb/mysql-test/rocksdb_hotbackup/include/setup_slocket.inc b/storage/rocksdb/mysql-test/rocksdb_hotbackup/include/setup_slocket.inc new file mode 100644 index 0000000000000..ce889164219a6 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_hotbackup/include/setup_slocket.inc @@ -0,0 +1,10 @@ +connection server_1; +create database slocket; + +create table slocket.t1 ( + `id` int(10) not null auto_increment, + `k` int(10), + `data` varchar(2048), + primary key (`id`), + key (`k`) +) engine=rocksdb; diff --git a/storage/rocksdb/mysql-test/rocksdb_hotbackup/include/stream_run.sh b/storage/rocksdb/mysql-test/rocksdb_hotbackup/include/stream_run.sh new file mode 100755 index 0000000000000..ef505e4b888b7 --- /dev/null +++ 
b/storage/rocksdb/mysql-test/rocksdb_hotbackup/include/stream_run.sh @@ -0,0 +1,76 @@ +if [ "$STREAM_TYPE" == 'wdt' ]; then + which wdt >/dev/null 2>&1 + if [ $? -ne 0 ]; then + # change to tar if wdt is not installed + STREAM_TYPE='tar' + fi +fi + +set -e + +# Takes a full backup from server_1 to server_2 +# using myrocks_hotbackup streaming + +checkpoint_dir="${MYSQLTEST_VARDIR}/checkpoint" +backup_dir="${MYSQLTEST_VARDIR}/backup" +dest_data_dir="${MYSQLTEST_VARDIR}/mysqld.2/data/" + +mysql_dir=$(echo $MYSQL | awk '{print $1}' | xargs dirname) +PATH=$mysql_dir:$PATH + +mkdir -p $checkpoint_dir +rm -rf $checkpoint_dir/* + +mkdir -p $backup_dir +rm -rf $backup_dir/* +# delete and recreate the dest dir to make sure all hidden files +# and directories (such as .rocksdb) are blown away +rm -rf $dest_data_dir/ +mkdir $dest_data_dir + +COPY_LOG="${MYSQL_TMP_DIR}/myrocks_hotbackup_copy_log" + +if [ "$STREAM_TYPE" == 'tar' ]; then + BACKUP_CMD="$MYSQL_MYROCKS_HOTBACKUP --user='root' --port=${MASTER_MYPORT} \ + --stream=tar --checkpoint_dir=$checkpoint_dir 2> \ + $COPY_LOG | tar -xi -C $backup_dir" +elif [ "$STREAM_TYPE" == 'xbstream' ]; then + BACKUP_CMD="$MYSQL_MYROCKS_HOTBACKUP --user='root' --port=${MASTER_MYPORT} \ + --stream=xbstream --checkpoint_dir=$checkpoint_dir 2> \ + $COPY_LOG | xbstream -x \ + --directory=$backup_dir" +elif [ "$STREAM_TYPE" == "xbstream_socket" ]; then + BACKUP_CMD="$MYSQL_MYROCKS_HOTBACKUP --user='root' --socket=${MASTER_MYSOCK} \ + --stream=xbstream --checkpoint_dir=$checkpoint_dir 2> \ + $COPY_LOG | xbstream -x \ + --directory=$backup_dir" +else + BACKUP_CMD="$MYSQL_MYROCKS_HOTBACKUP --user='root' --stream=wdt \ + --port=${MASTER_MYPORT} --destination=localhost --backup_dir=$backup_dir \ + --avg_mbytes_per_sec=10 --interval=5 \ + --extra_wdt_sender_options='--block_size_mbytes=1' \ + --checkpoint_dir=$checkpoint_dir 2> \ + $COPY_LOG" +fi + +echo "myrocks_hotbackup copy phase" +eval "$BACKUP_CMD" +if [ $? 
-ne 0 ]; then + tail $COPY_LOG + exit 1 +fi + +mkdir ${backup_dir}/test # TODO: Fix skipping empty directories + +MOVEBACK_LOG="${MYSQL_TMP_DIR}/myrocks_hotbackup_moveback_log" + +echo "myrocks_hotbackup move-back phase" +$MYSQL_MYROCKS_HOTBACKUP --move_back --datadir=$dest_data_dir \ + --rocksdb_datadir=$dest_data_dir/.rocksdb \ + --rocksdb_waldir=$dest_data_dir/.rocksdb \ + --backup_dir=$backup_dir > $MOVEBACK_LOG 2>&1 + +if [ $? -ne 0 ]; then + tail $MOVEBACK_LOG + exit 1 +fi diff --git a/storage/rocksdb/mysql-test/rocksdb_hotbackup/my.cnf b/storage/rocksdb/mysql-test/rocksdb_hotbackup/my.cnf new file mode 100644 index 0000000000000..bd9af04c8130c --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_hotbackup/my.cnf @@ -0,0 +1,2 @@ +# Use settings from xb_base.cnf +!include base.cnf diff --git a/storage/rocksdb/mysql-test/rocksdb_hotbackup/r/gtid.result b/storage/rocksdb/mysql-test/rocksdb_hotbackup/r/gtid.result new file mode 100644 index 0000000000000..6cec6ca5d6983 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_hotbackup/r/gtid.result @@ -0,0 +1,23 @@ +include/rpl_init.inc [topology=none] +include/rpl_default_connections.inc +create database db1; +create table db1.t1 ( +`id` int(10) not null auto_increment, +`k` int(10), +`data` varchar(2048), +primary key (`id`), +key (`k`) +) engine=rocksdb; +include/rpl_stop_server.inc [server_number=2] +myrocks_hotbackup copy phase +myrocks_hotbackup move-back phase +include/rpl_start_server.inc [server_number=2] +stop slave; +start slave; +select count(*) from db1.t1; +count(*) +500000 +drop database db1; +stop slave; +reset slave; +include/rpl_end.inc diff --git a/storage/rocksdb/mysql-test/rocksdb_hotbackup/r/slocket.result b/storage/rocksdb/mysql-test/rocksdb_hotbackup/r/slocket.result new file mode 100644 index 0000000000000..9accd18b29424 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_hotbackup/r/slocket.result @@ -0,0 +1,41 @@ +include/rpl_init.inc [topology=none] 
+include/rpl_default_connections.inc +create database db1; +create table db1.t1 ( +`id` int(10) not null auto_increment, +`k` int(10), +`data` varchar(2048), +primary key (`id`), +key (`k`) +) engine=rocksdb; +create database slocket; +create table slocket.t1 ( +`id` int(10) not null auto_increment, +`k` int(10), +`data` varchar(2048), +primary key (`id`), +key (`k`) +) engine=rocksdb; +include/rpl_stop_server.inc [server_number=2] +myrocks_hotbackup copy phase +myrocks_hotbackup move-back phase +include/rpl_start_server.inc [server_number=2] +select count(*) from db1.t1; +count(*) +250000 +select count(*) from slocket.t1; +count(*) +2500 +drop database slocket; +drop database db1; +drop database slocket; +include/rpl_stop_server.inc [server_number=2] +myrocks_hotbackup copy phase +myrocks_hotbackup move-back phase +include/rpl_start_server.inc [server_number=2] +select count(*) from db1.t1; +count(*) +250000 +drop database db1; +drop database db1; +include/rpl_end.inc diff --git a/storage/rocksdb/mysql-test/rocksdb_hotbackup/r/stream.result b/storage/rocksdb/mysql-test/rocksdb_hotbackup/r/stream.result new file mode 100644 index 0000000000000..d3f2ebc4e6f40 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_hotbackup/r/stream.result @@ -0,0 +1,20 @@ +include/rpl_init.inc [topology=none] +include/rpl_default_connections.inc +create database db1; +create table db1.t1 ( +`id` int(10) not null auto_increment, +`k` int(10), +`data` varchar(2048), +primary key (`id`), +key (`k`) +) engine=rocksdb; +include/rpl_stop_server.inc [server_number=2] +myrocks_hotbackup copy phase +myrocks_hotbackup move-back phase +include/rpl_start_server.inc [server_number=2] +select count(*) from db1.t1; +count(*) +250000 +drop database db1; +drop database db1; +include/rpl_end.inc diff --git a/storage/rocksdb/mysql-test/rocksdb_hotbackup/r/wdt.result b/storage/rocksdb/mysql-test/rocksdb_hotbackup/r/wdt.result new file mode 100644 index 0000000000000..d3f2ebc4e6f40 --- /dev/null +++ 
b/storage/rocksdb/mysql-test/rocksdb_hotbackup/r/wdt.result @@ -0,0 +1,20 @@ +include/rpl_init.inc [topology=none] +include/rpl_default_connections.inc +create database db1; +create table db1.t1 ( +`id` int(10) not null auto_increment, +`k` int(10), +`data` varchar(2048), +primary key (`id`), +key (`k`) +) engine=rocksdb; +include/rpl_stop_server.inc [server_number=2] +myrocks_hotbackup copy phase +myrocks_hotbackup move-back phase +include/rpl_start_server.inc [server_number=2] +select count(*) from db1.t1; +count(*) +250000 +drop database db1; +drop database db1; +include/rpl_end.inc diff --git a/storage/rocksdb/mysql-test/rocksdb_hotbackup/r/xbstream.result b/storage/rocksdb/mysql-test/rocksdb_hotbackup/r/xbstream.result new file mode 100644 index 0000000000000..d3f2ebc4e6f40 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_hotbackup/r/xbstream.result @@ -0,0 +1,20 @@ +include/rpl_init.inc [topology=none] +include/rpl_default_connections.inc +create database db1; +create table db1.t1 ( +`id` int(10) not null auto_increment, +`k` int(10), +`data` varchar(2048), +primary key (`id`), +key (`k`) +) engine=rocksdb; +include/rpl_stop_server.inc [server_number=2] +myrocks_hotbackup copy phase +myrocks_hotbackup move-back phase +include/rpl_start_server.inc [server_number=2] +select count(*) from db1.t1; +count(*) +250000 +drop database db1; +drop database db1; +include/rpl_end.inc diff --git a/storage/rocksdb/mysql-test/rocksdb_hotbackup/r/xbstream_socket.result b/storage/rocksdb/mysql-test/rocksdb_hotbackup/r/xbstream_socket.result new file mode 100644 index 0000000000000..d3f2ebc4e6f40 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_hotbackup/r/xbstream_socket.result @@ -0,0 +1,20 @@ +include/rpl_init.inc [topology=none] +include/rpl_default_connections.inc +create database db1; +create table db1.t1 ( +`id` int(10) not null auto_increment, +`k` int(10), +`data` varchar(2048), +primary key (`id`), +key (`k`) +) engine=rocksdb; +include/rpl_stop_server.inc 
[server_number=2] +myrocks_hotbackup copy phase +myrocks_hotbackup move-back phase +include/rpl_start_server.inc [server_number=2] +select count(*) from db1.t1; +count(*) +250000 +drop database db1; +drop database db1; +include/rpl_end.inc diff --git a/storage/rocksdb/mysql-test/rocksdb_hotbackup/t/gtid-master.opt b/storage/rocksdb/mysql-test/rocksdb_hotbackup/t/gtid-master.opt new file mode 100644 index 0000000000000..9d7af67eec97e --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_hotbackup/t/gtid-master.opt @@ -0,0 +1 @@ +--gtid_mode=on --log_slave_updates=on --enforce_gtid_consistency=on diff --git a/storage/rocksdb/mysql-test/rocksdb_hotbackup/t/gtid-slave.opt b/storage/rocksdb/mysql-test/rocksdb_hotbackup/t/gtid-slave.opt new file mode 100644 index 0000000000000..9d7af67eec97e --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_hotbackup/t/gtid-slave.opt @@ -0,0 +1 @@ +--gtid_mode=on --log_slave_updates=on --enforce_gtid_consistency=on diff --git a/storage/rocksdb/mysql-test/rocksdb_hotbackup/t/gtid.test b/storage/rocksdb/mysql-test/rocksdb_hotbackup/t/gtid.test new file mode 100644 index 0000000000000..f9d58da093edd --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_hotbackup/t/gtid.test @@ -0,0 +1,47 @@ + +source suite/rocksdb_hotbackup/include/setup.inc; + +--let $rpl_server_number= 2 +--source include/rpl_stop_server.inc + +--exec suite/rocksdb_hotbackup/include/load_data_and_run.sh 2>&1 + +--let $rpl_server_number= 2 +--source include/rpl_start_server.inc + +connection server_2; +let $num_rows = `select count(*) from db1.t1`; +let $max_id = `select id from db1.t1 order by id desc limit 1`; + +if($num_rows != $max_id) { + echo Number of rows($num_rows) and last_id($max_id) does not match; +} +if($num_rows < 250000) { + echo Number of rows($num_rows) is less than 250000; +} +if($num_rows > 500000) { + echo Number of rows($num_rows) is more than 500000; +} + +--source suite/rocksdb_hotbackup/include/setup_replication_gtid_and_sync.inc + 
+connection server_2; +select count(*) from db1.t1; + +connection server_1; +let $checksum1 = `checksum tables db1.t1`; +connection server_2; +let $checksum2 = `checksum tables db1.t1`; + +if($checksum1 != $checksum2) { + echo Checksums ($checksum1 and $checksum2) do not match; +} + +connection server_1; +drop database db1; +sync_slave_with_master; +connection server_2; +stop slave; +reset slave; + +source suite/rocksdb_hotbackup/include/cleanup.inc; diff --git a/storage/rocksdb/mysql-test/rocksdb_hotbackup/t/slocket.test b/storage/rocksdb/mysql-test/rocksdb_hotbackup/t/slocket.test new file mode 100644 index 0000000000000..14ad8d233765e --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_hotbackup/t/slocket.test @@ -0,0 +1,46 @@ +source suite/rocksdb_hotbackup/include/setup.inc; +source suite/rocksdb_hotbackup/include/setup_slocket.inc; + +--exec suite/rocksdb_hotbackup/include/load_data.sh 2>&1 +--exec suite/rocksdb_hotbackup/include/load_data_slocket.sh 2>&1 + +--let $rpl_server_number= 2 +--source include/rpl_stop_server.inc + +--exec suite/rocksdb_hotbackup/include/stream_run.sh 2>&1 + +--let $rpl_server_number= 2 +--source include/rpl_start_server.inc + +connection server_2; +select count(*) from db1.t1; +select count(*) from slocket.t1; + +connection server_1; +drop database slocket; +connection server_2; +drop database db1; +drop database slocket; + +--exec sleep 2 +--exec suite/rocksdb_hotbackup/include/create_slocket_socket.sh 2>&1 + +--let $rpl_server_number= 2 +--source include/rpl_stop_server.inc + +--exec suite/rocksdb_hotbackup/include/stream_run.sh 2>&1 + +--let $rpl_server_number= 2 +--source include/rpl_start_server.inc + +connection server_2; +select count(*) from db1.t1; + +connection server_1; +drop database db1; +connection server_2; +drop database db1; + +--exec suite/rocksdb_hotbackup/include/remove_slocket_socket.sh 2>&1 + +source suite/rocksdb_hotbackup/include/cleanup.inc; diff --git 
a/storage/rocksdb/mysql-test/rocksdb_hotbackup/t/stream.test b/storage/rocksdb/mysql-test/rocksdb_hotbackup/t/stream.test new file mode 100644 index 0000000000000..2b999f3fce77c --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_hotbackup/t/stream.test @@ -0,0 +1,22 @@ + +source suite/rocksdb_hotbackup/include/setup.inc; + +--exec suite/rocksdb_hotbackup/include/load_data.sh 2>&1 +--let $rpl_server_number= 2 +--source include/rpl_stop_server.inc + +--exec STREAM_TYPE=tar suite/rocksdb_hotbackup/include/stream_run.sh 2>&1 + +--let $rpl_server_number= 2 +--source include/rpl_start_server.inc + +connection server_2; +select count(*) from db1.t1; + +connection server_1; +drop database db1; +connection server_2; +drop database db1; + +source suite/rocksdb_hotbackup/include/cleanup.inc; + diff --git a/storage/rocksdb/mysql-test/rocksdb_hotbackup/t/wdt.test b/storage/rocksdb/mysql-test/rocksdb_hotbackup/t/wdt.test new file mode 100644 index 0000000000000..2d2ed89112b07 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_hotbackup/t/wdt.test @@ -0,0 +1,22 @@ + +source suite/rocksdb_hotbackup/include/setup.inc; + +--let $rpl_server_number= 2 +--source include/rpl_stop_server.inc + +--exec suite/rocksdb_hotbackup/include/load_data.sh 2>&1 +--exec STREAM_TYPE=wdt suite/rocksdb_hotbackup/include/stream_run.sh 2>&1 + +--let $rpl_server_number= 2 +--source include/rpl_start_server.inc + +connection server_2; +select count(*) from db1.t1; + +connection server_1; +drop database db1; +connection server_2; +drop database db1; + +source suite/rocksdb_hotbackup/include/cleanup.inc; + diff --git a/storage/rocksdb/mysql-test/rocksdb_hotbackup/t/xbstream.test b/storage/rocksdb/mysql-test/rocksdb_hotbackup/t/xbstream.test new file mode 100644 index 0000000000000..9bfab4252c42e --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_hotbackup/t/xbstream.test @@ -0,0 +1,22 @@ + +source suite/rocksdb_hotbackup/include/setup.inc; + +--exec suite/rocksdb_hotbackup/include/load_data.sh 
2>&1 +--let $rpl_server_number= 2 +--source include/rpl_stop_server.inc + +--exec STREAM_TYPE=xbstream suite/rocksdb_hotbackup/include/stream_run.sh 2>&1 + +--let $rpl_server_number= 2 +--source include/rpl_start_server.inc + +connection server_2; +select count(*) from db1.t1; + +connection server_1; +drop database db1; +connection server_2; +drop database db1; + +source suite/rocksdb_hotbackup/include/cleanup.inc; + diff --git a/storage/rocksdb/mysql-test/rocksdb_hotbackup/t/xbstream_socket.test b/storage/rocksdb/mysql-test/rocksdb_hotbackup/t/xbstream_socket.test new file mode 100644 index 0000000000000..28edff072e709 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_hotbackup/t/xbstream_socket.test @@ -0,0 +1,22 @@ + +source suite/rocksdb_hotbackup/include/setup.inc; + +--exec suite/rocksdb_hotbackup/include/load_data.sh 2>&1 +--let $rpl_server_number= 2 +--source include/rpl_stop_server.inc + +--exec STREAM_TYPE=xbstream_socket suite/rocksdb_hotbackup/include/stream_run.sh 2>&1 + +--let $rpl_server_number= 2 +--source include/rpl_start_server.inc + +connection server_2; +select count(*) from db1.t1; + +connection server_1; +drop database db1; +connection server_2; +drop database db1; + +source suite/rocksdb_hotbackup/include/cleanup.inc; + diff --git a/storage/rocksdb/mysql-test/rocksdb_rpl/combinations b/storage/rocksdb/mysql-test/rocksdb_rpl/combinations new file mode 100644 index 0000000000000..f09d338c35731 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_rpl/combinations @@ -0,0 +1,2 @@ +[row] +binlog-format=row diff --git a/storage/rocksdb/mysql-test/rocksdb_rpl/include/rpl_no_unique_check_on_lag.inc b/storage/rocksdb/mysql-test/rocksdb_rpl/include/rpl_no_unique_check_on_lag.inc new file mode 100644 index 0000000000000..8f03c16e2f1cb --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_rpl/include/rpl_no_unique_check_on_lag.inc @@ -0,0 +1,71 @@ +--source include/master-slave.inc +--source include/have_binlog_format_row.inc +--source 
include/not_embedded.inc +--source include/not_valgrind.inc + +call mtr.add_suppression("Slave SQL: Could not execute Write_rows event on table test.t1"); +call mtr.add_suppression(".*Worker.*failed executing transaction"); +call mtr.add_suppression(".*The slave coordinator and worker threads are stopped"); + +--disable_warnings +drop table if exists t1; +--enable_warnings + +connection master; +CREATE TABLE t1 (id int primary key, value int) engine=RocksDB; +sync_slave_with_master; +--let $rsbm = query_get_value(select @@global.reset_seconds_behind_master, @@global.reset_seconds_behind_master, 1) +set global reset_seconds_behind_master=1; + +connection slave; +INSERT INTO t1 VALUES(1, 0); +INSERT INTO t1 VALUES(2, 0); +INSERT INTO t1 VALUES(3, 0); + +connection master; +sync_slave_with_master; +connection master; +INSERT INTO t1 VALUES(1, 1); + +connection slave; +--let $slave_sql_errno= 1062 +--let $not_switch_connection= 0 +--source include/wait_for_slave_sql_error_and_skip.inc +set global reset_seconds_behind_master=0; +--source include/stop_slave_io.inc + +connection master; +INSERT INTO t1 values (4,0); +--sleep 11 +INSERT INTO t1 VALUES(2, 1); + +connection slave; +--source include/start_slave_io.inc + +connection master; +sync_slave_with_master; + +connection slave; +set global reset_seconds_behind_master=1; + +connection master; +insert into t1 values (5,0); +--sleep 1 +sync_slave_with_master; + +connection master; +INSERT INTO t1 VALUES(3, 1); + +connection slave; +--let $slave_sql_errno= 1062 +--let $not_switch_connection= 0 +--source include/wait_for_slave_sql_error_and_skip.inc + +--echo # +--echo # Cleanup +--echo # + +connection master; +DROP TABLE t1; +eval set global reset_seconds_behind_master=$rsbm; +--source include/rpl_end.inc diff --git a/storage/rocksdb/mysql-test/rocksdb_rpl/r/consistent_snapshot_mixed_engines.result b/storage/rocksdb/mysql-test/rocksdb_rpl/r/consistent_snapshot_mixed_engines.result new file mode 100644 index 
0000000000000..31777c45c68aa --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_rpl/r/consistent_snapshot_mixed_engines.result @@ -0,0 +1,68 @@ +DROP TABLE IF EXISTS t1; +connect con1,localhost,root,,; +connect con2,localhost,root,,; +connection con1; +create table i1 (id int primary key , value int) engine=innodb; +create table r1 (id int primary key , value int) engine=rocksdb; +SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ; +START TRANSACTION WITH CONSISTENT SNAPSHOT; +connection con2; +insert into i1 values (1,1); +insert into r1 values (1,1); +connection con1; +select * from i1; +id value +select * from r1; +id value +START TRANSACTION WITH CONSISTENT ROCKSDB SNAPSHOT; +File Position Gtid_executed +master-bin.000001 1115 uuid:1-5 +connection con2; +insert into i1 values (2,2); +insert into r1 values (2,2); +connection con1; +select * from i1; +id value +1 1 +2 2 +select * from r1; +id value +1 1 +connection con2; +insert into i1 values (3,2); +insert into r1 values (3,2); +connection con1; +select * from i1; +id value +1 1 +2 2 +select * from r1; +id value +1 1 +START TRANSACTION WITH CONSISTENT INNODB SNAPSHOT; +File Position Gtid_executed +master-bin.000001 2015 uuid:1-9 +connection con2; +insert into r1 values (4,4); +connection con1; +select * from r1; +id value +1 1 +2 2 +3 2 +4 4 +connection con2; +insert into r1 values (5,5); +connection con1; +select * from r1; +id value +1 1 +2 2 +3 2 +4 4 +drop table i1; +drop table r1; +connection default; +disconnect con1; +disconnect con2; +reset master; diff --git a/storage/rocksdb/mysql-test/rocksdb_rpl/r/multiclient_2pc.result b/storage/rocksdb/mysql-test/rocksdb_rpl/r/multiclient_2pc.result new file mode 100644 index 0000000000000..7a7400f17e1af --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_rpl/r/multiclient_2pc.result @@ -0,0 +1,27 @@ +DROP TABLE IF EXISTS t1; +SET GLOBAL ROCKSDB_ENABLE_2PC = ON; +create table t1 (a int primary key, b int, c varchar(255)) engine=rocksdb; +'con1' +SET 
SESSION debug="d,crash_commit_after_log"; +SET DEBUG_SYNC='rocksdb.prepared SIGNAL parked WAIT_FOR go'; +insert into t1 values (1, 1, "iamtheogthealphaandomega");; +'con2' +insert into t1 values (2, 1, "i_am_just_here_to_trigger_a_flush"); +SET GLOBAL ROCKSDB_ENABLE_2PC = OFF; +SET GLOBAL ROCKSDB_WRITE_SYNC = OFF; +SET GLOBAL SYNC_BINLOG = 0; +SET DEBUG_SYNC='now WAIT_FOR parked'; +SET GLOBAL ROCKSDB_ENABLE_2PC = ON; +SET GLOBAL ROCKSDB_WRITE_SYNC = ON; +SET GLOBAL SYNC_BINLOG = 1; +insert into t1 values (1000000, 1, "i_am_just_here_to_trigger_a_flush"); +SET DEBUG_SYNC='now SIGNAL go'; +**found 'prepare' log entry** +**found 'commit' log entry** +select * from t1 where a=1; +a b c +1 1 iamtheogthealphaandomega +select count(*) from t1; +count(*) +1000000 +drop table t1; diff --git a/storage/rocksdb/mysql-test/rocksdb_rpl/r/rpl_crash_safe_wal_corrupt.result b/storage/rocksdb/mysql-test/rocksdb_rpl/r/rpl_crash_safe_wal_corrupt.result new file mode 100644 index 0000000000000..6d061e99846c1 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_rpl/r/rpl_crash_safe_wal_corrupt.result @@ -0,0 +1,135 @@ +include/master-slave.inc +Warnings: +Note #### Sending passwords in plain text without SSL/TLS is extremely insecure. +Note #### Storing MySQL user name or password information in the master info repository is not secure and is therefore not recommended. Please consider using the USER and PASSWORD connection options for START SLAVE; see the 'START SLAVE Syntax' in the MySQL Manual for more information. 
+[connection master] +drop table if exists x; +select @@binlog_format; +@@binlog_format +ROW +create table x (id int primary key, value int, value2 int, index(value)) engine=rocksdb; +insert into x values (1,1,1); +insert into x values (2,1,1); +insert into x values (3,1,1); +insert into x values (4,1,1); +insert into x values (5,1,1); +select @@global.gtid_executed; +@@global.gtid_executed + + +--- slave state before crash --- +select * from x; +id value value2 +1 1 1 +2 1 1 +3 1 1 +4 1 1 +5 1 1 +select @@global.gtid_executed; +@@global.gtid_executed + +select * from mysql.slave_gtid_info; +Id Database_name Last_gtid +include/rpl_start_server.inc [server_number=2] + +--- slave state after crash recovery, slave stop, one transaction recovered--- +select * from x; +id value value2 +1 1 1 +2 1 1 +3 1 1 +4 1 1 +select @@global.gtid_executed; +@@global.gtid_executed + +select * from mysql.slave_gtid_info; +Id Database_name Last_gtid + +--- slave state after restart, slave start --- +include/start_slave.inc +select * from x; +id value value2 +1 1 1 +2 1 1 +3 1 1 +4 1 1 +5 1 1 +select @@global.gtid_executed; +@@global.gtid_executed + +select * from mysql.slave_gtid_info; +Id Database_name Last_gtid +insert into x values (6,1,1); +select * from x; +id value value2 +1 1 1 +2 1 1 +3 1 1 +4 1 1 +5 1 1 +6 1 1 +select @@global.gtid_executed; +@@global.gtid_executed + +select * from mysql.slave_gtid_info; +Id Database_name Last_gtid +insert into x values (7,1,1); +insert into x values (8,1,1); +insert into x values (9,1,1); +insert into x values (10,1,1); +insert into x values (11,1,1); +insert into x values (12,1,1); +select * from x; +id value value2 +1 1 1 +2 1 1 +3 1 1 +4 1 1 +5 1 1 +6 1 1 +7 1 1 +8 1 1 +9 1 1 +10 1 1 +11 1 1 +12 1 1 +select @@global.gtid_executed; +@@global.gtid_executed + +include/rpl_start_server.inc [server_number=2] + +--- slave state after crash recovery, slave stop, WAL was corrupted, point in time recovery with wal_recovery_mode=2 --- +select * from 
x; +id value value2 +1 1 1 +2 1 1 +3 1 1 +4 1 1 +5 1 1 +6 1 1 +7 1 1 +8 1 1 +9 1 1 +include/start_slave.inc +select * from x; +id value value2 +1 1 1 +2 1 1 +3 1 1 +4 1 1 +5 1 1 +6 1 1 +7 1 1 +8 1 1 +9 1 1 +10 1 1 +11 1 1 +12 1 1 +select @@global.gtid_executed; +@@global.gtid_executed + +select * from mysql.slave_gtid_info; +Id Database_name Last_gtid +drop table x; +include/rpl_end.inc +Binlog Info Found diff --git a/storage/rocksdb/mysql-test/rocksdb_rpl/r/rpl_gtid_crash_safe.result b/storage/rocksdb/mysql-test/rocksdb_rpl/r/rpl_gtid_crash_safe.result new file mode 100644 index 0000000000000..352ceff236c22 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_rpl/r/rpl_gtid_crash_safe.result @@ -0,0 +1,361 @@ +include/master-slave.inc +Warnings: +Note #### Sending passwords in plain text without SSL/TLS is extremely insecure. +Note #### Storing MySQL user name or password information in the master info repository is not secure and is therefore not recommended. Please consider using the USER and PASSWORD connection options for START SLAVE; see the 'START SLAVE Syntax' in the MySQL Manual for more information. 
+[connection master] +call mtr.add_suppression("Recovery from master pos"); +create table t1(a int, PRIMARY KEY(a)) ENGINE=ROCKSDB; +insert into t1 values(1); +insert into t1 values(2); +use mysql; +select * from slave_gtid_info; +Id Database_name Last_gtid +1 mtr uuid:1 +2 test uuid:4 +SET GLOBAL debug = '+d,crash_before_update_pos'; +insert into t1 values(3); +include/rpl_reconnect.inc +SET GLOBAL debug = ``; +use mysql; +select * from slave_gtid_info; +Id Database_name Last_gtid +1 mtr uuid:1 +2 test uuid:4 +use test; +select * from t1; +a +1 +2 +change master to master_auto_position = 1; +include/start_slave.inc +rename table t1 to test1; +use test; +select * from test1; +a +1 +2 +3 +use test; +select * from test1; +a +1 +2 +3 +drop table test1; +include/stop_slave.inc +change master to master_auto_position = 0; +include/start_slave.inc +use mysql; +select * from slave_gtid_info; +Id Database_name Last_gtid +1 mtr uuid:1 +2 test uuid:7 +include/rpl_reset.inc +create table t1(a int, PRIMARY KEY(a)) ENGINE=ROCKSDB; +insert into t1 values(1); +insert into t1 values(2); +use mysql; +select * from slave_gtid_info; +Id Database_name Last_gtid +1 test uuid:3 +SET GLOBAL debug = '+d,crash_after_update_pos_before_apply'; +insert into t1 values(3); +include/rpl_reconnect.inc +SET GLOBAL debug = ``; +use mysql; +select * from slave_gtid_info; +Id Database_name Last_gtid +1 test uuid:3 +use test; +select * from t1; +a +1 +2 +change master to master_auto_position = 1; +include/start_slave.inc +rename table t1 to test1; +use test; +select * from test1; +a +1 +2 +3 +use test; +select * from test1; +a +1 +2 +3 +drop table test1; +include/stop_slave.inc +change master to master_auto_position = 0; +include/start_slave.inc +use mysql; +select * from slave_gtid_info; +Id Database_name Last_gtid +1 test uuid:6 +include/rpl_reset.inc +create table t1(a int, PRIMARY KEY(a)) ENGINE=ROCKSDB; +insert into t1 values(1); +insert into t1 values(2); +use mysql; +select * from 
slave_gtid_info; +Id Database_name Last_gtid +1 test uuid:3 +SET GLOBAL debug = '+d,crash_before_writing_xid'; +insert into t1 values(3); +include/rpl_reconnect.inc +SET GLOBAL debug = ``; +use mysql; +select * from slave_gtid_info; +Id Database_name Last_gtid +1 test uuid:3 +use test; +select * from t1; +a +1 +2 +change master to master_auto_position = 1; +include/start_slave.inc +rename table t1 to test1; +use test; +select * from test1; +a +1 +2 +3 +use test; +select * from test1; +a +1 +2 +3 +drop table test1; +include/stop_slave.inc +change master to master_auto_position = 0; +include/start_slave.inc +use mysql; +select * from slave_gtid_info; +Id Database_name Last_gtid +1 test uuid:6 +include/rpl_reset.inc +create table t1(a int, PRIMARY KEY(a)) ENGINE=ROCKSDB; +insert into t1 values(1); +insert into t1 values(2); +use mysql; +select * from slave_gtid_info; +Id Database_name Last_gtid +1 test uuid:3 +SET GLOBAL debug = '+d,half_binlogged_transaction'; +insert into t1 values(3); +include/rpl_reconnect.inc +SET GLOBAL debug = ``; +use mysql; +select * from slave_gtid_info; +Id Database_name Last_gtid +1 test uuid:3 +use test; +select * from t1; +a +1 +2 +change master to master_auto_position = 1; +include/start_slave.inc +rename table t1 to test1; +use test; +select * from test1; +a +1 +2 +3 +use test; +select * from test1; +a +1 +2 +3 +drop table test1; +include/stop_slave.inc +change master to master_auto_position = 0; +include/start_slave.inc +use mysql; +select * from slave_gtid_info; +Id Database_name Last_gtid +1 test uuid:6 +include/rpl_reset.inc +create table t1(a int, PRIMARY KEY(a)) ENGINE=ROCKSDB; +insert into t1 values(1); +insert into t1 values(2); +use mysql; +select * from slave_gtid_info; +Id Database_name Last_gtid +1 test uuid:3 +SET GLOBAL debug = '+d,crash_commit_before'; +insert into t1 values(3); +include/rpl_reconnect.inc +SET GLOBAL debug = ``; +use mysql; +select * from slave_gtid_info; +Id Database_name Last_gtid +1 test uuid:3 +use 
test; +select * from t1; +a +1 +2 +change master to master_auto_position = 1; +include/start_slave.inc +rename table t1 to test1; +use test; +select * from test1; +a +1 +2 +3 +use test; +select * from test1; +a +1 +2 +3 +drop table test1; +include/stop_slave.inc +change master to master_auto_position = 0; +include/start_slave.inc +use mysql; +select * from slave_gtid_info; +Id Database_name Last_gtid +1 test uuid:6 +include/rpl_reset.inc +create table t1(a int, PRIMARY KEY(a)) ENGINE=ROCKSDB; +insert into t1 values(1); +insert into t1 values(2); +use mysql; +select * from slave_gtid_info; +Id Database_name Last_gtid +1 test uuid:3 +SET GLOBAL debug = '+d,crash_commit_after_log'; +insert into t1 values(3); +include/rpl_reconnect.inc +SET GLOBAL debug = ``; +use mysql; +select * from slave_gtid_info; +Id Database_name Last_gtid +1 test uuid:3 +use test; +select * from t1; +a +1 +2 +change master to master_auto_position = 1; +include/start_slave.inc +rename table t1 to test1; +use test; +select * from test1; +a +1 +2 +3 +use test; +select * from test1; +a +1 +2 +3 +drop table test1; +include/stop_slave.inc +change master to master_auto_position = 0; +include/start_slave.inc +use mysql; +select * from slave_gtid_info; +Id Database_name Last_gtid +1 test uuid:6 +include/rpl_reset.inc +create table t1(a int, PRIMARY KEY(a)) ENGINE=ROCKSDB; +insert into t1 values(1); +insert into t1 values(2); +use mysql; +select * from slave_gtid_info; +Id Database_name Last_gtid +1 test uuid:3 +SET GLOBAL debug = '+d,crash_commit_after_prepare'; +insert into t1 values(3); +include/rpl_reconnect.inc +SET GLOBAL debug = ``; +use mysql; +select * from slave_gtid_info; +Id Database_name Last_gtid +1 test uuid:3 +use test; +select * from t1; +a +1 +2 +change master to master_auto_position = 1; +include/start_slave.inc +rename table t1 to test1; +use test; +select * from test1; +a +1 +2 +3 +use test; +select * from test1; +a +1 +2 +3 +drop table test1; +include/stop_slave.inc +change master 
to master_auto_position = 0; +include/start_slave.inc +use mysql; +select * from slave_gtid_info; +Id Database_name Last_gtid +1 test uuid:6 +include/rpl_reset.inc +create table t1(a int, PRIMARY KEY(a)) ENGINE=ROCKSDB; +insert into t1 values(1); +insert into t1 values(2); +use mysql; +select * from slave_gtid_info; +Id Database_name Last_gtid +1 test uuid:3 +SET GLOBAL debug = '+d,crash_commit_after'; +insert into t1 values(3); +include/rpl_reconnect.inc +SET GLOBAL debug = ``; +use mysql; +select * from slave_gtid_info; +Id Database_name Last_gtid +1 test uuid:3 +use test; +select * from t1; +a +1 +2 +change master to master_auto_position = 1; +include/start_slave.inc +rename table t1 to test1; +use test; +select * from test1; +a +1 +2 +3 +use test; +select * from test1; +a +1 +2 +3 +drop table test1; +include/stop_slave.inc +change master to master_auto_position = 0; +include/start_slave.inc +use mysql; +select * from slave_gtid_info; +Id Database_name Last_gtid +1 test uuid:6 +include/rpl_end.inc diff --git a/storage/rocksdb/mysql-test/rocksdb_rpl/r/rpl_gtid_crash_safe_wal_corrupt.result b/storage/rocksdb/mysql-test/rocksdb_rpl/r/rpl_gtid_crash_safe_wal_corrupt.result new file mode 100644 index 0000000000000..e765e338cb5bc --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_rpl/r/rpl_gtid_crash_safe_wal_corrupt.result @@ -0,0 +1,140 @@ +include/master-slave.inc +Warnings: +Note #### Sending passwords in plain text without SSL/TLS is extremely insecure. +Note #### Storing MySQL user name or password information in the master info repository is not secure and is therefore not recommended. Please consider using the USER and PASSWORD connection options for START SLAVE; see the 'START SLAVE Syntax' in the MySQL Manual for more information. 
+[connection master] +drop table if exists x; +select @@binlog_format; +@@binlog_format +ROW +create table x (id int primary key, value int, value2 int, index(value)) engine=rocksdb; +insert into x values (1,1,1); +insert into x values (2,1,1); +insert into x values (3,1,1); +insert into x values (4,1,1); +insert into x values (5,1,1); +select @@global.gtid_executed; +@@global.gtid_executed +uuid:1-7 + +--- slave state before crash --- +select * from x; +id value value2 +1 1 1 +2 1 1 +3 1 1 +4 1 1 +5 1 1 +select @@global.gtid_executed; +@@global.gtid_executed +uuid:1-7 +select * from mysql.slave_gtid_info; +Id Database_name Last_gtid +1 test uuid:7 +include/rpl_start_server.inc [server_number=2] + +--- slave state after crash recovery, slave stop, one transaction recovered--- +select * from x; +id value value2 +1 1 1 +2 1 1 +3 1 1 +4 1 1 +select @@global.gtid_executed; +@@global.gtid_executed +uuid:1-6 +select * from mysql.slave_gtid_info; +Id Database_name Last_gtid +1 test uuid:6 + +--- slave state after restart, slave start --- +include/start_slave.inc +select * from x; +id value value2 +1 1 1 +2 1 1 +3 1 1 +4 1 1 +5 1 1 +select @@global.gtid_executed; +@@global.gtid_executed +uuid:1-7 +select * from mysql.slave_gtid_info; +Id Database_name Last_gtid +1 test uuid:7 +insert into x values (6,1,1); +select * from x; +id value value2 +1 1 1 +2 1 1 +3 1 1 +4 1 1 +5 1 1 +6 1 1 +select @@global.gtid_executed; +@@global.gtid_executed +uuid:1-8 +select * from mysql.slave_gtid_info; +Id Database_name Last_gtid +1 test uuid:8 +insert into x values (7,1,1); +insert into x values (8,1,1); +insert into x values (9,1,1); +insert into x values (10,1,1); +insert into x values (11,1,1); +insert into x values (12,1,1); +select * from x; +id value value2 +1 1 1 +2 1 1 +3 1 1 +4 1 1 +5 1 1 +6 1 1 +7 1 1 +8 1 1 +9 1 1 +10 1 1 +11 1 1 +12 1 1 +select @@global.gtid_executed; +@@global.gtid_executed +uuid:1-14 +include/rpl_start_server.inc [server_number=2] + +--- slave state after 
crash recovery, slave stop, WAL was corrupted, point in time recovery with wal_recovery_mode=2 --- +select * from x; +id value value2 +1 1 1 +2 1 1 +3 1 1 +4 1 1 +5 1 1 +6 1 1 +7 1 1 +8 1 1 +9 1 1 +include/start_slave.inc +select * from x; +id value value2 +1 1 1 +2 1 1 +3 1 1 +4 1 1 +5 1 1 +6 1 1 +7 1 1 +8 1 1 +9 1 1 +10 1 1 +11 1 1 +12 1 1 +select @@global.gtid_executed; +@@global.gtid_executed +uuid:1-14 +select * from mysql.slave_gtid_info; +Id Database_name Last_gtid +1 test uuid:14 +drop table x; +include/rpl_end.inc +Binlog Info Found diff --git a/storage/rocksdb/mysql-test/rocksdb_rpl/r/rpl_gtid_rocksdb_sys_header.result b/storage/rocksdb/mysql-test/rocksdb_rpl/r/rpl_gtid_rocksdb_sys_header.result new file mode 100644 index 0000000000000..b2703ee0cbb47 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_rpl/r/rpl_gtid_rocksdb_sys_header.result @@ -0,0 +1,16 @@ +include/master-slave.inc +Warnings: +Note #### Sending passwords in plain text without SSL/TLS is extremely insecure. +Note #### Storing MySQL user name or password information in the master info repository is not secure and is therefore not recommended. Please consider using the USER and PASSWORD connection options for START SLAVE; see the 'START SLAVE Syntax' in the MySQL Manual for more information. 
+[connection master] +create table t1 (a int primary key) engine=rocksdb; +insert into t1 values(1); +SET GLOBAL debug = '+d,crash_before_writing_xid'; +insert into t1 values(2); +ERROR HY000: Lost connection to MySQL server during query +include/rpl_reconnect.inc +SET GLOBAL debug = ``; +include/start_slave.inc +RocksDB: Last MySQL Gtid master_uuid:2 +drop table t1; +include/rpl_end.inc diff --git a/storage/rocksdb/mysql-test/rocksdb_rpl/r/rpl_no_unique_check_on_lag.result b/storage/rocksdb/mysql-test/rocksdb_rpl/r/rpl_no_unique_check_on_lag.result new file mode 100644 index 0000000000000..905b56dacb54a --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_rpl/r/rpl_no_unique_check_on_lag.result @@ -0,0 +1,34 @@ +# +# Ensure skip_unique_check is set when lag exceeds lag_threshold +# +include/master-slave.inc +Warnings: +Note #### Sending passwords in plain text without SSL/TLS is extremely insecure. +Note #### Storing MySQL user name or password information in the master info repository is not secure and is therefore not recommended. Please consider using the USER and PASSWORD connection options for START SLAVE; see the 'START SLAVE Syntax' in the MySQL Manual for more information. 
+[connection master] +call mtr.add_suppression("Slave SQL: Could not execute Write_rows event on table test.t1"); +call mtr.add_suppression(".*Worker.*failed executing transaction"); +call mtr.add_suppression(".*The slave coordinator and worker threads are stopped"); +drop table if exists t1; +CREATE TABLE t1 (id int primary key, value int) engine=RocksDB; +set global reset_seconds_behind_master=1; +INSERT INTO t1 VALUES(1, 0); +INSERT INTO t1 VALUES(2, 0); +INSERT INTO t1 VALUES(3, 0); +INSERT INTO t1 VALUES(1, 1); +include/wait_for_slave_sql_error_and_skip.inc [errno=1062] +set global reset_seconds_behind_master=0; +include/stop_slave_io.inc +INSERT INTO t1 values (4,0); +INSERT INTO t1 VALUES(2, 1); +include/start_slave_io.inc +set global reset_seconds_behind_master=1; +insert into t1 values (5,0); +INSERT INTO t1 VALUES(3, 1); +include/wait_for_slave_sql_error_and_skip.inc [errno=1062] +# +# Cleanup +# +DROP TABLE t1; +set global reset_seconds_behind_master=1; +include/rpl_end.inc diff --git a/storage/rocksdb/mysql-test/rocksdb_rpl/r/rpl_no_unique_check_on_lag_mts.result b/storage/rocksdb/mysql-test/rocksdb_rpl/r/rpl_no_unique_check_on_lag_mts.result new file mode 100644 index 0000000000000..6c58cb16fed53 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_rpl/r/rpl_no_unique_check_on_lag_mts.result @@ -0,0 +1,31 @@ +include/master-slave.inc +Warnings: +Note #### Sending passwords in plain text without SSL/TLS is extremely insecure. +Note #### Storing MySQL user name or password information in the master info repository is not secure and is therefore not recommended. Please consider using the USER and PASSWORD connection options for START SLAVE; see the 'START SLAVE Syntax' in the MySQL Manual for more information. 
+[connection master] +call mtr.add_suppression("Slave SQL: Could not execute Write_rows event on table test.t1"); +call mtr.add_suppression(".*Worker.*failed executing transaction"); +call mtr.add_suppression(".*The slave coordinator and worker threads are stopped"); +drop table if exists t1; +CREATE TABLE t1 (id int primary key, value int) engine=RocksDB; +set global reset_seconds_behind_master=1; +INSERT INTO t1 VALUES(1, 0); +INSERT INTO t1 VALUES(2, 0); +INSERT INTO t1 VALUES(3, 0); +INSERT INTO t1 VALUES(1, 1); +include/wait_for_slave_sql_error_and_skip.inc [errno=1062] +set global reset_seconds_behind_master=0; +include/stop_slave_io.inc +INSERT INTO t1 values (4,0); +INSERT INTO t1 VALUES(2, 1); +include/start_slave_io.inc +set global reset_seconds_behind_master=1; +insert into t1 values (5,0); +INSERT INTO t1 VALUES(3, 1); +include/wait_for_slave_sql_error_and_skip.inc [errno=1062] +# +# Cleanup +# +DROP TABLE t1; +set global reset_seconds_behind_master=1; +include/rpl_end.inc diff --git a/storage/rocksdb/mysql-test/rocksdb_rpl/r/rpl_rocksdb_2pc_crash_recover.result b/storage/rocksdb/mysql-test/rocksdb_rpl/r/rpl_rocksdb_2pc_crash_recover.result new file mode 100644 index 0000000000000..59d1a2313277f --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_rpl/r/rpl_rocksdb_2pc_crash_recover.result @@ -0,0 +1,44 @@ +DROP TABLE IF EXISTS t1; +create table t1 (a int primary key, msg varchar(255)) engine=rocksdb; +SET GLOBAL ROCKSDB_ENABLE_2PC = ON; +SET SESSION debug="d,crash_commit_after_prepare"; +insert into t1 values (1, 'dogz'); +select * from t1; +a msg +SET GLOBAL ROCKSDB_ENABLE_2PC = ON; +SET SESSION debug="d,crash_commit_after_log"; +insert into t1 values (2, 'catz'), (3, 'men'); +select * from t1; +a msg +2 catz +3 men +SET GLOBAL ROCKSDB_ENABLE_2PC = ON; +SET SESSION debug="d,crash_commit_after"; +insert into t1 values (4, 'cars'), (5, 'foo'); +select * from t1; +a msg +2 catz +3 men +4 cars +5 foo +SET GLOBAL ROCKSDB_ENABLE_2PC = OFF; +SET SESSION 
debug="d,crash_commit_after_log"; +insert into t1 values (6, 'shipz'), (7, 'tankz'); +select * from t1; +a msg +2 catz +3 men +4 cars +5 foo +SET GLOBAL ROCKSDB_ENABLE_2PC = OFF; +SET SESSION debug="d,crash_commit_after"; +insert into t1 values (8, 'space'), (9, 'time'); +select * from t1; +a msg +2 catz +3 men +4 cars +5 foo +8 space +9 time +drop table t1; diff --git a/storage/rocksdb/mysql-test/rocksdb_rpl/r/rpl_rocksdb_snapshot.result b/storage/rocksdb/mysql-test/rocksdb_rpl/r/rpl_rocksdb_snapshot.result new file mode 100644 index 0000000000000..eb2c6cfcda314 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_rpl/r/rpl_rocksdb_snapshot.result @@ -0,0 +1,222 @@ +include/master-slave.inc +Warnings: +Note #### Sending passwords in plain text without SSL/TLS is extremely insecure. +Note #### Storing MySQL user name or password information in the master info repository is not secure and is therefore not recommended. Please consider using the USER and PASSWORD connection options for START SLAVE; see the 'START SLAVE Syntax' in the MySQL Manual for more information. +[connection master] +DROP TABLE IF EXISTS t1; +# Establish connection con1 (user=root) +# Establish connection con2 (user=root) +# Establish connection con3 (user=root) +# Establish connection con4 (user=root) +# reset replication to guarantee that master-bin.000001 is used +include/stop_slave.inc +RESET SLAVE; +RESET MASTER; +RESET MASTER; +CHANGE MASTER TO master_host="127.0.0.1",master_port=MASTER_PORT,master_user="root"; +Warnings: +Note 1759 Sending passwords in plain text without SSL/TLS is extremely insecure. +Note 1760 Storing MySQL user name or password information in the master info repository is not secure and is therefore not recommended. Please consider using the USER and PASSWORD connection options for START SLAVE; see the 'START SLAVE Syntax' in the MySQL Manual for more information. 
+include/start_slave.inc +# Switch to connection con1 +CREATE TABLE t1 (a INT PRIMARY KEY) ENGINE=rocksdb; +INSERT INTO t1 VALUES(1); +SET TRANSACTION ISOLATION LEVEL READ COMMITTED; +START TRANSACTION WITH CONSISTENT SNAPSHOT; +ERROR HY000: Only REPEATABLE READ isolation level is supported for START TRANSACTION WITH CONSISTENT SNAPSHOT in RocksDB Storage Engine. +START TRANSACTION WITH CONSISTENT ROCKSDB SNAPSHOT; +ERROR HY000: Only REPEATABLE READ isolation level is supported for START TRANSACTION WITH CONSISTENT SNAPSHOT in RocksDB Storage Engine. +ROLLBACK; +SET TRANSACTION ISOLATION LEVEL REPEATABLE READ; +START TRANSACTION WITH CONSISTENT ROCKSDB SNAPSHOT; +File Position Gtid_executed +master-bin.000001 531 UUID:1-2 +# Switch to connection con2 +INSERT INTO t1 VALUES(2); +INSERT INTO t1 VALUES(3); +# Switch to connection con1 +SELECT * FROM t1; +a +1 +COMMIT; +SELECT * FROM t1; +a +1 +2 +3 +DROP TABLE t1; +# Switch to connection con1 +CREATE TABLE t1 (a INT PRIMARY KEY) ENGINE=rocksdb; +INSERT INTO t1 VALUES(1); +START TRANSACTION WITH CONSISTENT ROCKSDB SNAPSHOT; +File Position Gtid_executed +master-bin.000001 1510 UUID:1-7 +START TRANSACTION WITH CONSISTENT ROCKSDB SNAPSHOT; +File Position Gtid_executed +master-bin.000001 1510 UUID:1-7 +START TRANSACTION WITH CONSISTENT ROCKSDB SNAPSHOT; +File Position Gtid_executed +master-bin.000001 1510 UUID:1-7 +START TRANSACTION WITH CONSISTENT ROCKSDB SNAPSHOT; +File Position Gtid_executed +master-bin.000001 1510 UUID:1-7 +# Switch to connection con2 +INSERT INTO t1 VALUES(2); +INSERT INTO t1 VALUES(3); +# Switch to connection con1 +SELECT * FROM t1; +a +1 +SELECT * INTO OUTFILE '/tmp/rpl_rocksdb_snapshot.out.file' FROM t1; +COMMIT; +# Switch to slave +CREATE TABLE t1_backup LIKE t1; +INSERT INTO t1_backup SELECT * FROM t1; +include/stop_slave.inc +RESET SLAVE; +RESET MASTER; +DELETE FROM t1; +LOAD DATA INFILE '/tmp/rpl_rocksdb_snapshot.out.file' INTO TABLE t1; +SELECT * FROM t1; +a +1 +CHANGE MASTER TO 
master_host="127.0.0.1",master_port=MASTER_PORT,master_user="root",master_log_file="master-bin.000001",master_log_pos=binlog_pos; +Warnings: +Note 1759 Sending passwords in plain text without SSL/TLS is extremely insecure. +Note 1760 Storing MySQL user name or password information in the master info repository is not secure and is therefore not recommended. Please consider using the USER and PASSWORD connection options for START SLAVE; see the 'START SLAVE Syntax' in the MySQL Manual for more information. +include/start_slave.inc +SELECT * FROM t1; +a +1 +2 +3 +SELECT * FROM t1_backup; +a +1 +2 +3 +DROP TABLE t1_backup; +DROP TABLE t1; +# Switch to connection con1 +CREATE TABLE t1 (a INT PRIMARY KEY) ENGINE=rocksdb; +INSERT INTO t1 VALUES(1); +# async queries from con2 +INSERT INTO t1 VALUES(2); +# async queries from con3 +INSERT INTO t1 VALUES(21); +# Switch to connection con1 +# Switch to connection con4 +INSERT INTO t1 VALUES(9); +# Switch to connection con1 +SELECT * INTO OUTFILE '/tmp/rpl_rocksdb_snapshot.out.file' FROM t1; +COMMIT; +# reap async statements +# Switch to slave +CREATE TABLE t1_backup LIKE t1; +INSERT INTO t1_backup SELECT * FROM t1; +include/stop_slave.inc +RESET SLAVE; +RESET MASTER; +DELETE FROM t1; +LOAD DATA INFILE '/tmp/rpl_rocksdb_snapshot.out.file' INTO TABLE t1; +CHANGE MASTER TO master_host="127.0.0.1",master_port=MASTER_PORT,master_user="root",master_log_file="master-bin.000001",master_log_pos=binlog_pos; +Warnings: +Note 1759 Sending passwords in plain text without SSL/TLS is extremely insecure. +Note 1760 Storing MySQL user name or password information in the master info repository is not secure and is therefore not recommended. Please consider using the USER and PASSWORD connection options for START SLAVE; see the 'START SLAVE Syntax' in the MySQL Manual for more information. 
+include/start_slave.inc +# sync and then query slave +ShouldBeZero +0 +DROP TABLE t1_backup; +DROP TABLE t1; +# Switch to connection con1 +CREATE TABLE t1 (a INT PRIMARY KEY) ENGINE=rocksdb; +INSERT INTO t1 VALUES(1); +START TRANSACTION WITH CONSISTENT ROCKSDB SNAPSHOT; +File Position Gtid_executed +master-bin.000001 3688 UUID:1-18 +START TRANSACTION WITH CONSISTENT ROCKSDB SNAPSHOT; +File Position Gtid_executed +master-bin.000001 3688 UUID:1-18 +START TRANSACTION WITH CONSISTENT ROCKSDB SNAPSHOT; +File Position Gtid_executed +master-bin.000001 3688 UUID:1-18 +START TRANSACTION WITH CONSISTENT ROCKSDB SNAPSHOT; +File Position Gtid_executed +master-bin.000001 3688 UUID:1-18 +# Switch to connection con2 +INSERT INTO t1 VALUES(2); +INSERT INTO t1 VALUES(3); +# Switch to connection con1 +SELECT * FROM t1; +a +1 +SELECT * INTO OUTFILE '/tmp/rpl_rocksdb_snapshot.out.file' FROM t1; +COMMIT; +# Switch to slave +CREATE TABLE t1_backup LIKE t1; +INSERT INTO t1_backup SELECT * FROM t1; +include/stop_slave.inc +RESET SLAVE; +RESET MASTER; +SET @@global.gtid_purged='gtid_executed_from_snapshot'; +DELETE FROM t1; +LOAD DATA INFILE '/tmp/rpl_rocksdb_snapshot.out.file' INTO TABLE t1; +SELECT * FROM t1; +a +1 +CHANGE MASTER TO master_host="127.0.0.1",master_port=MASTER_PORT,master_user="root", master_auto_position=1; +Warnings: +Note 1759 Sending passwords in plain text without SSL/TLS is extremely insecure. +Note 1760 Storing MySQL user name or password information in the master info repository is not secure and is therefore not recommended. Please consider using the USER and PASSWORD connection options for START SLAVE; see the 'START SLAVE Syntax' in the MySQL Manual for more information. 
+include/start_slave.inc +SELECT * FROM t1; +a +1 +2 +3 +SELECT * FROM t1_backup; +a +1 +2 +3 +DROP TABLE t1_backup; +DROP TABLE t1; +# Switch to connection con1 +CREATE TABLE t1 (a INT PRIMARY KEY) ENGINE=rocksdb; +INSERT INTO t1 VALUES(1); +# async queries from con2 +INSERT INTO t1 VALUES(2); +# async queries from con3 +INSERT INTO t1 VALUES(21); +# Switch to connection con1 +# Switch to connection con4 +INSERT INTO t1 VALUES(9); +# Switch to connection con1 +SELECT * INTO OUTFILE '/tmp/rpl_rocksdb_snapshot.out.file' FROM t1; +COMMIT; +# reap async statements +# Switch to slave +CREATE TABLE t1_backup LIKE t1; +INSERT INTO t1_backup SELECT * FROM t1; +include/stop_slave.inc +RESET SLAVE; +RESET MASTER; +SET @@global.gtid_purged='gtid_executed_from_snapshot'; +DELETE FROM t1; +LOAD DATA INFILE '/tmp/rpl_rocksdb_snapshot.out.file' INTO TABLE t1; +CHANGE MASTER TO master_host="127.0.0.1",master_port=MASTER_PORT,master_user="root", master_auto_position=1; +Warnings: +Note 1759 Sending passwords in plain text without SSL/TLS is extremely insecure. +Note 1760 Storing MySQL user name or password information in the master info repository is not secure and is therefore not recommended. Please consider using the USER and PASSWORD connection options for START SLAVE; see the 'START SLAVE Syntax' in the MySQL Manual for more information. 
+include/start_slave.inc +# sync and then query slave +ShouldBeZero +0 +DROP TABLE t1_backup; +DROP TABLE t1; +# Switch to connection default + close connections con1 and con2 +include/stop_slave.inc +CHANGE MASTER to master_auto_position=0; +include/start_slave.inc +include/rpl_end.inc diff --git a/storage/rocksdb/mysql-test/rocksdb_rpl/r/rpl_rocksdb_snapshot_without_gtid.result b/storage/rocksdb/mysql-test/rocksdb_rpl/r/rpl_rocksdb_snapshot_without_gtid.result new file mode 100644 index 0000000000000..57c1d0822c997 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_rpl/r/rpl_rocksdb_snapshot_without_gtid.result @@ -0,0 +1,15 @@ +include/master-slave.inc +Warnings: +Note #### Sending passwords in plain text without SSL/TLS is extremely insecure. +Note #### Storing MySQL user name or password information in the master info repository is not secure and is therefore not recommended. Please consider using the USER and PASSWORD connection options for START SLAVE; see the 'START SLAVE Syntax' in the MySQL Manual for more information. +[connection master] +create table t1(a int primary key); +FLUSH LOGS; +insert into t1 values(1); +insert into t1 values(2); +FLUSH LOGS; +START TRANSACTION WITH CONSISTENT ROCKSDB SNAPSHOT; +File Position Gtid_executed +master-bin.000003 120 +drop table t1; +include/rpl_end.inc diff --git a/storage/rocksdb/mysql-test/rocksdb_rpl/r/rpl_rocksdb_stress_crash.result b/storage/rocksdb/mysql-test/rocksdb_rpl/r/rpl_rocksdb_stress_crash.result new file mode 100644 index 0000000000000..d4920b1470511 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_rpl/r/rpl_rocksdb_stress_crash.result @@ -0,0 +1,28 @@ +include/master-slave.inc +Warnings: +Note #### Sending passwords in plain text without SSL/TLS is extremely insecure. +Note #### Storing MySQL user name or password information in the master info repository is not secure and is therefore not recommended. 
Please consider using the USER and PASSWORD connection options for START SLAVE; see the 'START SLAVE Syntax' in the MySQL Manual for more information. +[connection master] +call mtr.add_suppression(".*"); +include/stop_slave.inc +change master to master_auto_position=1; +include/start_slave.inc +call mtr.add_suppression('Slave: Error dropping database'); +stop slave sql_thread; +insert into test0.benchmark set state='slave is processing load'; +start slave sql_thread; +use test0; +insert into benchmark set state='slave ends load'; +use test; +select * from test1.benchmark into outfile 'benchmark.out'; +select ts from test0.benchmark where state like 'master started load' into @m_0; +select ts from test0.benchmark where state like 'master ends load' into @m_1; +select ts from test0.benchmark where state like 'slave takes on load' into @s_m0; +select ts from test0.benchmark where state like 'slave is supposed to finish with load' into @s_m1; +select ts from test0.benchmark where state like 'slave ends load' into @s_1; +select ts from test0.benchmark where state like 'slave is processing load' into @s_0; +select time_to_sec(@m_1) - time_to_sec(@m_0) as 'delta.out'; +include/stop_slave.inc +change master to master_auto_position=0; +include/start_slave.inc +include/rpl_end.inc diff --git a/storage/rocksdb/mysql-test/rocksdb_rpl/r/rpl_skip_trx_api_binlog_format.result b/storage/rocksdb/mysql-test/rocksdb_rpl/r/rpl_skip_trx_api_binlog_format.result new file mode 100644 index 0000000000000..e0dbc92cdf571 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_rpl/r/rpl_skip_trx_api_binlog_format.result @@ -0,0 +1,27 @@ +include/master-slave.inc +Warnings: +Note #### Sending passwords in plain text without SSL/TLS is extremely insecure. +Note #### Storing MySQL user name or password information in the master info repository is not secure and is therefore not recommended. 
Please consider using the USER and PASSWORD connection options for START SLAVE; see the 'START SLAVE Syntax' in the MySQL Manual for more information. +[connection master] +call mtr.add_suppression("Master's binlog format is not ROW but rpl_skip_tx_api is enabled on the slave"); +set global rpl_skip_tx_api=ON; +set global rocksdb_unsafe_for_binlog=1; +create table t1(a int); +set session binlog_format=STATEMENT; +insert into t1 values(1); +include/wait_for_slave_sql_error.inc [errno=1756] +Last_SQL_Error = 'Master's binlog format is not ROW but rpl_skip_tx_api is enabled on the slave. rpl_skip_tx_api recovery should only be used when master's binlog format is ROW.' +"Table after error" +select * from t1; +a +set global rpl_skip_tx_api=OFF; +include/start_slave.inc +include/sync_slave_sql_with_master.inc +"Table after error fixed" +select * from t1; +a +1 +drop table t1; +set global rocksdb_unsafe_for_binlog=0; +set global rpl_skip_tx_api=0; +include/rpl_end.inc diff --git a/storage/rocksdb/mysql-test/rocksdb_rpl/rpl_1slave_base.cnf b/storage/rocksdb/mysql-test/rocksdb_rpl/rpl_1slave_base.cnf new file mode 100644 index 0000000000000..ed8c77bcc0b06 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_rpl/rpl_1slave_base.cnf @@ -0,0 +1,51 @@ +# Use default setting for mysqld processes +!include include/default_mysqld.cnf +!include include/default_client.cnf + +[mysqld.1] + +# Run the master.sh script before starting this process +#!run-master-sh + +log-bin= master-bin + +loose-innodb + +[mysqld.2] +# Run the slave.sh script before starting this process +#!run-slave-sh + +# Append -slave.opt file to the list of argument used when +# starting the mysqld +#!use-slave-opt +innodb_use_native_aio = 0 + +log-bin= slave-bin +relay-log= slave-relay-bin + +log-slave-updates +master-retry-count= 10 + +# Values reported by slave when it connect to master +# and shows up in SHOW SLAVE STATUS; +report-host= 127.0.0.1 +report-port= @mysqld.2.port +report-user= root + 
+skip-slave-start + +# Directory where slaves find the dumps generated by "load data" +# on the server. The path need to have constant length otherwise +# test results will vary, thus a relative path is used. +slave-load-tmpdir= ../../tmp + +loose-innodb + + +[ENV] +MASTER_MYPORT= @mysqld.1.port +MASTER_MYSOCK= @mysqld.1.socket + +SLAVE_MYPORT= @mysqld.2.port +SLAVE_MYSOCK= @mysqld.2.socket + diff --git a/storage/rocksdb/mysql-test/rocksdb_rpl/t/combinations b/storage/rocksdb/mysql-test/rocksdb_rpl/t/combinations new file mode 100644 index 0000000000000..f09d338c35731 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_rpl/t/combinations @@ -0,0 +1,2 @@ +[row] +binlog-format=row diff --git a/storage/rocksdb/mysql-test/rocksdb_rpl/t/consistent_snapshot_mixed_engines-master.opt b/storage/rocksdb/mysql-test/rocksdb_rpl/t/consistent_snapshot_mixed_engines-master.opt new file mode 100644 index 0000000000000..c747adc94d508 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_rpl/t/consistent_snapshot_mixed_engines-master.opt @@ -0,0 +1 @@ +--gtid_mode=ON --enforce_gtid_consistency --log_bin --log_slave_updates diff --git a/storage/rocksdb/mysql-test/rocksdb_rpl/t/consistent_snapshot_mixed_engines.test b/storage/rocksdb/mysql-test/rocksdb_rpl/t/consistent_snapshot_mixed_engines.test new file mode 100644 index 0000000000000..acea1903c0578 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_rpl/t/consistent_snapshot_mixed_engines.test @@ -0,0 +1,81 @@ +--source include/have_log_bin.inc +--source include/have_rocksdb.inc +--source include/have_innodb.inc +--enable_connect_log +-- let $uuid = `select @@server_uuid;` + +# Save the initial number of concurrent sessions +--source include/count_sessions.inc + +--disable_warnings +DROP TABLE IF EXISTS t1; +--enable_warnings + +connect (con1,localhost,root,,); +connect (con2,localhost,root,,); + +connection con1; +create table i1 (id int primary key , value int) engine=innodb; +create table r1 (id int primary key , value 
int) engine=rocksdb; + + +SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ; + +# Without setting engine, this takes both InnoDB and RocksDB snapshots +-- replace_result $uuid uuid +START TRANSACTION WITH CONSISTENT SNAPSHOT; + +connection con2; +insert into i1 values (1,1); +insert into r1 values (1,1); + +connection con1; +select * from i1; +select * from r1; + +# This takes RocksDB snapshot only but InnoDB also participates in transaction. +-- replace_result $uuid uuid +START TRANSACTION WITH CONSISTENT ROCKSDB SNAPSHOT; + +connection con2; +insert into i1 values (2,2); +insert into r1 values (2,2); + +connection con1; +# takes InnoDB snapshot here so changes after that are not visible +select * from i1; +select * from r1; + +connection con2; +insert into i1 values (3,2); +insert into r1 values (3,2); + +connection con1; +select * from i1; +select * from r1; + +# RocksDB also participates in transaction +-- replace_result $uuid uuid +START TRANSACTION WITH CONSISTENT INNODB SNAPSHOT; + +connection con2; +insert into r1 values (4,4); + +connection con1; +# takes RocksDB snapshot here so changes after that are not visible +select * from r1; + +connection con2; +insert into r1 values (5,5); + +connection con1; +select * from r1; + +drop table i1; +drop table r1; + +connection default; +disconnect con1; +disconnect con2; +reset master; +--source include/wait_until_count_sessions.inc diff --git a/storage/rocksdb/mysql-test/rocksdb_rpl/t/multiclient_2pc-master.opt b/storage/rocksdb/mysql-test/rocksdb_rpl/t/multiclient_2pc-master.opt new file mode 100644 index 0000000000000..c747adc94d508 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_rpl/t/multiclient_2pc-master.opt @@ -0,0 +1 @@ +--gtid_mode=ON --enforce_gtid_consistency --log_bin --log_slave_updates diff --git a/storage/rocksdb/mysql-test/rocksdb_rpl/t/multiclient_2pc.test b/storage/rocksdb/mysql-test/rocksdb_rpl/t/multiclient_2pc.test new file mode 100644 index 0000000000000..f47f83b0bd240 --- /dev/null +++ 
b/storage/rocksdb/mysql-test/rocksdb_rpl/t/multiclient_2pc.test @@ -0,0 +1,71 @@ +--source include/have_rocksdb.inc +--source include/have_binlog_format_row.inc +--source include/have_debug.inc +--source include/have_debug_sync.inc +--source include/big_test.inc + +--exec echo > $MYSQLTEST_VARDIR/log/mysqld.1.err + +--disable_warnings +DROP TABLE IF EXISTS t1; +--enable_warnings + +SET GLOBAL ROCKSDB_ENABLE_2PC = ON; +create table t1 (a int primary key, b int, c varchar(255)) engine=rocksdb; + +connect (con1, localhost, root,,); +connect (con2, localhost, root,,); + +# On connection one we insert a row and pause after commit marker is written to WAL. +# Connection two then inserts many rows. After connection two +# completes connection one continues only to crash before commit but after +# binlog write. On crash recovery we see that connection one's value +# has been recovered and committed +connection con1; +--echo 'con1' +--exec echo "restart" > $MYSQLTEST_VARDIR/tmp/mysqld.1.expect +SET SESSION debug="d,crash_commit_after_log"; +SET DEBUG_SYNC='rocksdb.prepared SIGNAL parked WAIT_FOR go'; +--error 0,2013 +--send insert into t1 values (1, 1, "iamtheogthealphaandomega"); + +connection con2; +--echo 'con2' +insert into t1 values (2, 1, "i_am_just_here_to_trigger_a_flush"); + +# Disable 2PC and syncing for faster inserting of dummy rows +# These rows' only purpose is to rotate the binlog +SET GLOBAL ROCKSDB_ENABLE_2PC = OFF; +SET GLOBAL ROCKSDB_WRITE_SYNC = OFF; +SET GLOBAL SYNC_BINLOG = 0; + +SET DEBUG_SYNC='now WAIT_FOR parked'; +--disable_query_log +--let $pk= 3 +while ($pk < 1000000) { + eval insert into t1 values ($pk, 1, "foobardatagoesheresothatmorelogsrollwhichiswhatwewant"); + --inc $pk +} +--enable_query_log + +# re-enable 2PC and syncing then write to trigger a flush +# before we trigger the crash to simulate full-durability +SET GLOBAL ROCKSDB_ENABLE_2PC = ON; +SET GLOBAL ROCKSDB_WRITE_SYNC = ON; +SET GLOBAL SYNC_BINLOG = 1; + +insert into t1 values 
(1000000, 1, "i_am_just_here_to_trigger_a_flush"); + +SET DEBUG_SYNC='now SIGNAL go'; + +--enable_reconnect +--source include/wait_until_connected_again.inc + +--exec sleep 60 + +--exec python suite/rocksdb/t/check_log_for_xa.py $MYSQLTEST_VARDIR/log/mysqld.1.err commit,prepare,rollback + +select * from t1 where a=1; +select count(*) from t1; + +drop table t1; diff --git a/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_check_for_binlog_info.pl b/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_check_for_binlog_info.pl new file mode 100644 index 0000000000000..a5e4d9d80350a --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_check_for_binlog_info.pl @@ -0,0 +1,19 @@ +my $pid_file = $ARGV[0]; +my $log_file = $ARGV[1]; + +open(my $fh, '<', $pid_file) || die "Cannot open pid file $pid_file"; +my $slave_pid = <$fh>; +close($fh); + +$slave_pid =~ s/\s//g; +open(my $log_fh, '<', $log_file) || die "Cannot open log file $log_file"; + +my $pid_found = 0; +while (my $line = <$log_fh>) { + next unless ($pid_found || $line =~ /^[\d-]* [\d:]* $slave_pid /); + $pid_found = 1 unless ($pid_found); + if ($line =~ /^RocksDB: Last binlog file position.*slave-bin\..*\n/) { + print "Binlog Info Found\n"; + } +} +close($log_fh); diff --git a/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_crash_safe_wal_corrupt.cnf b/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_crash_safe_wal_corrupt.cnf new file mode 100644 index 0000000000000..bbffb0ec11610 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_crash_safe_wal_corrupt.cnf @@ -0,0 +1,13 @@ +!include suite/rpl/my.cnf + +[mysqld.1] +log_slave_updates +rocksdb_enable_2pc=OFF +rocksdb_wal_recovery_mode=2 + +[mysqld.2] +relay_log_recovery=1 +relay_log_info_repository=TABLE +log_slave_updates +rocksdb_enable_2pc=OFF +rocksdb_wal_recovery_mode=2 diff --git a/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_crash_safe_wal_corrupt.test b/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_crash_safe_wal_corrupt.test new file mode 100644 index 
0000000000000..0e40e5423a204 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_crash_safe_wal_corrupt.test @@ -0,0 +1,12 @@ +--source suite/rocksdb_rpl/t/rpl_gtid_crash_safe_wal_corrupt.inc + +connection slave; +--let slave_pid_file= query_get_value(SELECT @@pid_file, @@pid_file, 1) + +# Verify the log file contains the Last binlog line, but only if the slave server's pid is found +--exec perl suite/rocksdb_rpl/t/rpl_check_for_binlog_info.pl $slave_pid_file $MYSQLTEST_VARDIR/log/mysqld.2.err + +--disable_query_log +connection slave; +call mtr.add_suppression("Recovery from master pos"); +--enable_query_log diff --git a/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_gtid_crash_safe-master.opt b/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_gtid_crash_safe-master.opt new file mode 100644 index 0000000000000..397310d37b4c4 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_gtid_crash_safe-master.opt @@ -0,0 +1 @@ +--gtid_mode=ON --enforce_gtid_consistency --log_slave_updates --rocksdb_enable_2pc=OFF diff --git a/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_gtid_crash_safe-slave.opt b/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_gtid_crash_safe-slave.opt new file mode 100644 index 0000000000000..3f959684a75ab --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_gtid_crash_safe-slave.opt @@ -0,0 +1,2 @@ +--gtid_mode=ON --enforce_gtid_consistency --log_slave_updates --rocksdb_enable_2pc=OFF +--sync_binlog=1000 --relay_log_recovery=1 diff --git a/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_gtid_crash_safe.test b/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_gtid_crash_safe.test new file mode 100644 index 0000000000000..949fbad666df3 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_gtid_crash_safe.test @@ -0,0 +1,41 @@ +-- source include/have_gtid.inc +-- source include/master-slave.inc +-- source include/have_debug.inc +-- source include/not_valgrind.inc + +-- let $engine = ROCKSDB + +call mtr.add_suppression("Recovery 
from master pos"); + +-- let $debug_option = crash_before_update_pos +-- source extra/rpl_tests/rpl_gtid_crash_safe.inc + +-- source include/rpl_reset.inc +-- let $debug_option = crash_after_update_pos_before_apply +-- source extra/rpl_tests/rpl_gtid_crash_safe.inc + +-- source include/rpl_reset.inc +-- let $debug_option = crash_before_writing_xid +-- source extra/rpl_tests/rpl_gtid_crash_safe.inc + +-- source include/rpl_reset.inc +-- let $debug_option = half_binlogged_transaction +-- source extra/rpl_tests/rpl_gtid_crash_safe.inc + +-- source include/rpl_reset.inc +-- let $debug_option = crash_commit_before +-- source extra/rpl_tests/rpl_gtid_crash_safe.inc + +-- source include/rpl_reset.inc +-- let $debug_option = crash_commit_after_log +-- source extra/rpl_tests/rpl_gtid_crash_safe.inc + +-- source include/rpl_reset.inc +-- let $debug_option = crash_commit_after_prepare +-- source extra/rpl_tests/rpl_gtid_crash_safe.inc + +-- source include/rpl_reset.inc +-- let $debug_option = crash_commit_after +-- source extra/rpl_tests/rpl_gtid_crash_safe.inc + +-- source include/rpl_end.inc diff --git a/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_gtid_crash_safe_wal_corrupt.cnf b/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_gtid_crash_safe_wal_corrupt.cnf new file mode 100644 index 0000000000000..457665f9e76e1 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_gtid_crash_safe_wal_corrupt.cnf @@ -0,0 +1,18 @@ +!include suite/rpl/my.cnf + +[mysqld.1] +log_slave_updates +gtid_mode=ON +enforce_gtid_consistency=ON +rocksdb_enable_2pc=OFF +rocksdb_wal_recovery_mode=2 + +[mysqld.2] +sync_relay_log_info=100 +relay_log_recovery=1 +relay_log_info_repository=FILE +log_slave_updates +gtid_mode=ON +enforce_gtid_consistency=ON +rocksdb_enable_2pc=OFF +rocksdb_wal_recovery_mode=2 diff --git a/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_gtid_crash_safe_wal_corrupt.inc b/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_gtid_crash_safe_wal_corrupt.inc new file mode 100644 index 
0000000000000..43ee7ec526c23 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_gtid_crash_safe_wal_corrupt.inc @@ -0,0 +1,153 @@ +source include/master-slave.inc; +-- let $uuid = `select @@server_uuid;` + +--exec echo > $MYSQLTEST_VARDIR/log/mysqld.1.err + +connection master; +--disable_warnings +drop table if exists x; +--enable_warnings + +connection master; + +select @@binlog_format; + +create table x (id int primary key, value int, value2 int, index(value)) engine=rocksdb; +insert into x values (1,1,1); +insert into x values (2,1,1); +insert into x values (3,1,1); +insert into x values (4,1,1); +insert into x values (5,1,1); +-- replace_result $uuid uuid +select @@global.gtid_executed; + +sync_slave_with_master; +connection slave; +--let slave_data_dir= query_get_value(SELECT @@DATADIR, @@DATADIR, 1) +--let slave_pid_file= query_get_value(SELECT @@pid_file, @@pid_file, 1) +--disable_query_log +select "--- slave state before crash ---" as ""; +--enable_query_log +select * from x; +-- replace_result $uuid uuid +select @@global.gtid_executed; +-- replace_result $uuid uuid +select * from mysql.slave_gtid_info; + +--exec echo "wait" > $MYSQLTEST_VARDIR/tmp/mysqld.2.expect + +--write_file $MYSQL_TMP_DIR/truncate_tail_wal.sh +#!/bin/bash + +F=`ls -t $slave_data_dir/.rocksdb/*.log | head -n 1` +SIZE=`stat -c %s $F` +NEW_SIZE=`expr $SIZE - 10` +truncate -s $NEW_SIZE $F +rc=$? 
+if [[ $rc != 0 ]]; then + exit 1 +fi + +kill -9 `head -1 $slave_pid_file` + +exit 0 +EOF +--chmod 0755 $MYSQL_TMP_DIR/truncate_tail_wal.sh +--exec $MYSQL_TMP_DIR/truncate_tail_wal.sh + +--let $rpl_skip_start_slave= 1 +--source include/rpl_start_server.inc +--disable_query_log +select "--- slave state after crash recovery, slave stop, one transaction recovered---" as ""; +--enable_query_log +connection slave; +--exec python suite/rocksdb/t/check_log_for_xa.py $MYSQLTEST_VARDIR/log/mysqld.2.err commit,prepare,rollback +select * from x; +-- replace_result $uuid uuid +select @@global.gtid_executed; +-- replace_result $uuid uuid +select * from mysql.slave_gtid_info; + +--disable_query_log +select "--- slave state after restart, slave start ---" as ""; +--enable_query_log +--source include/start_slave.inc +connection master; +sync_slave_with_master; +connection slave; +select * from x; +-- replace_result $uuid uuid +select @@global.gtid_executed; +-- replace_result $uuid uuid +select * from mysql.slave_gtid_info; + +connection master; +insert into x values (6,1,1); + +sync_slave_with_master; +connection slave; +select * from x; +-- replace_result $uuid uuid +select @@global.gtid_executed; +-- replace_result $uuid uuid +select * from mysql.slave_gtid_info; + +connection master; +insert into x values (7,1,1); +insert into x values (8,1,1); +insert into x values (9,1,1); +insert into x values (10,1,1); +insert into x values (11,1,1); +insert into x values (12,1,1); +select * from x; +-- replace_result $uuid uuid +select @@global.gtid_executed; +sync_slave_with_master; + +connection slave; + +# Corrupting WAL. MyRocks does point in time recovery with wal_recovery_mode=2. +# It loses some data but can resync after restarting slave. 
+ +--exec echo "wait" > $MYSQLTEST_VARDIR/tmp/mysqld.2.expect + +--write_file $MYSQL_TMP_DIR/corrupt_wal.sh +#!/bin/bash + +# expected to be around 950 bytes +F=`ls -t $slave_data_dir/.rocksdb/*.log | head -n 1` +SIZE=`stat -c %s $F` +OFFSET=$(( $SIZE-500 )) +dd bs=1 if=/dev/zero of=$F count=100 seek=$OFFSET conv=notrunc + +kill -9 `head -1 $slave_pid_file` + +exit 0 +EOF +--chmod 0755 $MYSQL_TMP_DIR/corrupt_wal.sh +--exec $MYSQL_TMP_DIR/corrupt_wal.sh + +--let $rpl_skip_start_slave= 1 +--source include/rpl_start_server.inc +--disable_query_log +select "--- slave state after crash recovery, slave stop, WAL was corrupted, point in time recovery with wal_recovery_mode=2 ---" as ""; +--enable_query_log +select * from x; +--source include/start_slave.inc +connection master; +sync_slave_with_master; +connection slave; +select * from x; +-- replace_result $uuid uuid +select @@global.gtid_executed; +-- replace_result $uuid uuid +select * from mysql.slave_gtid_info; + +connection master; +drop table x; + + +--remove_file $MYSQL_TMP_DIR/truncate_tail_wal.sh +--remove_file $MYSQL_TMP_DIR/corrupt_wal.sh +--source include/rpl_end.inc + diff --git a/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_gtid_crash_safe_wal_corrupt.test b/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_gtid_crash_safe_wal_corrupt.test new file mode 100644 index 0000000000000..3b660b2640f45 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_gtid_crash_safe_wal_corrupt.test @@ -0,0 +1,12 @@ +-- source suite/rocksdb_rpl/t/rpl_gtid_crash_safe_wal_corrupt.inc + +connection slave; +-- let slave_pid_file= query_get_value(SELECT @@pid_file, @@pid_file, 1) + +# Verify the log file contains the Last binlog line, but only if the slave server's pid is found +--exec perl suite/rocksdb_rpl/t/rpl_check_for_binlog_info.pl $slave_pid_file $MYSQLTEST_VARDIR/log/mysqld.2.err + +--disable_query_log +connection slave; +call mtr.add_suppression("Recovery from master pos"); +--enable_query_log diff --git 
a/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_gtid_rocksdb_sys_header-master.opt b/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_gtid_rocksdb_sys_header-master.opt new file mode 100644 index 0000000000000..d828b6c01f496 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_gtid_rocksdb_sys_header-master.opt @@ -0,0 +1 @@ +--gtid_mode=ON --enforce_gtid_consistency --log_slave_updates diff --git a/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_gtid_rocksdb_sys_header-slave.opt b/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_gtid_rocksdb_sys_header-slave.opt new file mode 100644 index 0000000000000..d828b6c01f496 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_gtid_rocksdb_sys_header-slave.opt @@ -0,0 +1 @@ +--gtid_mode=ON --enforce_gtid_consistency --log_slave_updates diff --git a/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_gtid_rocksdb_sys_header.test b/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_gtid_rocksdb_sys_header.test new file mode 100644 index 0000000000000..56c0eac25174a --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_gtid_rocksdb_sys_header.test @@ -0,0 +1,39 @@ +# based on rpl/rpl_gtid_innondb_sys_header.test +source include/master-slave.inc; +source include/have_gtid.inc; +source include/have_debug.inc; +source include/not_valgrind.inc; + +--let $old_debug = `select @@global.debug;` + +connection master; +create table t1 (a int primary key) engine=rocksdb; +insert into t1 values(1); +--eval SET GLOBAL debug = '+d,crash_before_writing_xid' +--exec echo "restart" > $MYSQLTEST_VARDIR/tmp/mysqld.1.expect +--error 2013 +insert into t1 values(2); + +--source include/wait_until_disconnected.inc +--let $rpl_server_number = 1 +--source include/rpl_reconnect.inc + +--eval SET GLOBAL debug = `$old_debug` + +connection slave; +disable_warnings; +source include/start_slave.inc; +enable_warnings; +connection master; +sync_slave_with_master; + +connection master; +--let $master_uuid= query_get_value(select @@server_uuid, 
@@server_uuid, 1) +--replace_result $master_uuid master_uuid +--exec grep 'RocksDB: Last MySQL Gtid $master_uuid' $MYSQLTEST_VARDIR/log/mysqld.1.err + +drop table t1; +source include/rpl_end.inc; +-- move_file $MYSQLTEST_VARDIR/log/mysqld.1.err $MYSQLTEST_VARDIR/log/mysqld.1.err.orig +-- write_file $MYSQLTEST_VARDIR/log/mysqld.1.err +EOF diff --git a/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_no_unique_check_on_lag-slave.opt b/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_no_unique_check_on_lag-slave.opt new file mode 100644 index 0000000000000..1c8dc1e62e9b2 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_no_unique_check_on_lag-slave.opt @@ -0,0 +1 @@ +--unique-check-lag-threshold=5 diff --git a/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_no_unique_check_on_lag.test b/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_no_unique_check_on_lag.test new file mode 100644 index 0000000000000..8c79d2afa03ad --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_no_unique_check_on_lag.test @@ -0,0 +1,6 @@ +--echo # +--echo # Ensure skip_unique_check is set when lag exceeds lag_threshold +--echo # + +--source ../include/rpl_no_unique_check_on_lag.inc + diff --git a/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_no_unique_check_on_lag_mts-slave.opt b/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_no_unique_check_on_lag_mts-slave.opt new file mode 100644 index 0000000000000..1c8dc1e62e9b2 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_no_unique_check_on_lag_mts-slave.opt @@ -0,0 +1 @@ +--unique-check-lag-threshold=5 diff --git a/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_no_unique_check_on_lag_mts.test b/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_no_unique_check_on_lag_mts.test new file mode 100644 index 0000000000000..c5cf1a8ae92d9 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_no_unique_check_on_lag_mts.test @@ -0,0 +1,2 @@ +--source ../include/rpl_no_unique_check_on_lag.inc + diff --git 
a/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_rocksdb_2pc_crash_recover-master.opt b/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_rocksdb_2pc_crash_recover-master.opt new file mode 100644 index 0000000000000..74c2de3710083 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_rocksdb_2pc_crash_recover-master.opt @@ -0,0 +1 @@ +--gtid_mode=ON --enforce_gtid_consistency --log_bin --log_slave_updates --rocksdb_flush_log_at_trx_commit=1 --rocksdb_write_disable_wal=OFF diff --git a/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_rocksdb_2pc_crash_recover-slave.opt b/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_rocksdb_2pc_crash_recover-slave.opt new file mode 100644 index 0000000000000..c747adc94d508 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_rocksdb_2pc_crash_recover-slave.opt @@ -0,0 +1 @@ +--gtid_mode=ON --enforce_gtid_consistency --log_bin --log_slave_updates diff --git a/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_rocksdb_2pc_crash_recover.test b/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_rocksdb_2pc_crash_recover.test new file mode 100644 index 0000000000000..ea1fe3e34d68e --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_rocksdb_2pc_crash_recover.test @@ -0,0 +1,56 @@ +--source include/have_binlog_format_row.inc +--source include/have_rocksdb.inc +--source include/have_debug.inc + +--disable_warnings +DROP TABLE IF EXISTS t1; +--enable_warnings + +create table t1 (a int primary key, msg varchar(255)) engine=rocksdb; + +SET GLOBAL ROCKSDB_ENABLE_2PC = ON; +--exec echo "restart" > $MYSQLTEST_VARDIR/tmp/mysqld.1.expect +SET SESSION debug="d,crash_commit_after_prepare"; +--error 0,2013 +insert into t1 values (1, 'dogz'); +--enable_reconnect +--source include/wait_until_connected_again.inc +select * from t1; + +SET GLOBAL ROCKSDB_ENABLE_2PC = ON; +--exec echo "restart" > $MYSQLTEST_VARDIR/tmp/mysqld.1.expect +SET SESSION debug="d,crash_commit_after_log"; +--error 0,2013 +insert into t1 values (2, 'catz'), (3, 'men'); 
+--enable_reconnect +--source include/wait_until_connected_again.inc +select * from t1; + +SET GLOBAL ROCKSDB_ENABLE_2PC = ON; +--exec echo "restart" > $MYSQLTEST_VARDIR/tmp/mysqld.1.expect +SET SESSION debug="d,crash_commit_after"; +--error 0,2013 +insert into t1 values (4, 'cars'), (5, 'foo'); +--enable_reconnect +--source include/wait_until_connected_again.inc +select * from t1; + +SET GLOBAL ROCKSDB_ENABLE_2PC = OFF; +--exec echo "restart" > $MYSQLTEST_VARDIR/tmp/mysqld.1.expect +SET SESSION debug="d,crash_commit_after_log"; +--error 0,2013 +insert into t1 values (6, 'shipz'), (7, 'tankz'); +--enable_reconnect +--source include/wait_until_connected_again.inc +select * from t1; + +SET GLOBAL ROCKSDB_ENABLE_2PC = OFF; +--exec echo "restart" > $MYSQLTEST_VARDIR/tmp/mysqld.1.expect +SET SESSION debug="d,crash_commit_after"; +--error 0,2013 +insert into t1 values (8, 'space'), (9, 'time'); +--enable_reconnect +--source include/wait_until_connected_again.inc +select * from t1; + +drop table t1; diff --git a/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_rocksdb_snapshot-master.opt b/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_rocksdb_snapshot-master.opt new file mode 100644 index 0000000000000..c747adc94d508 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_rocksdb_snapshot-master.opt @@ -0,0 +1 @@ +--gtid_mode=ON --enforce_gtid_consistency --log_bin --log_slave_updates diff --git a/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_rocksdb_snapshot-slave.opt b/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_rocksdb_snapshot-slave.opt new file mode 100644 index 0000000000000..c747adc94d508 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_rocksdb_snapshot-slave.opt @@ -0,0 +1 @@ +--gtid_mode=ON --enforce_gtid_consistency --log_bin --log_slave_updates diff --git a/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_rocksdb_snapshot.test b/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_rocksdb_snapshot.test new file mode 100644 index 
0000000000000..37f80c8ace5e7 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_rocksdb_snapshot.test @@ -0,0 +1,373 @@ +--source include/master-slave.inc +--source include/have_binlog_format_row.inc + +--disable_warnings +DROP TABLE IF EXISTS t1; +--enable_warnings + +--echo # Establish connection con1 (user=root) +connect (con1,localhost,root,,); +--echo # Establish connection con2 (user=root) +connect (con2,localhost,root,,); +--echo # Establish connection con3 (user=root) +connect (con3,localhost,root,,); +--echo # Establish connection con4 (user=root) +connect (con4,localhost,root,,); + +--echo # reset replication to guarantee that master-bin.000001 is used +connection slave; +--source include/stop_slave.inc +RESET SLAVE; +RESET MASTER; + +connection master; +RESET MASTER; + +connection slave; +--replace_result $MASTER_MYPORT MASTER_PORT +eval CHANGE MASTER TO master_host="127.0.0.1",master_port=$MASTER_MYPORT,master_user="root"; +--source include/start_slave.inc + +### Test 1: +### - While a consistent snapshot transaction is executed, +### no external inserts should be visible to the transaction. 
+ +--echo # Switch to connection con1 +connection con1; +CREATE TABLE t1 (a INT PRIMARY KEY) ENGINE=rocksdb; +INSERT INTO t1 VALUES(1); + +SET TRANSACTION ISOLATION LEVEL READ COMMITTED; +--error ER_UNKNOWN_ERROR +START TRANSACTION WITH CONSISTENT SNAPSHOT; +--error ER_UNKNOWN_ERROR +START TRANSACTION WITH CONSISTENT ROCKSDB SNAPSHOT; +ROLLBACK; +SET TRANSACTION ISOLATION LEVEL REPEATABLE READ; + + +--disable_query_log +--disable_result_log +let $x=1000; +while ($x) { + START TRANSACTION WITH CONSISTENT ROCKSDB SNAPSHOT; + dec $x; +} +--enable_query_log +--enable_result_log + +-- replace_regex /[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}/UUID/ +START TRANSACTION WITH CONSISTENT ROCKSDB SNAPSHOT; + +--echo # Switch to connection con2 +connection con2; +INSERT INTO t1 VALUES(2); +INSERT INTO t1 VALUES(3); + +--echo # Switch to connection con1 +connection con1; +SELECT * FROM t1; # should fetch one row +COMMIT; + +SELECT * FROM t1; # should fetch three rows + +DROP TABLE t1; + +### Test 2: +### - confirm result from snapshot select and replication replay matches original + +--echo # Switch to connection con1 +connection con1; +CREATE TABLE t1 (a INT PRIMARY KEY) ENGINE=rocksdb; +INSERT INTO t1 VALUES(1); + +-- replace_regex /[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}/UUID/ +START TRANSACTION WITH CONSISTENT ROCKSDB SNAPSHOT; +-- replace_regex /[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}/UUID/ +START TRANSACTION WITH CONSISTENT ROCKSDB SNAPSHOT; +-- replace_regex /[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}/UUID/ +START TRANSACTION WITH CONSISTENT ROCKSDB SNAPSHOT; +-- replace_regex /[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}/UUID/ +START TRANSACTION WITH CONSISTENT ROCKSDB SNAPSHOT; + +let $binlog_pos = query_get_value(START TRANSACTION WITH CONSISTENT ROCKSDB SNAPSHOT, Position, 1); + +--echo # Switch to connection con2 +connection con2; +INSERT INTO t1 VALUES(2); +INSERT INTO t1 
VALUES(3); + +--echo # Switch to connection con1 +connection con1; +SELECT * FROM t1; + +--let $outfile = $MYSQLTEST_VARDIR/tmp/rpl_rocksdb_snapshot.out.file + +--replace_result $MYSQLTEST_VARDIR +eval SELECT * INTO OUTFILE '$outfile' FROM t1; +COMMIT; + +--echo # Switch to slave +sync_slave_with_master slave; + +CREATE TABLE t1_backup LIKE t1; +INSERT INTO t1_backup SELECT * FROM t1; +--source include/stop_slave.inc +RESET SLAVE; +RESET MASTER; +DELETE FROM t1; +--replace_result $MYSQLTEST_VARDIR +eval LOAD DATA INFILE '$outfile' INTO TABLE t1; +SELECT * FROM t1; + +--replace_result $MASTER_MYPORT MASTER_PORT $binlog_pos binlog_pos +eval CHANGE MASTER TO master_host="127.0.0.1",master_port=$MASTER_MYPORT,master_user="root",master_log_file="master-bin.000001",master_log_pos=$binlog_pos; +--source include/start_slave.inc + +connection master; +sync_slave_with_master slave; + +SELECT * FROM t1; +SELECT * FROM t1_backup; +DROP TABLE t1_backup; + +connection master; +DROP TABLE t1; +--remove_file $outfile + +### Test 3: +### - confirm result from snapshot select and replication replay matches original +### - use non-deterministic concurrency + +--echo # Switch to connection con1 +connection con1; +CREATE TABLE t1 (a INT PRIMARY KEY) ENGINE=rocksdb; +INSERT INTO t1 VALUES(1); + +--echo # async queries from con2 +connection con2; +send INSERT INTO t1 VALUES(2); + +--echo # async queries from con3 +connection con3; +send INSERT INTO t1 VALUES(21); + +--echo # Switch to connection con1 +connection con1; + +let $binlog_pos = query_get_value(START TRANSACTION WITH CONSISTENT ROCKSDB SNAPSHOT, Position, 1); + +--echo # Switch to connection con4 +connection con4; +INSERT INTO t1 VALUES(9); + +--echo # Switch to connection con1 +connection con1; + +--let $outfile = $MYSQLTEST_VARDIR/tmp/rpl_rocksdb_snapshot.out.file + +--replace_result $MYSQLTEST_VARDIR +eval SELECT * INTO OUTFILE '$outfile' FROM t1; +COMMIT; + +--echo # reap async statements +connection con2; +reap; + 
+connection con3; +reap; + +--echo # Switch to slave +sync_slave_with_master slave; + +CREATE TABLE t1_backup LIKE t1; +INSERT INTO t1_backup SELECT * FROM t1; +--source include/stop_slave.inc +RESET SLAVE; +RESET MASTER; +DELETE FROM t1; +--replace_result $MYSQLTEST_VARDIR +eval LOAD DATA INFILE '$outfile' INTO TABLE t1; + +--replace_result $MASTER_MYPORT MASTER_PORT $binlog_pos binlog_pos +eval CHANGE MASTER TO master_host="127.0.0.1",master_port=$MASTER_MYPORT,master_user="root",master_log_file="master-bin.000001",master_log_pos=$binlog_pos; +--source include/start_slave.inc + +--echo # sync and then query slave +connection master; +sync_slave_with_master slave; + +let $sum1 = `SELECT SUM(a) from t1`; +let $sum2 = `SELECT SUM(a) from t1_backup`; +--disable_query_log +eval select $sum2 - $sum1 ShouldBeZero; +--enable_query_log + +DROP TABLE t1_backup; + +connection master; +DROP TABLE t1; +--remove_file $outfile + +### Test 4: +### - confirm result from snapshot select and replication relay using gtid protocol matches original + +--echo # Switch to connection con1 +connection con1; +CREATE TABLE t1 (a INT PRIMARY KEY) ENGINE=rocksdb; +INSERT INTO t1 VALUES(1); + +-- replace_regex /[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}/UUID/ +START TRANSACTION WITH CONSISTENT ROCKSDB SNAPSHOT; +-- replace_regex /[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}/UUID/ +START TRANSACTION WITH CONSISTENT ROCKSDB SNAPSHOT; +-- replace_regex /[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}/UUID/ +START TRANSACTION WITH CONSISTENT ROCKSDB SNAPSHOT; +-- replace_regex /[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}/UUID/ +START TRANSACTION WITH CONSISTENT ROCKSDB SNAPSHOT; + +let $gtid_executed = query_get_value(START TRANSACTION WITH CONSISTENT ROCKSDB SNAPSHOT, Gtid_executed, 1); + +--echo # Switch to connection con2 +connection con2; +INSERT INTO t1 VALUES(2); +INSERT INTO t1 VALUES(3); + +--echo # Switch to connection con1 
+connection con1; +SELECT * FROM t1; + +--let $outfile = $MYSQLTEST_VARDIR/tmp/rpl_rocksdb_snapshot.out.file + +--replace_result $MYSQLTEST_VARDIR +eval SELECT * INTO OUTFILE '$outfile' FROM t1; +COMMIT; + +--echo # Switch to slave +sync_slave_with_master slave; + +CREATE TABLE t1_backup LIKE t1; +INSERT INTO t1_backup SELECT * FROM t1; +--source include/stop_slave.inc +RESET SLAVE; +RESET MASTER; +--replace_result $gtid_executed gtid_executed_from_snapshot +eval SET @@global.gtid_purged='$gtid_executed'; +DELETE FROM t1; +--replace_result $MYSQLTEST_VARDIR +eval LOAD DATA INFILE '$outfile' INTO TABLE t1; +SELECT * FROM t1; + +--replace_result $MASTER_MYPORT MASTER_PORT +eval CHANGE MASTER TO master_host="127.0.0.1",master_port=$MASTER_MYPORT,master_user="root", master_auto_position=1; +--source include/start_slave.inc + +connection master; +sync_slave_with_master slave; + +SELECT * FROM t1; +SELECT * FROM t1_backup; +DROP TABLE t1_backup; + +connection master; +DROP TABLE t1; +--remove_file $outfile + +### Test 5: +### - confirm result from snapshot select and replication replay using gtid_protocol matches original +### - use non-deterministic concurrency + +--echo # Switch to connection con1 +connection con1; +CREATE TABLE t1 (a INT PRIMARY KEY) ENGINE=rocksdb; +INSERT INTO t1 VALUES(1); + +--echo # async queries from con2 +connection con2; +send INSERT INTO t1 VALUES(2); + +--echo # async queries from con3 +connection con3; +send INSERT INTO t1 VALUES(21); + +--echo # Switch to connection con1 +connection con1; + +let $gtid_executed = query_get_value(START TRANSACTION WITH CONSISTENT ROCKSDB SNAPSHOT, Gtid_executed, 1); + +--echo # Switch to connection con4 +connection con4; +INSERT INTO t1 VALUES(9); + +--echo # Switch to connection con1 +connection con1; + +--let $outfile = $MYSQLTEST_VARDIR/tmp/rpl_rocksdb_snapshot.out.file + +--replace_result $MYSQLTEST_VARDIR +eval SELECT * INTO OUTFILE '$outfile' FROM t1; +COMMIT; + +--echo # reap async statements 
+connection con2; +reap; + +connection con3; +reap; + +--echo # Switch to slave +sync_slave_with_master slave; + +CREATE TABLE t1_backup LIKE t1; +INSERT INTO t1_backup SELECT * FROM t1; +--source include/stop_slave.inc +RESET SLAVE; +RESET MASTER; +--replace_result $gtid_executed gtid_executed_from_snapshot +eval SET @@global.gtid_purged='$gtid_executed'; +DELETE FROM t1; + +--replace_result $MYSQLTEST_VARDIR +eval LOAD DATA INFILE '$outfile' INTO TABLE t1; + +--replace_result $MASTER_MYPORT MASTER_PORT +eval CHANGE MASTER TO master_host="127.0.0.1",master_port=$MASTER_MYPORT,master_user="root", master_auto_position=1; +--source include/start_slave.inc + +--echo # sync and then query slave +connection master; +sync_slave_with_master slave; + +let $sum1 = `SELECT SUM(a) from t1`; +let $sum2 = `SELECT SUM(a) from t1_backup`; +--disable_query_log +eval select $sum2 - $sum1 ShouldBeZero; +--enable_query_log + +DROP TABLE t1_backup; + +connection master; +DROP TABLE t1; +--remove_file $outfile + +--echo # Switch to connection default + close connections con1 and con2 +connection con1; +disconnect con1; +--source include/wait_until_disconnected.inc +connection con2; +disconnect con2; +--source include/wait_until_disconnected.inc +connection con3; +disconnect con3; +--source include/wait_until_disconnected.inc +connection con4; +disconnect con4; +--source include/wait_until_disconnected.inc + +connection default; +sync_slave_with_master slave; +--source include/stop_slave.inc +CHANGE MASTER to master_auto_position=0; +--source include/start_slave.inc + +--source include/rpl_end.inc diff --git a/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_rocksdb_snapshot_without_gtid.test b/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_rocksdb_snapshot_without_gtid.test new file mode 100644 index 0000000000000..2b590f8465389 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_rocksdb_snapshot_without_gtid.test @@ -0,0 +1,17 @@ +--source include/master-slave.inc +--source 
include/have_binlog_format_row.inc + +--connection master +create table t1(a int primary key); + +FLUSH LOGS; + +insert into t1 values(1); +insert into t1 values(2); + +FLUSH LOGS; + +START TRANSACTION WITH CONSISTENT ROCKSDB SNAPSHOT; + +drop table t1; +-- source include/rpl_end.inc diff --git a/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_rocksdb_stress_crash-master.opt b/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_rocksdb_stress_crash-master.opt new file mode 100644 index 0000000000000..5c5a73bf2a418 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_rocksdb_stress_crash-master.opt @@ -0,0 +1,2 @@ +--gtid_mode=ON --enforce_gtid_consistency --log_slave_updates +--binlog_rows_query_log_events=TRUE --rocksdb_unsafe_for_binlog=TRUE diff --git a/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_rocksdb_stress_crash-slave.opt b/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_rocksdb_stress_crash-slave.opt new file mode 100644 index 0000000000000..67f0fcf77f07f --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_rocksdb_stress_crash-slave.opt @@ -0,0 +1,2 @@ +--gtid_mode=ON --enforce_gtid_consistency --log_slave_updates --max_binlog_size=50000 +--slave_parallel_workers=30 --relay_log_recovery=1 --rocksdb_unsafe_for_binlog=TRUE diff --git a/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_rocksdb_stress_crash.test b/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_rocksdb_stress_crash.test new file mode 100644 index 0000000000000..17b866060b7b1 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_rocksdb_stress_crash.test @@ -0,0 +1,26 @@ +-- source include/big_test.inc +-- source include/master-slave.inc +-- source include/not_valgrind.inc +-- source include/have_gtid.inc +-- source include/have_rocksdb.inc + +connection master; +call mtr.add_suppression(".*"); +sync_slave_with_master; +-- source include/stop_slave.inc +change master to master_auto_position=1; +-- source include/start_slave.inc + +-- let $iter=100 +-- let $databases=30 +-- let 
$num_crashes=100 +-- let $include_silent=1 +-- let $storage_engine='rocksdb' +-- source extra/rpl_tests/rpl_parallel_load_innodb.test +-- let $include_silent=0 + +-- source include/stop_slave.inc +change master to master_auto_position=0; +-- source include/start_slave.inc + +-- source include/rpl_end.inc diff --git a/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_skip_trx_api_binlog_format-master.opt b/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_skip_trx_api_binlog_format-master.opt new file mode 100644 index 0000000000000..39bb3238861f5 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_skip_trx_api_binlog_format-master.opt @@ -0,0 +1,2 @@ +--gtid_mode=ON --enforce_gtid_consistency --log_slave_updates +--binlog_format=STATEMENT --default-storage-engine=rocksdb diff --git a/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_skip_trx_api_binlog_format-slave.opt b/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_skip_trx_api_binlog_format-slave.opt new file mode 100644 index 0000000000000..826f1ee9cb638 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_skip_trx_api_binlog_format-slave.opt @@ -0,0 +1,2 @@ +--gtid_mode=ON --enforce_gtid_consistency --log_slave_updates +--sync_binlog=1000 --relay_log_recovery=1 --default-storage-engine=rocksdb diff --git a/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_skip_trx_api_binlog_format.test b/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_skip_trx_api_binlog_format.test new file mode 100644 index 0000000000000..22151d1454751 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_skip_trx_api_binlog_format.test @@ -0,0 +1,51 @@ +# Checks if the slave stops executing transactions when master's binlog format +# is STATEMENT but rpl_skip_tx_api is enabled +-- source include/master-slave.inc + +call mtr.add_suppression("Master's binlog format is not ROW but rpl_skip_tx_api is enabled on the slave"); + +connection slave; +let $old_rpl_skip_tx_api= `SELECT @@global.rpl_skip_tx_api`; +set global 
rpl_skip_tx_api=ON; + +connection master; +let $old_rocksdb_unsafe_for_binlog= `SELECT @@global.rocksdb_unsafe_for_binlog`; +set global rocksdb_unsafe_for_binlog=1; +create table t1(a int); +set session binlog_format=STATEMENT; +insert into t1 values(1); + +# Wait till we hit the binlog format mismatch error +connection slave; +let $slave_sql_errno= convert_error(ER_MTS_INCONSISTENT_DATA); # 1756 +let $show_slave_sql_error= 1; +source include/wait_for_slave_sql_error.inc; + +# Print table +connection slave; +echo "Table after error"; +select * from t1; + +connection slave; +# Turn off rpl_skip_tx_api and start the slave again +set global rpl_skip_tx_api=OFF; +source include/start_slave.inc; + +connection slave; +source include/sync_slave_sql_with_master.inc; + +connection slave; +# Print table again +echo "Table after error fixed"; +select * from t1; + +# Cleanup +connection master; +drop table t1; +eval set global rocksdb_unsafe_for_binlog=$old_rocksdb_unsafe_for_binlog; +sync_slave_with_master; + +connection slave; +eval set global rpl_skip_tx_api=$old_rpl_skip_tx_api; + +-- source include/rpl_end.inc diff --git a/storage/rocksdb/mysql-test/rocksdb_stress/include/rocksdb_stress.inc b/storage/rocksdb/mysql-test/rocksdb_stress/include/rocksdb_stress.inc new file mode 100644 index 0000000000000..a8ac90fcc3ffd --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_stress/include/rocksdb_stress.inc @@ -0,0 +1,56 @@ +# Run the load generator to populate the table and generate concurrent +# updates. 
After the load generator is complete, verify the tables on the +# master and the slave are consistent + +--sync_slave_with_master + +--connection master +--let $master_host = 127.0.0.1 +let $MYSQL_BASEDIR = `SELECT @@BASEDIR`; + +let $exec = + python $MYSQL_BASEDIR/mysql-test/suite/rocksdb_stress/t/load_generator.py + -L $MYSQL_TMP_DIR/load_generator.log -H $master_host -t $table + -P $MASTER_MYPORT -n $num_records -m $max_records + -l $num_loaders -c $num_checkers -r $num_requests + -E $MYSQLTEST_VARDIR/tmp/mysqld.1.expect + -D $reap_delay; + +exec $exec; + +enable_reconnect; +source include/wait_until_connected_again.inc; + +--let $master_checksum = query_get_value(CHECKSUM TABLE $table, Checksum, 1) + +# if sync_slave_with_master had a configurable timeout this would not be needed +let $slave_sync_timeout = 7200; +--source include/wait_for_slave_to_sync_with_master.inc + +--connection slave +--let $slave_checksum = query_get_value(CHECKSUM TABLE $table, Checksum, 1) + +let $not_same = `SELECT $master_checksum-$slave_checksum`; +if ($not_same) +{ + --die "The checksums of table $table for the master and slave do not match!" +} + +# Cleanup +--connection master +--let $cleanup = DROP TABLE $table +eval $cleanup; + +# if sync_slave_with_master had a configurable timeout this would not be needed +let $slave_sync_timeout = 7200; +--source include/wait_for_slave_to_sync_with_master.inc + +--connection slave +--source include/stop_slave.inc +# For stress tests sometimes the replication thread can not connect to master +# temporarily. This is either because the master crashed and it is recovering +# or the master is too busy and could not service the slave's requests. +# mtr's internal check requires that there be no errors in slave status. +# restarting replication clears the errors. 
+--source include/start_slave.inc +--source include/stop_slave.inc diff --git a/storage/rocksdb/mysql-test/rocksdb_stress/my.cnf b/storage/rocksdb/mysql-test/rocksdb_stress/my.cnf new file mode 100644 index 0000000000000..fb985f5d1b4e9 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_stress/my.cnf @@ -0,0 +1,8 @@ +!include include/default_my.cnf +!include suite/rocksdb/my.cnf +!include suite/rpl/my.cnf + +[mysqld.1] +binlog_format=row +[mysqld.2] +binlog_format=row diff --git a/storage/rocksdb/mysql-test/rocksdb_stress/r/rocksdb_stress.result b/storage/rocksdb/mysql-test/rocksdb_stress/r/rocksdb_stress.result new file mode 100644 index 0000000000000..3d76e035e05a0 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_stress/r/rocksdb_stress.result @@ -0,0 +1,21 @@ +include/master-slave.inc +[connection master] +DROP TABLE IF EXISTS t1; +CREATE TABLE t1(id INT PRIMARY KEY, +thread_id INT NOT NULL, +request_id BIGINT UNSIGNED NOT NULL, +update_count INT UNSIGNED NOT NULL DEFAULT 0, +zero_sum INT DEFAULT 0, +msg VARCHAR(1024), +msg_length int, +msg_checksum varchar(128), +KEY msg_i(msg(255), zero_sum)) +ENGINE=RocksDB DEFAULT CHARSET=latin1 COLLATE=latin1_bin; +stop slave; +start slave; +DROP TABLE t1; +stop slave; +start slave; +include/stop_slave.inc +include/start_slave.inc +include/stop_slave.inc diff --git a/storage/rocksdb/mysql-test/rocksdb_stress/r/rocksdb_stress_crash.result b/storage/rocksdb/mysql-test/rocksdb_stress/r/rocksdb_stress_crash.result new file mode 100644 index 0000000000000..3d76e035e05a0 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_stress/r/rocksdb_stress_crash.result @@ -0,0 +1,21 @@ +include/master-slave.inc +[connection master] +DROP TABLE IF EXISTS t1; +CREATE TABLE t1(id INT PRIMARY KEY, +thread_id INT NOT NULL, +request_id BIGINT UNSIGNED NOT NULL, +update_count INT UNSIGNED NOT NULL DEFAULT 0, +zero_sum INT DEFAULT 0, +msg VARCHAR(1024), +msg_length int, +msg_checksum varchar(128), +KEY msg_i(msg(255), zero_sum)) 
+ENGINE=RocksDB DEFAULT CHARSET=latin1 COLLATE=latin1_bin; +stop slave; +start slave; +DROP TABLE t1; +stop slave; +start slave; +include/stop_slave.inc +include/start_slave.inc +include/stop_slave.inc diff --git a/storage/rocksdb/mysql-test/rocksdb_stress/t/load_generator.py b/storage/rocksdb/mysql-test/rocksdb_stress/t/load_generator.py new file mode 100644 index 0000000000000..20098f49b42ff --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_stress/t/load_generator.py @@ -0,0 +1,1029 @@ +import cStringIO +import array +import hashlib +import MySQLdb +from MySQLdb.constants import CR +from MySQLdb.constants import ER +from collections import deque +import os +import random +import signal +import sys +import threading +import time +import string +import traceback +import logging +import argparse + +# This is a generic load_generator for mysqld which persists across server +# restarts and attempts to verify both committed and uncommitted transactions +# are persisted correctly. +# +# The table schema used should look something like: +# +# CREATE TABLE t1(id INT PRIMARY KEY, +# thread_id INT NOT NULL, +# request_id BIGINT UNSIGNED NOT NULL, +# update_count INT UNSIGNED NOT NULL DEFAULT 0, +# zero_sum INT DEFAULT 0, +# msg VARCHAR(1024), +# msg_length int, +# msg_checksum varchar(128), +# KEY msg_i(msg(255), zero_sum)) +# ENGINE=RocksDB DEFAULT CHARSET=latin1 COLLATE=latin1_bin; +# +# zero_sum should always sum up to 0 regardless of when the transaction tries +# to process the transaction. Each transaction always maintain this sum to 0. +# +# request_id should be unique across transactions. It is used during +# transaction verification and is monotonically increasing.. +# +# Several threads are spawned at the start of the test to populate the table. +# Once the table is populated, both loader and checker threads are created. +# +# The row id space is split into two sections: exclusive and shared. 
Each +# loader thread owns some part of the exclusive section which it maintains +# complete information on insert/updates/deletes. Since this section is only +# modified by one thread, the thread can maintain an accurate picture of all +# changes. The shared section contains rows which multiple threads can +# update/delete/insert. For checking purposes, the request_id is used to +# determine if a row is consistent with a committed transaction. +# +# Each loader thread's transaction consists of selecting some number of rows +# randomly. The thread can choose to delete the row, update the row or insert +# the row if it doesn't exist. The state of rows that are owned by the loader +# thread are tracked within the thread's id_map. This map contains the row id +# and the request_id of the latest update. For indicating deleted rows, the +# -request_id marker is used. Thus, at any point in time, the thread's id_map +# should reflect the exact state of the rows that are owned. +# +# The loader thread also maintains the state of older transactions that were +# successfully processed in addition to the current transaction, which may or +# may not be committed. Each transaction state consists of the row id, and the +# request_id. Again, -request_id is used to indicate a delete. For committed +# transactions, the thread can verify the request_id of the row is larger than +# what the thread has recorded. For uncommitted transactions, the thread would +# verify the request_id of the row does not match that of the transaction. To +# determine whether or not a transaction succeeded in case of a crash right at +# commit, each thread always includes a particular row in the transaction which +# it could use to check the request id against. +# +# Checker threads run continuously to verify the checksums on the rows and to +# verify the zero_sum column sums up to zero at any point in time. The checker +# threads run both point lookups and range scans for selecting the rows. 
+ +class ValidateError(Exception): + """Raised when validation fails""" + pass + +class TestError(Exception): + """Raised when the test cannot make forward progress""" + pass + +CHARS = string.letters + string.digits +OPTIONS = {} + +# max number of rows per transaction +MAX_ROWS_PER_REQ = 10 + +# global variable checked by threads to determine if the test is stopping +TEST_STOP = False +LOADERS_READY = 0 + +# global monotonically increasing request id counter +REQUEST_ID = 1 +REQUEST_ID_LOCK = threading.Lock() + +def get_next_request_id(): + global REQUEST_ID + with REQUEST_ID_LOCK: + REQUEST_ID += 1 + return REQUEST_ID + +# given a percentage value, rolls a 100-sided die and return whether the +# given value is above or equal to the die roll +# +# passing 0 should always return false and 100 should always return true +def roll_d100(p): + assert p >= 0 and p <= 100 + return p >= random.randint(1, 100) + +def sha1(x): + return hashlib.sha1(str(x)).hexdigest() + +def is_connection_error(exc): + error_code = exc.args[0] + return (error_code == MySQLdb.constants.CR.CONNECTION_ERROR or + error_code == MySQLdb.constants.CR.CONN_HOST_ERROR or + error_code == MySQLdb.constants.CR.SERVER_LOST or + error_code == MySQLdb.constants.CR.SERVER_GONE_ERROR or + error_code == MySQLdb.constants.ER.QUERY_INTERRUPTED or + error_code == MySQLdb.constants.ER.SERVER_SHUTDOWN) + +def is_deadlock_error(exc): + error_code = exc.args[0] + return (error_code == MySQLdb.constants.ER.LOCK_DEADLOCK or + error_code == MySQLdb.constants.ER.LOCK_WAIT_TIMEOUT) + +# should be deterministic given an idx +def gen_msg(idx, thread_id, request_id): + random.seed(idx); + # field length is 1024 bytes, but 32 are reserved for the tid and req tag + blob_length = random.randint(1, 1024 - 32) + + if roll_d100(50): + # blob that cannot be compressed (well, compresses to 85% of original size) + msg = ''.join([random.choice(CHARS) for x in xrange(blob_length)]) + else: + # blob that can be compressed + msg = 
random.choice(CHARS) * blob_length + + # append the thread_id and request_id to the end of the msg + return ''.join([msg, ' tid: %d req: %d' % (thread_id, request_id)]) + +def execute(cur, stmt): + ROW_COUNT_ERROR = 18446744073709551615L + logging.debug("Executing %s" % stmt) + cur.execute(stmt) + if cur.rowcount < 0 or cur.rowcount == ROW_COUNT_ERROR: + raise MySQLdb.OperationalError(MySQLdb.constants.CR.CONNECTION_ERROR, + "Possible connection error, rowcount is %d" + % cur.rowcount) + +def wait_for_workers(workers, min_active = 0): + logging.info("Waiting for %d workers", len(workers)) + # min_active needs to include the current waiting thread + min_active += 1 + + # polling here allows this thread to be responsive to keyboard interrupt + # exceptions, otherwise a user hitting ctrl-c would see the load_generator as + # hanging and unresponsive + try: + while threading.active_count() > min_active: + time.sleep(1) + except KeyboardInterrupt, e: + os._exit(1) + + num_failures = 0 + for w in workers: + w.join() + if w.exception: + logging.error(w.exception) + num_failures += 1 + + return num_failures + +# base class for worker threads and contains logic for handling reconnecting to +# the mysqld server during connection failure +class WorkerThread(threading.Thread): + def __init__(self, name): + threading.Thread.__init__(self) + self.name = name + self.exception = None + self.con = None + self.cur = None + self.isolation_level = None + self.start_time = time.time() + self.total_time = 0 + + def run(self): + global TEST_STOP + + try: + logging.info("Started") + self.runme() + logging.info("Completed successfully") + except Exception, e: + self.exception = traceback.format_exc() + logging.error(self.exception) + TEST_STOP = True + finally: + self.total_time = time.time() - self.start_time + logging.info("Total run time: %.2f s" % self.total_time) + self.finish() + + def reconnect(self, timeout=900): + global TEST_STOP + + self.con = None + SECONDS_BETWEEN_RETRY = 10 + 
attempts = 1 + logging.info("Attempting to connect to MySQL Server") + while not self.con and timeout > 0 and not TEST_STOP: + try: + self.con = MySQLdb.connect(user=OPTIONS.user, host=OPTIONS.host, + port=OPTIONS.port, db=OPTIONS.db) + if self.con: + self.con.autocommit(False) + self.cur = self.con.cursor() + self.set_isolation_level(self.isolation_level) + logging.info("Connection successful after attempt %d" % attempts) + break + except MySQLdb.Error, e: + logging.debug(traceback.format_exc()) + time.sleep(SECONDS_BETWEEN_RETRY) + timeout -= SECONDS_BETWEEN_RETRY + attempts += 1 + return self.con is None + + def get_isolation_level(self): + execute(self.cur, "SELECT @@SESSION.tx_isolation") + if self.cur.rowcount != 1: + raise TestError("Unable to retrieve tx_isolation") + return self.cur.fetchone()[0] + + def set_isolation_level(self, isolation_level, persist = False): + if isolation_level is not None: + execute(self.cur, "SET @@SESSION.tx_isolation = '%s'" % isolation_level) + if self.cur.rowcount != 0: + raise TestError("Unable to set the isolation level to %s") + + if isolation_level is None or persist: + self.isolation_level = isolation_level + +# periodically kills the server +class ReaperWorker(WorkerThread): + def __init__(self): + WorkerThread.__init__(self, 'reaper') + self.start() + self.kills = 0 + + def finish(self): + logging.info('complete with %d kills' % self.kills) + if self.con: + self.con.close() + + def get_server_pid(self): + execute(self.cur, "SELECT @@pid_file") + if self.cur.rowcount != 1: + raise TestError("Unable to retrieve pid_file") + return int(open(self.cur.fetchone()[0]).read()) + + def runme(self): + global TEST_STOP + time_remain = random.randint(10, 30) + while not TEST_STOP: + if time_remain > 0: + time_remain -= 1 + time.sleep(1) + continue + if self.reconnect(): + raise Exception("Unable to connect to MySQL server") + logging.info('killing server...') + with open(OPTIONS.expect_file, 'w+') as expect_file: + 
expect_file.write('restart') + os.kill(self.get_server_pid(), signal.SIGTERM) + self.kills += 1 + time_remain = random.randint(0, 30) + OPTIONS.reap_delay; + +# runs initially to populate the table with the given number of rows +class PopulateWorker(WorkerThread): + def __init__(self, thread_id, start_id, num_to_add): + WorkerThread.__init__(self, 'populate-%d' % thread_id) + self.thread_id = thread_id + self.start_id = start_id + self.num_to_add = num_to_add + self.table = OPTIONS.table + self.start() + + def finish(self): + if self.con: + self.con.commit() + self.con.close() + + def runme(self): + if self.reconnect(): + raise Exception("Unable to connect to MySQL server") + + stmt = None + for i in xrange(self.start_id, self.start_id + self.num_to_add): + stmt = gen_insert(self.table, i, 0, 0, 0) + execute(self.cur, stmt) + if i % 101 == 0: + self.con.commit() + self.con.commit() + logging.info("Inserted %d rows starting at id %d" % + (self.num_to_add, self.start_id)) + +def populate_table(num_records): + + logging.info("Populate_table started for %d records" % num_records) + if num_records == 0: + return False + + num_workers = min(10, num_records / 100) + workers = [] + + N = num_records / num_workers + start_id = 0 + for i in xrange(num_workers): + workers.append(PopulateWorker(i, start_id, N)) + start_id += N + if num_records > start_id: + workers.append(PopulateWorker(num_workers, start_id, + num_records - start_id)) + + # Wait for the populate threads to complete + return wait_for_workers(workers) > 0 + +def gen_insert(table, idx, thread_id, request_id, zero_sum): + msg = gen_msg(idx, thread_id, request_id) + return ("INSERT INTO %s (id, thread_id, request_id, zero_sum, " + "msg, msg_length, msg_checksum) VALUES (%d,%d,%d,%d,'%s',%d,'%s')" + % (table, idx, thread_id, request_id, + zero_sum, msg, len(msg), sha1(msg))) + +def gen_update(table, idx, thread_id, request_id, zero_sum): + msg = gen_msg(idx, thread_id, request_id) + return ("UPDATE %s SET thread_id 
= %d, request_id = %d, " + "update_count = update_count + 1, zero_sum = zero_sum + (%d), " + "msg = '%s', msg_length = %d, msg_checksum = '%s' WHERE id = %d " + % (table, thread_id, request_id, zero_sum, msg, len(msg), + sha1(msg), idx)) + +def gen_delete(table, idx): + return "DELETE FROM %s WHERE id = %d" % (table, idx) + +def gen_insert_on_dup(table, idx, thread_id, request_id, zero_sum): + msg = gen_msg(idx, thread_id, request_id) + msg_checksum = sha1(msg) + return ("INSERT INTO %s (id, thread_id, request_id, zero_sum, " + "msg, msg_length, msg_checksum) VALUES (%d,%d,%d,%d,'%s',%d,'%s') " + "ON DUPLICATE KEY UPDATE " + "thread_id=%d, request_id=%d, " + "update_count=update_count+1, " + "zero_sum=zero_sum + (%d), msg='%s', msg_length=%d, " + "msg_checksum='%s'" % + (table, idx, thread_id, request_id, + zero_sum, msg, len(msg), msg_checksum, thread_id, request_id, + zero_sum, msg, len(msg), msg_checksum)) + +# Each loader thread owns a part of the id space which it maintains inventory +# for. The loader thread generates inserts, updates and deletes for the table. +# The latest successful transaction and the latest open transaction are kept to +# verify after a disconnect that the rows were recovered properly. +class LoadGenWorker(WorkerThread): + TXN_UNCOMMITTED = 0 + TXN_COMMIT_STARTED = 1 + TXN_COMMITTED = 2 + + def __init__(self, thread_id): + WorkerThread.__init__(self, 'loader-%02d' % thread_id) + self.thread_id = thread_id + self.rand = random.Random() + self.rand.seed(thread_id) + self.loop_num = 0 + + # id_map contains the array of id's owned by this worker thread. 
It needs + # to be offset by start_id for the actual id + self.id_map = array.array('l') + self.start_id = thread_id * OPTIONS.ids_per_loader + self.num_id = OPTIONS.ids_per_loader + self.start_share_id = OPTIONS.num_loaders * OPTIONS.ids_per_loader + self.max_id = OPTIONS.max_id + self.table = OPTIONS.table + self.num_requests = OPTIONS.num_requests + + # stores information about the latest series of successful transactions + # + # each transaction is simply a map of id -> request_id + # deleted rows are indicated by -request_id + self.prev_txn = deque() + self.cur_txn = None + self.cur_txn_state = None + + self.start() + + def finish(self): + if self.total_time: + req_per_sec = self.loop_num / self.total_time + else: + req_per_sec = -1 + logging.info("total txns: %d, txn/s: %.2f rps" % + (self.loop_num, req_per_sec)) + + # constructs the internal hash map of the ids owned by this thread and + # the request id of each id + def populate_id_map(self): + logging.info("Populating id map") + + REQ_ID_COL = 0 + stmt = "SELECT request_id FROM %s WHERE id = %d" + + # the start_id is used for tracking active transactions, so the row needs + # to exist + idx = self.start_id + execute(self.cur, stmt % (self.table, idx)) + if self.cur.rowcount > 0: + request_id = self.cur.fetchone()[REQ_ID_COL] + else: + request_id = get_next_request_id() + execute(self.cur, gen_insert(self.table, idx, self.thread_id, + request_id, 0)) + self.con.commit() + + self.id_map.append(request_id) + + self.cur_txn = {idx:request_id} + self.cur_txn_state = self.TXN_COMMITTED + for i in xrange(OPTIONS.committed_txns): + self.prev_txn.append(self.cur_txn) + + # fetch the rest of the row for the id space owned by this thread + for idx in xrange(self.start_id + 1, self.start_id + self.num_id): + execute(self.cur, stmt % (self.table, idx)) + if self.cur.rowcount == 0: + # Negative number is used to indicated a missing row + self.id_map.append(-1) + else: + res = self.cur.fetchone() + 
self.id_map.append(res[REQ_ID_COL]) + + self.con.commit() + + def apply_cur_txn_changes(self): + # apply the changes to the id_map + for idx in self.cur_txn: + if idx < self.start_id + self.num_id: + assert idx >= self.start_id + self.id_map[idx - self.start_id] = self.cur_txn[idx] + self.cur_txn_state = self.TXN_COMMITTED + + self.prev_txn.append(self.cur_txn) + self.prev_txn.popleft() + + def verify_txn(self, txn, committed): + request_id = txn[self.start_id] + if not committed: + # if the transaction was not committed, then there should be no rows + # in the table that have this request_id + cond = '=' + # it is possible the start_id used to track this transaction is in + # the process of being deleted + if request_id < 0: + request_id = -request_id + else: + # if the transaction was committed, then no rows modified by this + # transaction should have a request_id less than this transaction's id + cond = '<' + stmt = ("SELECT COUNT(*) FROM %s WHERE id IN (%s) AND request_id %s %d" % + (self.table, ','.join(str(x) for x in txn), cond, request_id)) + execute(self.cur, stmt) + if (self.cur.rowcount != 1): + raise TestError("Unable to retrieve results for query '%s'" % stmt) + count = self.cur.fetchone()[0] + if (count > 0): + raise TestError("Expected '%s' to return 0 rows, but %d returned " + "instead" % (stmt, count)) + self.con.commit() + + def verify_data(self): + # if the state of the current transaction is unknown (i.e. a commit was + # issued, but the connection failed before, check the start_id row to + # determine if it was committed + request_id = self.cur_txn[self.start_id] + if self.cur_txn_state == self.TXN_COMMIT_STARTED: + assert request_id >= 0 + idx = self.start_id + stmt = "SELECT id, request_id FROM %s where id = %d" % (self.table, idx) + execute(self.cur, stmt) + if (self.cur.rowcount == 0): + raise TestError("Fetching start_id %d via '%s' returned no data! " + "This row should never be deleted!" 
% (idx, stmt)) + REQUEST_ID_COL = 1 + res = self.cur.fetchone() + if res[REQUEST_ID_COL] == self.cur_txn[idx]: + self.apply_cur_txn_changes() + else: + self.cur_txn_state = self.TXN_UNCOMMITTED + self.con.commit() + + # if the transaction was not committed, verify there are no rows at this + # request id + # + # however, if the transaction was committed, then verify none of the rows + # have a request_id below the request_id recorded by the start_id row. + if self.cur_txn_state == self.TXN_UNCOMMITTED: + self.verify_txn(self.cur_txn, False) + + # verify all committed transactions + for txn in self.prev_txn: + self.verify_txn(txn, True) + + # verify the rows owned by this worker matches the request_id at which + # they were set. + idx = self.start_id + max_map_id = self.start_id + self.num_id + row_count = 0 + ID_COL = 0 + REQ_ID_COL = ID_COL + 1 + + while idx < max_map_id: + if (row_count == 0): + num_rows_to_check = random.randint(50, 100) + execute(self.cur, + "SELECT id, request_id FROM %s where id >= %d and id < %d " + "ORDER BY id LIMIT %d" + % (self.table, idx, max_map_id, num_rows_to_check)) + + # prevent future queries from being issued since we've hit the end of + # the rows that exist in the table + row_count = self.cur.rowcount if self.cur.rowcount != 0 else -1 + + # determine the id of the next available row in the table + if (row_count > 0): + res = self.cur.fetchone() + assert idx <= res[ID_COL] + next_id = res[ID_COL] + row_count -= 1 + else: + next_id = max_map_id + + # rows up to the next id don't exist within the table, verify our + # map has them as removed + while idx < next_id: + # see if the latest transaction may have modified this id. If so, use + # that value. 
+ if self.id_map[idx - self.start_id] >= 0: + raise ValidateError("Row id %d was not found in table, but " + "id_map has it at request_id %d" % + (idx, self.id_map[idx - self.start_id])) + idx += 1 + + if idx == max_map_id: + break + + if (self.id_map[idx - self.start_id] != res[REQ_ID_COL]): + raise ValidateError("Row id %d has req id %d, but %d is the " + "expected value!" % + (idx, res[REQ_ID_COL], + self.id_map[idx - self.start_id])) + idx += 1 + + self.con.commit() + logging.debug("Verified data successfully") + + def execute_one(self): + # select a number of rows; perform an insert; update or delete operation on + # them + num_rows = random.randint(1, MAX_ROWS_PER_REQ) + ids = array.array('L') + + # allocate at least one row in the id space owned by this worker + idx = random.randint(self.start_id, self.start_id + self.num_id - 1) + ids.append(idx) + + for i in xrange(1, num_rows): + # The valid ranges for ids is from start_id to start_id + num_id and from + # start_share_id to max_id. The randint() uses the range from + # start_share_id to max_id + num_id - 1. start_share_id to max_id covers + # the shared range. The exclusive range is covered by max_id to max_id + + # num_id - 1. If any number lands in this >= max_id section, it is + # remapped to start_id and used for selecting a row in the exclusive + # section. + idx = random.randint(self.start_share_id, self.max_id + self.num_id - 1) + if idx >= self.max_id: + idx -= self.max_id - self.start_id + if ids.count(idx) == 0: + ids.append(idx) + + # perform a read of these rows + ID_COL = 0 + ZERO_SUM_COL = ID_COL + 1 + + # For repeatable-read isolation levels on MyRocks, during the lock + # acquisition part of this transaction, it is possible the selected rows + # conflict with another thread's transaction. This results in a deadlock + # error that requires the whole transaction to be rolled back because the + # transaction's current snapshot will always be reading an older version of + # the row. 
MyRocks will prevent any updates to this row until the + # snapshot is released and re-acquired. + NUM_RETRIES = 100 + for i in xrange(NUM_RETRIES): + ids_found = {} + try: + for idx in ids: + stmt = ("SELECT id, zero_sum FROM %s WHERE id = %d " + "FOR UPDATE" % (self.table, idx)) + execute(self.cur, stmt) + if self.cur.rowcount > 0: + res = self.cur.fetchone() + ids_found[res[ID_COL]] = res[ZERO_SUM_COL] + break + except MySQLdb.OperationalError, e: + if not is_deadlock_error(e): + raise e + + # if a deadlock occurred, rollback the transaction and wait a short time + # before retrying. + logging.debug("%s generated deadlock, retry %d of %d" % + (stmt, i, NUM_RETRIES)) + self.con.rollback() + time.sleep(0.2) + + if i == NUM_RETRIES - 1: + raise TestError("Unable to acquire locks after a number of retries " + "for query '%s'" % stmt) + + # ensure that the zero_sum column remains summed up to zero at the + # end of this operation + current_sum = 0 + + # all row locks acquired at this point, so allocate a request_id + request_id = get_next_request_id() + self.cur_txn = {self.start_id:request_id} + self.cur_txn_state = self.TXN_UNCOMMITTED + + for idx in ids: + stmt = None + zero_sum = self.rand.randint(-1000, 1000) + action = self.rand.randint(0, 3) + is_delete = False + + if idx in ids_found: + # for each row found, determine if it should be updated or deleted + if action == 0: + stmt = gen_delete(self.table, idx) + is_delete = True + current_sum -= ids_found[idx] + else: + stmt = gen_update(self.table, idx, self.thread_id, request_id, + zero_sum) + current_sum += zero_sum + else: + # if it does not exist, then determine if an insert should happen + if action <= 1: + stmt = gen_insert(self.table, idx, self.thread_id, request_id, + zero_sum) + current_sum += zero_sum + + if stmt is not None: + # mark in self.cur_txn what these new changes will be + if is_delete: + self.cur_txn[idx] = -request_id + else: + self.cur_txn[idx] = request_id + execute(self.cur, stmt) + if 
self.cur.rowcount == 0: + raise TestError("Executing %s returned row count of 0!" % stmt) + + # the start_id row is used to determine if this transaction has been + # committed if the connect fails and it is used to adjust the zero_sum + # correctly + idx = self.start_id + ids.append(idx) + self.cur_txn[idx] = request_id + stmt = gen_insert_on_dup(self.table, idx, self.thread_id, request_id, + -current_sum) + execute(self.cur, stmt) + if self.cur.rowcount == 0: + raise TestError("Executing '%s' returned row count of 0!" % stmt) + + # 90% commit, 10% rollback + if roll_d100(90): + self.con.rollback() + logging.debug("request %s was rolled back" % request_id) + else: + self.cur_txn_state = self.TXN_COMMIT_STARTED + self.con.commit() + if not self.con.get_server_info(): + raise MySQLdb.OperationalError(MySQLdb.constants.CR.CONNECTION_ERROR, + "Possible connection error on commit") + self.apply_cur_txn_changes() + + self.loop_num += 1 + if self.loop_num % 1000 == 0: + logging.info("Processed %d transactions so far" % self.loop_num) + + def runme(self): + global TEST_STOP, LOADERS_READY + + self.start_time = time.time() + if self.reconnect(): + raise Exception("Unable to connect to MySQL server") + + self.populate_id_map() + self.verify_data() + + logging.info("Starting load generator") + reconnected = False + LOADERS_READY += 1 + + while self.loop_num < self.num_requests and not TEST_STOP: + try: + # verify our data on each reconnect and also on ocassion + if reconnected or random.randint(1, 500) == 1: + self.verify_data() + reconnected = False + + self.execute_one() + self.loop_num += 1 + except MySQLdb.OperationalError, e: + if not is_connection_error(e): + raise e + if self.reconnect(): + raise Exception("Unable to connect to MySQL server") + reconnected = True + return + +# the checker thread is running read only transactions to verify the row +# checksums match the message. 
+class CheckerWorker(WorkerThread): + def __init__(self, thread_id): + WorkerThread.__init__(self, 'checker-%02d' % thread_id) + self.thread_id = thread_id + self.rand = random.Random() + self.rand.seed(thread_id) + self.max_id = OPTIONS.max_id + self.table = OPTIONS.table + self.loop_num = 0 + self.start() + + def finish(self): + logging.info("total loops: %d" % self.loop_num) + + def check_zerosum(self): + # two methods for checking zero sum + # 1. request the server to do it (90% of the time for now) + # 2. read all rows and calculate directly + if roll_d100(90): + stmt = "SELECT SUM(zero_sum) FROM %s" % self.table + if roll_d100(50): + stmt += " FORCE INDEX(msg_i)" + execute(self.cur, stmt) + + if self.cur.rowcount != 1: + raise ValidateError("Error with query '%s'" % stmt) + res = self.cur.fetchone()[0] + if res != 0: + raise ValidateError("Expected zero_sum to be 0, but %d returned " + "instead" % res) + else: + cur_isolation_level = self.get_isolation_level() + self.set_isolation_level('REPEATABLE-READ') + num_rows_to_check = random.randint(500, 1000) + idx = 0 + sum = 0 + + stmt = "SELECT id, zero_sum FROM %s where id >= %d ORDER BY id LIMIT %d" + ID_COL = 0 + ZERO_SUM_COL = 1 + + while idx < self.max_id: + execute(self.cur, stmt % (self.table, idx, num_rows_to_check)) + if self.cur.rowcount == 0: + break + + for i in xrange(self.cur.rowcount - 1): + sum += self.cur.fetchone()[ZERO_SUM_COL] + + last_row = self.cur.fetchone() + idx = last_row[ID_COL] + 1 + sum += last_row[ZERO_SUM_COL] + + if sum != 0: + raise TestError("Zero sum column expected to total 0, but sum is %d " + "instead!" 
% sum) + self.set_isolation_level(cur_isolation_level) + + def check_rows(self): + class id_range(): + def __init__(self, min_id, min_inclusive, max_id, max_inclusive): + self.min_id = min_id if min_inclusive else min_id + 1 + self.max_id = max_id if max_inclusive else max_id - 1 + def count(self, idx): + return idx >= self.min_id and idx <= self.max_id + + stmt = ("SELECT id, msg, msg_length, msg_checksum FROM %s WHERE " % + self.table) + + # two methods for checking rows + # 1. pick a number of rows at random + # 2. range scan + if roll_d100(90): + ids = [] + for i in xrange(random.randint(1, MAX_ROWS_PER_REQ)): + ids.append(random.randint(0, self.max_id - 1)) + stmt += "id in (%s)" % ','.join(str(x) for x in ids) + else: + id1 = random.randint(0, self.max_id - 1) + id2 = random.randint(0, self.max_id - 1) + min_inclusive = random.randint(0, 1) + cond1 = '>=' if min_inclusive else '>' + max_inclusive = random.randint(0, 1) + cond2 = '<=' if max_inclusive else '<' + stmt += ("id %s %d AND id %s %d" % + (cond1, min(id1, id2), cond2, max(id1, id2))) + ids = id_range(min(id1, id2), min_inclusive, max(id1, id2), max_inclusive) + + execute(self.cur, stmt) + + ID_COL = 0 + MSG_COL = ID_COL + 1 + MSG_LENGTH_COL = MSG_COL + 1 + MSG_CHECKSUM_COL = MSG_LENGTH_COL + 1 + + for row in self.cur.fetchall(): + idx = row[ID_COL] + msg = row[MSG_COL] + msg_length = row[MSG_LENGTH_COL] + msg_checksum = row[MSG_CHECKSUM_COL] + if ids.count(idx) < 1: + raise ValidateError( + "id %d returned from database, but query was '%s'" % (idx, stmt)) + if (len(msg) != msg_length): + raise ValidateError( + "id %d contains msg_length %d, but msg '%s' is only %d " + "characters long" % (idx, msg_length, msg, len(msg))) + if (sha1(msg) != msg_checksum): + raise ValidateError("id %d has checksum '%s', but expected checksum " + "is '%s'" % (idx, msg_checksum, sha1(msg))) + + def runme(self): + global TEST_STOP + + self.start_time = time.time() + if self.reconnect(): + raise Exception("Unable to 
connect to MySQL server") + logging.info("Starting checker") + + while not TEST_STOP: + try: + # choose one of three options: + # 1. compute zero_sum across all rows is 0 + # 2. read a number of rows and verify checksums + if roll_d100(25): + self.check_zerosum() + else: + self.check_rows() + + self.con.commit() + self.loop_num += 1 + if self.loop_num % 10000 == 0: + logging.info("Processed %d transactions so far" % self.loop_num) + except MySQLdb.OperationalError, e: + if not is_connection_error(e): + raise e + if self.reconnect(): + raise Exception("Unable to reconnect to MySQL server") + +if __name__ == '__main__': + parser = argparse.ArgumentParser(description='Concurrent load generator.') + + parser.add_argument('-C, --committed-txns', dest='committed_txns', + default=3, type=int, + help="number of committed txns to verify") + + parser.add_argument('-c, --num-checkers', dest='num_checkers', type=int, + default=4, + help="number of reader/checker threads to test with") + + parser.add_argument('-d, --db', dest='db', default='test', + help="mysqld server database to test with") + + parser.add_argument('-H, --host', dest='host', default='127.0.0.1', + help="mysqld server host ip address") + + parser.add_argument('-i, --ids-per-loader', dest='ids_per_loader', + type=int, default=100, + help="number of records which each loader owns " + "exclusively, up to max-id / 2 / num-loaders") + + parser.add_argument('-L, --log-file', dest='log_file', default=None, + help="log file for output") + + parser.add_argument('-l, --num-loaders', dest='num_loaders', type=int, + default=16, + help="number of loader threads to test with") + + parser.add_argument('-m, --max-id', dest='max_id', type=int, default=1000, + help="maximum number of records which the table " + "extends to, must be larger than ids_per_loader * " + "num_loaders") + + parser.add_argument('-n, --num-records', dest='num_records', type=int, + default=0, + help="number of records to populate the table with") + + 
parser.add_argument('-P, --port', dest='port', default=3307, type=int, + help='mysqld server host port') + + parser.add_argument('-r, --num-requests', dest='num_requests', type=int, + default=100000000, + help="number of requests issued per worker thread") + + parser.add_argument('-T, --truncate', dest='truncate', action='store_true', + help="truncates or creates the table before the test") + + parser.add_argument('-t, --table', dest='table', default='t1', + help="mysqld server table to test with") + + parser.add_argument('-u, --user', dest='user', default='root', + help="user to log into the mysql server") + + parser.add_argument('-v, --verbose', dest='verbose', action='store_true', + help="enable debug logging") + + parser.add_argument('-E, --expect-file', dest='expect_file', default=None, + help="expect file for server restart") + + parser.add_argument('-D, --reap-delay', dest='reap_delay', type=int, + default=0, + help="seconds to sleep after each server reap") + + OPTIONS = parser.parse_args() + + if OPTIONS.verbose: + log_level = logging.DEBUG + else: + log_level = logging.INFO + + logging.basicConfig(level=log_level, + format='%(asctime)s %(threadName)s [%(levelname)s] ' + '%(message)s', + datefmt='%Y-%m-%d %H:%M:%S', + filename=OPTIONS.log_file) + + logging.info("Command line given: %s" % ' '.join(sys.argv)) + + if (OPTIONS.max_id < 0 or OPTIONS.ids_per_loader <= 0 or + OPTIONS.max_id < OPTIONS.ids_per_loader * OPTIONS.num_loaders): + logging.error("ids-per-loader must be larger tha 0 and max-id must be " + "larger than ids_per_loader * num_loaders") + exit(1) + + logging.info("Using table %s.%s for test" % (OPTIONS.db, OPTIONS.table)) + + if OPTIONS.truncate: + logging.info("Truncating table") + con = MySQLdb.connect(user=OPTIONS.user, host=OPTIONS.host, + port=OPTIONS.port, db=OPTIONS.db) + if not con: + raise TestError("Unable to connect to mysqld server to create/truncate " + "table") + cur = con.cursor() + cur.execute("SELECT COUNT(*) FROM 
INFORMATION_SCHEMA.tables WHERE " + "table_schema = '%s' AND table_name = '%s'" % + (OPTIONS.db, OPTIONS.table)) + if cur.rowcount != 1: + logging.error("Unable to retrieve information about table %s " + "from information_schema!" % OPTIONS.table) + exit(1) + + if cur.fetchone()[0] == 0: + logging.info("Table %s not found, creating a new one" % OPTIONS.table) + cur.execute("CREATE TABLE %s (id INT PRIMARY KEY, " + "thread_id INT NOT NULL, " + "request_id BIGINT UNSIGNED NOT NULL, " + "update_count INT UNSIGNED NOT NULL DEFAULT 0, " + "zero_sum INT DEFAULT 0, " + "msg VARCHAR(1024), " + "msg_length int, " + "msg_checksum varchar(128), " + "KEY msg_i(msg(255), zero_sum)) " + "ENGINE=RocksDB DEFAULT CHARSET=latin1 COLLATE=latin1_bin" % + OPTIONS.table) + else: + logging.info("Table %s found, truncating" % OPTIONS.table) + cur.execute("TRUNCATE TABLE %s" % OPTIONS.table) + con.commit() + + if populate_table(OPTIONS.num_records): + logging.error("Populate table returned an error") + exit(1) + + logging.info("Starting %d loaders" % OPTIONS.num_loaders) + loaders = [] + for i in xrange(OPTIONS.num_loaders): + loaders.append(LoadGenWorker(i)) + + logging.info("Starting %d checkers" % OPTIONS.num_checkers) + checkers = [] + for i in xrange(OPTIONS.num_checkers): + checkers.append(CheckerWorker(i)) + + while LOADERS_READY < OPTIONS.num_loaders: + time.sleep(0.5) + + if OPTIONS.expect_file and OPTIONS.reap_delay > 0: + logging.info('Starting reaper') + checkers.append(ReaperWorker()) + + workers_failed = 0 + workers_failed += wait_for_workers(loaders, len(checkers)) + + if TEST_STOP: + logging.error("Detected test failure, aborting") + os._exit(1) + + TEST_STOP = True + + workers_failed += wait_for_workers(checkers) + + if workers_failed > 0: + logging.error("Test detected %d failures, aborting" % workers_failed) + sys.exit(1) + + logging.info("Test completed successfully") + sys.exit(0) diff --git a/storage/rocksdb/mysql-test/rocksdb_stress/t/rocksdb_stress.test 
b/storage/rocksdb/mysql-test/rocksdb_stress/t/rocksdb_stress.test new file mode 100644 index 0000000000000..7d92bb3f83a23 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_stress/t/rocksdb_stress.test @@ -0,0 +1,31 @@ +# basic stress tests for myrocks, just runs the load generator without any crashes + +# Don't test this under valgrind, memory leaks will occur +--disable_warnings +--source include/not_valgrind.inc +--source include/have_rocksdb.inc +--source include/master-slave.inc +DROP TABLE IF EXISTS t1; +--enable_warnings + +# create the actual table +CREATE TABLE t1(id INT PRIMARY KEY, + thread_id INT NOT NULL, + request_id BIGINT UNSIGNED NOT NULL, + update_count INT UNSIGNED NOT NULL DEFAULT 0, + zero_sum INT DEFAULT 0, + msg VARCHAR(1024), + msg_length int, + msg_checksum varchar(128), + KEY msg_i(msg(255), zero_sum)) +ENGINE=RocksDB DEFAULT CHARSET=latin1 COLLATE=latin1_bin; + +--let $table = t1 +--let $num_loaders = 8 +--let $num_checkers = 2 +--let $num_records = 200 +--let $max_records = 100000 +--let $num_requests = 10000 +--let $reap_delay = 0 + +--source suite/rocksdb_stress/include/rocksdb_stress.inc diff --git a/storage/rocksdb/mysql-test/rocksdb_stress/t/rocksdb_stress_crash.test b/storage/rocksdb/mysql-test/rocksdb_stress/t/rocksdb_stress_crash.test new file mode 100644 index 0000000000000..6f6128579b5e5 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_stress/t/rocksdb_stress_crash.test @@ -0,0 +1,32 @@ +# basic stress tests for myrocks, runs the load generator with periodic crashes + +# Don't test this under valgrind, memory leaks will occur +--disable_warnings +--source include/not_valgrind.inc +--source include/have_rocksdb.inc +--source include/master-slave.inc +--source include/have_binlog_format_row.inc +DROP TABLE IF EXISTS t1; +--enable_warnings + +# create the actual table +CREATE TABLE t1(id INT PRIMARY KEY, + thread_id INT NOT NULL, + request_id BIGINT UNSIGNED NOT NULL, + update_count INT UNSIGNED NOT NULL DEFAULT 0, + 
zero_sum INT DEFAULT 0, + msg VARCHAR(1024), + msg_length int, + msg_checksum varchar(128), + KEY msg_i(msg(255), zero_sum)) +ENGINE=RocksDB DEFAULT CHARSET=latin1 COLLATE=latin1_bin; + +--let $table = t1 +--let $num_loaders = 8 +--let $num_checkers = 2 +--let $num_records = 200 +--let $max_records = 100000 +--let $num_requests = 10000 +--let $reap_delay = 180 + +--source suite/rocksdb_stress/include/rocksdb_stress.inc diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/include/correctboolvalue.inc b/storage/rocksdb/mysql-test/rocksdb_sys_vars/include/correctboolvalue.inc new file mode 100644 index 0000000000000..f675aec19f98e --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/include/correctboolvalue.inc @@ -0,0 +1,25 @@ +## +# $input the value of a boolean type +# $output the value of int type +## +--let $int_value=$value +if ($value==on) +{ + --let $int_value=1 +} + +if ($value==off) +{ + --let $int_value=0 +} + +# MySQL allows 'true' and 'false' on bool values +if ($value==true) +{ + --let $int_value=1 +} + +if ($value==false) +{ + --let $int_value=0 +} diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/include/have_rocksdb.inc b/storage/rocksdb/mysql-test/rocksdb_sys_vars/include/have_rocksdb.inc new file mode 100644 index 0000000000000..1f762d38c6401 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/include/have_rocksdb.inc @@ -0,0 +1,10 @@ +if (`SELECT COUNT(*) = 0 FROM INFORMATION_SCHEMA.ENGINES WHERE engine = 'rocksdb' AND support IN ('YES', 'DEFAULT', 'ENABLED')`) +{ + --skip Test requires engine RocksDB. +} + +--disable_query_log +# Table statistics can vary depending on when the memtables are flushed, so +# flush them at the beginning of the test to ensure the test runs consistently. 
+set global rocksdb_force_flush_memtable_now = true; +--enable_query_log diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/include/have_rocksdb.opt b/storage/rocksdb/mysql-test/rocksdb_sys_vars/include/have_rocksdb.opt new file mode 100644 index 0000000000000..36d7dda16094f --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/include/have_rocksdb.opt @@ -0,0 +1,12 @@ +--loose-enable-rocksdb +--loose-enable-rocksdb_global_info +--loose-enable-rocksdb_ddl +--loose-enable-rocksdb_cf_options +--loose-enable-rocksdb_perf_context +--loose-enable-rocksdb_perf_context_global +--loose-enable-rocksdb_index_file_map +--loose-enable-rocksdb_dbstats +--loose-enable-rocksdb_cfstats +--loose-enable-rocksdb_lock_info +--loose-enable-rocksdb_trx +--loose-enable-rocksdb_locks diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/include/rocksdb_sys_var.inc b/storage/rocksdb/mysql-test/rocksdb_sys_vars/include/rocksdb_sys_var.inc new file mode 100644 index 0000000000000..6ba9302667482 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/include/rocksdb_sys_var.inc @@ -0,0 +1,123 @@ +## +# $sys_var name of the variable +# $read_only - true if read-only +# $session - true if this is session, false if global-only +# $suppress_default_value - if true, don't check the default value +# valid_values table should contain valid values +# invalid_values table should contain invalid values +## + +--eval SET @start_global_value = @@global.$sys_var +if (!$suppress_default_value) +{ + SELECT @start_global_value; + if ($session) + { + --eval SET @start_session_value = @@session.$sys_var + SELECT @start_session_value; + } +} + +if (!$read_only) +{ + --echo '# Setting to valid values in global scope#' + + --let $i=1 + --let $value=query_get_value(select value from valid_values, value, $i) + while ($value != 'No such row') + { + --echo "Trying to set variable @@global.$sys_var to $value" + --eval SET @@global.$sys_var = $value + --eval SELECT @@global.$sys_var + --let $v=`SELECT @@global.$sys_var` + 
--source include/correctboolvalue.inc + if (!$sticky) + { + if ($v != $int_value) + { + --echo Set @@global.$sys_var to $value but it remained set to $v + --die Wrong variable value + } + } + + --echo "Setting the global scope variable back to default" + --eval SET @@global.$sys_var = DEFAULT + --eval SELECT @@global.$sys_var + + --inc $i + --let $value=query_get_value(select value from valid_values, value, $i) + } + + if ($session) + { + --echo '# Setting to valid values in session scope#' + + --let $i=1 + --let $value=query_get_value(select value from valid_values, value, $i) + while ($value != 'No such row') + { + --echo "Trying to set variable @@session.$sys_var to $value" + --eval SET @@session.$sys_var = $value + --eval SELECT @@session.$sys_var + --let $v=`SELECT @@session.$sys_var` + --source include/correctboolvalue.inc + if (!$sticky) + { + if ($v != $int_value) + { + --echo Set @@session.$sys_var to $value but it remained set to $v + --die Wrong variable value + } + } + --echo "Setting the session scope variable back to default" + --eval SET @@session.$sys_var = DEFAULT + --eval SELECT @@session.$sys_var + + --inc $i + --let $value=query_get_value(select value from valid_values, value, $i) + } + } + if (!$session) + { + --echo "Trying to set variable @@session.$sys_var to 444. It should fail because it is not session." 
+ --Error ER_GLOBAL_VARIABLE + --eval SET @@session.$sys_var = 444 + } + + --echo '# Testing with invalid values in global scope #' + #################################################################### + # Change the value of $sys_var to an invalid value                  # + #################################################################### + --let $i=1 + --let $value=query_get_value(select value from invalid_values, value, $i) + while ($value != 'No such row') + { + --echo "Trying to set variable @@global.$sys_var to $value" + --Error ER_WRONG_VALUE_FOR_VAR, ER_WRONG_TYPE_FOR_VAR + --eval SET @@global.$sys_var = $value + --eval SELECT @@global.$sys_var + --inc $i + --let $value=query_get_value(select value from invalid_values, value, $i) + } +} + +if ($read_only) +{ + --echo "Trying to set variable @@global.$sys_var to 444. It should fail because it is readonly." + --Error ER_INCORRECT_GLOBAL_LOCAL_VAR + --eval SET @@global.$sys_var = 444 +} + +#################################### +# Restore initial value # +#################################### +if (!$read_only) +{ + --eval SET @@global.$sys_var = @start_global_value + --eval SELECT @@global.$sys_var + if ($session) + { + --eval SET @@session.$sys_var = @start_session_value + --eval SELECT @@session.$sys_var + } +} diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/my.cnf b/storage/rocksdb/mysql-test/rocksdb_sys_vars/my.cnf new file mode 100644 index 0000000000000..1e9b0a9d3bb8c --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/my.cnf @@ -0,0 +1,10 @@ +!include include/default_my.cnf + +[server] +skip-innodb +default-storage-engine=rocksdb + + +sql-mode=NO_ENGINE_SUBSTITUTION +explicit-defaults-for-timestamp=1 +loose-rocksdb_lock_wait_timeout=1 diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/all_vars.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/all_vars.result new file mode 100644 index 0000000000000..9f21825d262f0 --- /dev/null +++ 
b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/all_vars.result @@ -0,0 +1,15 @@ +create table t1 (test_name text) engine=MyISAM; +create table t2 (variable_name text) engine=MyISAM; +load data infile "MYSQLTEST_VARDIR/tmp/rocksdb_sys_vars.all_vars.txt" into table t1; +insert into t2 select variable_name from information_schema.global_variables where variable_name like "rocksdb_%"; +insert into t2 select variable_name from information_schema.session_variables where variable_name like "rocksdb_%"; +select variable_name as `There should be *no* long test name listed below:` from t2 +where length(variable_name) > 50; +There should be *no* long test name listed below: +select variable_name as `There should be *no* variables listed below:` from t2 +left join t1 on variable_name=test_name where test_name is null ORDER BY variable_name; +There should be *no* variables listed below: +ROCKSDB_ENABLE_2PC +ROCKSDB_ENABLE_2PC +drop table t1; +drop table t2; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_access_hint_on_compaction_start_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_access_hint_on_compaction_start_basic.result new file mode 100644 index 0000000000000..4398563d0642a --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_access_hint_on_compaction_start_basic.result @@ -0,0 +1,7 @@ +SET @start_global_value = @@global.ROCKSDB_ACCESS_HINT_ON_COMPACTION_START; +SELECT @start_global_value; +@start_global_value +1 +"Trying to set variable @@global.ROCKSDB_ACCESS_HINT_ON_COMPACTION_START to 444. It should fail because it is readonly." 
+SET @@global.ROCKSDB_ACCESS_HINT_ON_COMPACTION_START = 444; +ERROR HY000: Variable 'rocksdb_access_hint_on_compaction_start' is a read only variable diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_advise_random_on_open_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_advise_random_on_open_basic.result new file mode 100644 index 0000000000000..f7175fd91a3a5 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_advise_random_on_open_basic.result @@ -0,0 +1,7 @@ +SET @start_global_value = @@global.ROCKSDB_ADVISE_RANDOM_ON_OPEN; +SELECT @start_global_value; +@start_global_value +1 +"Trying to set variable @@global.ROCKSDB_ADVISE_RANDOM_ON_OPEN to 444. It should fail because it is readonly." +SET @@global.ROCKSDB_ADVISE_RANDOM_ON_OPEN = 444; +ERROR HY000: Variable 'rocksdb_advise_random_on_open' is a read only variable diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_allow_concurrent_memtable_write_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_allow_concurrent_memtable_write_basic.result new file mode 100644 index 0000000000000..93ec1aec40762 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_allow_concurrent_memtable_write_basic.result @@ -0,0 +1,64 @@ +CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO valid_values VALUES(1); +INSERT INTO valid_values VALUES(0); +INSERT INTO valid_values VALUES('on'); +CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO invalid_values VALUES('\'aaa\''); +INSERT INTO invalid_values VALUES('\'bbb\''); +SET @start_global_value = @@global.ROCKSDB_ALLOW_CONCURRENT_MEMTABLE_WRITE; +SELECT @start_global_value; +@start_global_value +0 +'# Setting to valid values in global scope#' +"Trying to set variable @@global.ROCKSDB_ALLOW_CONCURRENT_MEMTABLE_WRITE to 1" +SET @@global.ROCKSDB_ALLOW_CONCURRENT_MEMTABLE_WRITE = 1; +SELECT @@global.ROCKSDB_ALLOW_CONCURRENT_MEMTABLE_WRITE; 
+@@global.ROCKSDB_ALLOW_CONCURRENT_MEMTABLE_WRITE +1 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_ALLOW_CONCURRENT_MEMTABLE_WRITE = DEFAULT; +SELECT @@global.ROCKSDB_ALLOW_CONCURRENT_MEMTABLE_WRITE; +@@global.ROCKSDB_ALLOW_CONCURRENT_MEMTABLE_WRITE +0 +"Trying to set variable @@global.ROCKSDB_ALLOW_CONCURRENT_MEMTABLE_WRITE to 0" +SET @@global.ROCKSDB_ALLOW_CONCURRENT_MEMTABLE_WRITE = 0; +SELECT @@global.ROCKSDB_ALLOW_CONCURRENT_MEMTABLE_WRITE; +@@global.ROCKSDB_ALLOW_CONCURRENT_MEMTABLE_WRITE +0 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_ALLOW_CONCURRENT_MEMTABLE_WRITE = DEFAULT; +SELECT @@global.ROCKSDB_ALLOW_CONCURRENT_MEMTABLE_WRITE; +@@global.ROCKSDB_ALLOW_CONCURRENT_MEMTABLE_WRITE +0 +"Trying to set variable @@global.ROCKSDB_ALLOW_CONCURRENT_MEMTABLE_WRITE to on" +SET @@global.ROCKSDB_ALLOW_CONCURRENT_MEMTABLE_WRITE = on; +SELECT @@global.ROCKSDB_ALLOW_CONCURRENT_MEMTABLE_WRITE; +@@global.ROCKSDB_ALLOW_CONCURRENT_MEMTABLE_WRITE +1 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_ALLOW_CONCURRENT_MEMTABLE_WRITE = DEFAULT; +SELECT @@global.ROCKSDB_ALLOW_CONCURRENT_MEMTABLE_WRITE; +@@global.ROCKSDB_ALLOW_CONCURRENT_MEMTABLE_WRITE +0 +"Trying to set variable @@session.ROCKSDB_ALLOW_CONCURRENT_MEMTABLE_WRITE to 444. It should fail because it is not session." 
+SET @@session.ROCKSDB_ALLOW_CONCURRENT_MEMTABLE_WRITE = 444; +ERROR HY000: Variable 'rocksdb_allow_concurrent_memtable_write' is a GLOBAL variable and should be set with SET GLOBAL +'# Testing with invalid values in global scope #' +"Trying to set variable @@global.ROCKSDB_ALLOW_CONCURRENT_MEMTABLE_WRITE to 'aaa'" +SET @@global.ROCKSDB_ALLOW_CONCURRENT_MEMTABLE_WRITE = 'aaa'; +Got one of the listed errors +SELECT @@global.ROCKSDB_ALLOW_CONCURRENT_MEMTABLE_WRITE; +@@global.ROCKSDB_ALLOW_CONCURRENT_MEMTABLE_WRITE +0 +"Trying to set variable @@global.ROCKSDB_ALLOW_CONCURRENT_MEMTABLE_WRITE to 'bbb'" +SET @@global.ROCKSDB_ALLOW_CONCURRENT_MEMTABLE_WRITE = 'bbb'; +Got one of the listed errors +SELECT @@global.ROCKSDB_ALLOW_CONCURRENT_MEMTABLE_WRITE; +@@global.ROCKSDB_ALLOW_CONCURRENT_MEMTABLE_WRITE +0 +SET @@global.ROCKSDB_ALLOW_CONCURRENT_MEMTABLE_WRITE = @start_global_value; +SELECT @@global.ROCKSDB_ALLOW_CONCURRENT_MEMTABLE_WRITE; +@@global.ROCKSDB_ALLOW_CONCURRENT_MEMTABLE_WRITE +0 +DROP TABLE valid_values; +DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_allow_mmap_reads_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_allow_mmap_reads_basic.result new file mode 100644 index 0000000000000..f0f1b077ae086 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_allow_mmap_reads_basic.result @@ -0,0 +1,7 @@ +SET @start_global_value = @@global.ROCKSDB_ALLOW_MMAP_READS; +SELECT @start_global_value; +@start_global_value +0 +"Trying to set variable @@global.ROCKSDB_ALLOW_MMAP_READS to 444. It should fail because it is readonly." 
+SET @@global.ROCKSDB_ALLOW_MMAP_READS = 444; +ERROR HY000: Variable 'rocksdb_allow_mmap_reads' is a read only variable diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_allow_mmap_writes_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_allow_mmap_writes_basic.result new file mode 100644 index 0000000000000..3fa1f14e1df87 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_allow_mmap_writes_basic.result @@ -0,0 +1,7 @@ +SET @start_global_value = @@global.ROCKSDB_ALLOW_MMAP_WRITES; +SELECT @start_global_value; +@start_global_value +0 +"Trying to set variable @@global.ROCKSDB_ALLOW_MMAP_WRITES to 444. It should fail because it is readonly." +SET @@global.ROCKSDB_ALLOW_MMAP_WRITES = 444; +ERROR HY000: Variable 'rocksdb_allow_mmap_writes' is a read only variable diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_background_sync_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_background_sync_basic.result new file mode 100644 index 0000000000000..8998bfee64d66 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_background_sync_basic.result @@ -0,0 +1,68 @@ +CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO valid_values VALUES(1); +INSERT INTO valid_values VALUES(0); +INSERT INTO valid_values VALUES('on'); +INSERT INTO valid_values VALUES('off'); +CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO invalid_values VALUES('\'aaa\''); +SET @start_global_value = @@global.ROCKSDB_BACKGROUND_SYNC; +SELECT @start_global_value; +@start_global_value +0 +'# Setting to valid values in global scope#' +"Trying to set variable @@global.ROCKSDB_BACKGROUND_SYNC to 1" +SET @@global.ROCKSDB_BACKGROUND_SYNC = 1; +SELECT @@global.ROCKSDB_BACKGROUND_SYNC; +@@global.ROCKSDB_BACKGROUND_SYNC +1 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_BACKGROUND_SYNC = DEFAULT; +SELECT 
@@global.ROCKSDB_BACKGROUND_SYNC; +@@global.ROCKSDB_BACKGROUND_SYNC +0 +"Trying to set variable @@global.ROCKSDB_BACKGROUND_SYNC to 0" +SET @@global.ROCKSDB_BACKGROUND_SYNC = 0; +SELECT @@global.ROCKSDB_BACKGROUND_SYNC; +@@global.ROCKSDB_BACKGROUND_SYNC +0 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_BACKGROUND_SYNC = DEFAULT; +SELECT @@global.ROCKSDB_BACKGROUND_SYNC; +@@global.ROCKSDB_BACKGROUND_SYNC +0 +"Trying to set variable @@global.ROCKSDB_BACKGROUND_SYNC to on" +SET @@global.ROCKSDB_BACKGROUND_SYNC = on; +SELECT @@global.ROCKSDB_BACKGROUND_SYNC; +@@global.ROCKSDB_BACKGROUND_SYNC +1 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_BACKGROUND_SYNC = DEFAULT; +SELECT @@global.ROCKSDB_BACKGROUND_SYNC; +@@global.ROCKSDB_BACKGROUND_SYNC +0 +"Trying to set variable @@global.ROCKSDB_BACKGROUND_SYNC to off" +SET @@global.ROCKSDB_BACKGROUND_SYNC = off; +SELECT @@global.ROCKSDB_BACKGROUND_SYNC; +@@global.ROCKSDB_BACKGROUND_SYNC +0 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_BACKGROUND_SYNC = DEFAULT; +SELECT @@global.ROCKSDB_BACKGROUND_SYNC; +@@global.ROCKSDB_BACKGROUND_SYNC +0 +"Trying to set variable @@session.ROCKSDB_BACKGROUND_SYNC to 444. It should fail because it is not session." 
+SET @@session.ROCKSDB_BACKGROUND_SYNC = 444; +ERROR HY000: Variable 'rocksdb_background_sync' is a GLOBAL variable and should be set with SET GLOBAL +'# Testing with invalid values in global scope #' +"Trying to set variable @@global.ROCKSDB_BACKGROUND_SYNC to 'aaa'" +SET @@global.ROCKSDB_BACKGROUND_SYNC = 'aaa'; +Got one of the listed errors +SELECT @@global.ROCKSDB_BACKGROUND_SYNC; +@@global.ROCKSDB_BACKGROUND_SYNC +0 +SET @@global.ROCKSDB_BACKGROUND_SYNC = @start_global_value; +SELECT @@global.ROCKSDB_BACKGROUND_SYNC; +@@global.ROCKSDB_BACKGROUND_SYNC +0 +DROP TABLE valid_values; +DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_base_background_compactions_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_base_background_compactions_basic.result new file mode 100644 index 0000000000000..09acaada0c631 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_base_background_compactions_basic.result @@ -0,0 +1,7 @@ +SET @start_global_value = @@global.ROCKSDB_BASE_BACKGROUND_COMPACTIONS; +SELECT @start_global_value; +@start_global_value +1 +"Trying to set variable @@global.ROCKSDB_BASE_BACKGROUND_COMPACTIONS to 444. It should fail because it is readonly." 
+SET @@global.ROCKSDB_BASE_BACKGROUND_COMPACTIONS = 444; +ERROR HY000: Variable 'rocksdb_base_background_compactions' is a read only variable diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_blind_delete_primary_key_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_blind_delete_primary_key_basic.result new file mode 100644 index 0000000000000..805ed2335f769 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_blind_delete_primary_key_basic.result @@ -0,0 +1,100 @@ +CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO valid_values VALUES(1); +INSERT INTO valid_values VALUES(0); +INSERT INTO valid_values VALUES('on'); +CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO invalid_values VALUES('\'aaa\''); +INSERT INTO invalid_values VALUES('\'bbb\''); +SET @start_global_value = @@global.ROCKSDB_BLIND_DELETE_PRIMARY_KEY; +SELECT @start_global_value; +@start_global_value +0 +SET @start_session_value = @@session.ROCKSDB_BLIND_DELETE_PRIMARY_KEY; +SELECT @start_session_value; +@start_session_value +0 +'# Setting to valid values in global scope#' +"Trying to set variable @@global.ROCKSDB_BLIND_DELETE_PRIMARY_KEY to 1" +SET @@global.ROCKSDB_BLIND_DELETE_PRIMARY_KEY = 1; +SELECT @@global.ROCKSDB_BLIND_DELETE_PRIMARY_KEY; +@@global.ROCKSDB_BLIND_DELETE_PRIMARY_KEY +1 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_BLIND_DELETE_PRIMARY_KEY = DEFAULT; +SELECT @@global.ROCKSDB_BLIND_DELETE_PRIMARY_KEY; +@@global.ROCKSDB_BLIND_DELETE_PRIMARY_KEY +0 +"Trying to set variable @@global.ROCKSDB_BLIND_DELETE_PRIMARY_KEY to 0" +SET @@global.ROCKSDB_BLIND_DELETE_PRIMARY_KEY = 0; +SELECT @@global.ROCKSDB_BLIND_DELETE_PRIMARY_KEY; +@@global.ROCKSDB_BLIND_DELETE_PRIMARY_KEY +0 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_BLIND_DELETE_PRIMARY_KEY = DEFAULT; +SELECT @@global.ROCKSDB_BLIND_DELETE_PRIMARY_KEY; 
+@@global.ROCKSDB_BLIND_DELETE_PRIMARY_KEY +0 +"Trying to set variable @@global.ROCKSDB_BLIND_DELETE_PRIMARY_KEY to on" +SET @@global.ROCKSDB_BLIND_DELETE_PRIMARY_KEY = on; +SELECT @@global.ROCKSDB_BLIND_DELETE_PRIMARY_KEY; +@@global.ROCKSDB_BLIND_DELETE_PRIMARY_KEY +1 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_BLIND_DELETE_PRIMARY_KEY = DEFAULT; +SELECT @@global.ROCKSDB_BLIND_DELETE_PRIMARY_KEY; +@@global.ROCKSDB_BLIND_DELETE_PRIMARY_KEY +0 +'# Setting to valid values in session scope#' +"Trying to set variable @@session.ROCKSDB_BLIND_DELETE_PRIMARY_KEY to 1" +SET @@session.ROCKSDB_BLIND_DELETE_PRIMARY_KEY = 1; +SELECT @@session.ROCKSDB_BLIND_DELETE_PRIMARY_KEY; +@@session.ROCKSDB_BLIND_DELETE_PRIMARY_KEY +1 +"Setting the session scope variable back to default" +SET @@session.ROCKSDB_BLIND_DELETE_PRIMARY_KEY = DEFAULT; +SELECT @@session.ROCKSDB_BLIND_DELETE_PRIMARY_KEY; +@@session.ROCKSDB_BLIND_DELETE_PRIMARY_KEY +0 +"Trying to set variable @@session.ROCKSDB_BLIND_DELETE_PRIMARY_KEY to 0" +SET @@session.ROCKSDB_BLIND_DELETE_PRIMARY_KEY = 0; +SELECT @@session.ROCKSDB_BLIND_DELETE_PRIMARY_KEY; +@@session.ROCKSDB_BLIND_DELETE_PRIMARY_KEY +0 +"Setting the session scope variable back to default" +SET @@session.ROCKSDB_BLIND_DELETE_PRIMARY_KEY = DEFAULT; +SELECT @@session.ROCKSDB_BLIND_DELETE_PRIMARY_KEY; +@@session.ROCKSDB_BLIND_DELETE_PRIMARY_KEY +0 +"Trying to set variable @@session.ROCKSDB_BLIND_DELETE_PRIMARY_KEY to on" +SET @@session.ROCKSDB_BLIND_DELETE_PRIMARY_KEY = on; +SELECT @@session.ROCKSDB_BLIND_DELETE_PRIMARY_KEY; +@@session.ROCKSDB_BLIND_DELETE_PRIMARY_KEY +1 +"Setting the session scope variable back to default" +SET @@session.ROCKSDB_BLIND_DELETE_PRIMARY_KEY = DEFAULT; +SELECT @@session.ROCKSDB_BLIND_DELETE_PRIMARY_KEY; +@@session.ROCKSDB_BLIND_DELETE_PRIMARY_KEY +0 +'# Testing with invalid values in global scope #' +"Trying to set variable @@global.ROCKSDB_BLIND_DELETE_PRIMARY_KEY to 'aaa'" +SET 
@@global.ROCKSDB_BLIND_DELETE_PRIMARY_KEY = 'aaa'; +Got one of the listed errors +SELECT @@global.ROCKSDB_BLIND_DELETE_PRIMARY_KEY; +@@global.ROCKSDB_BLIND_DELETE_PRIMARY_KEY +0 +"Trying to set variable @@global.ROCKSDB_BLIND_DELETE_PRIMARY_KEY to 'bbb'" +SET @@global.ROCKSDB_BLIND_DELETE_PRIMARY_KEY = 'bbb'; +Got one of the listed errors +SELECT @@global.ROCKSDB_BLIND_DELETE_PRIMARY_KEY; +@@global.ROCKSDB_BLIND_DELETE_PRIMARY_KEY +0 +SET @@global.ROCKSDB_BLIND_DELETE_PRIMARY_KEY = @start_global_value; +SELECT @@global.ROCKSDB_BLIND_DELETE_PRIMARY_KEY; +@@global.ROCKSDB_BLIND_DELETE_PRIMARY_KEY +0 +SET @@session.ROCKSDB_BLIND_DELETE_PRIMARY_KEY = @start_session_value; +SELECT @@session.ROCKSDB_BLIND_DELETE_PRIMARY_KEY; +@@session.ROCKSDB_BLIND_DELETE_PRIMARY_KEY +0 +DROP TABLE valid_values; +DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_block_cache_size_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_block_cache_size_basic.result new file mode 100644 index 0000000000000..1cfe5385d5c29 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_block_cache_size_basic.result @@ -0,0 +1,7 @@ +SET @start_global_value = @@global.ROCKSDB_BLOCK_CACHE_SIZE; +SELECT @start_global_value; +@start_global_value +536870912 +"Trying to set variable @@global.ROCKSDB_BLOCK_CACHE_SIZE to 444. It should fail because it is readonly." 
+SET @@global.ROCKSDB_BLOCK_CACHE_SIZE = 444; +ERROR HY000: Variable 'rocksdb_block_cache_size' is a read only variable diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_block_restart_interval_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_block_restart_interval_basic.result new file mode 100644 index 0000000000000..4d02e197a678e --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_block_restart_interval_basic.result @@ -0,0 +1,7 @@ +SET @start_global_value = @@global.ROCKSDB_BLOCK_RESTART_INTERVAL; +SELECT @start_global_value; +@start_global_value +16 +"Trying to set variable @@global.ROCKSDB_BLOCK_RESTART_INTERVAL to 444. It should fail because it is readonly." +SET @@global.ROCKSDB_BLOCK_RESTART_INTERVAL = 444; +ERROR HY000: Variable 'rocksdb_block_restart_interval' is a read only variable diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_block_size_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_block_size_basic.result new file mode 100644 index 0000000000000..0382184f2a0c1 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_block_size_basic.result @@ -0,0 +1,7 @@ +SET @start_global_value = @@global.ROCKSDB_BLOCK_SIZE; +SELECT @start_global_value; +@start_global_value +4096 +"Trying to set variable @@global.ROCKSDB_BLOCK_SIZE to 444. It should fail because it is readonly." 
+SET @@global.ROCKSDB_BLOCK_SIZE = 444; +ERROR HY000: Variable 'rocksdb_block_size' is a read only variable diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_block_size_deviation_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_block_size_deviation_basic.result new file mode 100644 index 0000000000000..83513f814ed59 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_block_size_deviation_basic.result @@ -0,0 +1,7 @@ +SET @start_global_value = @@global.ROCKSDB_BLOCK_SIZE_DEVIATION; +SELECT @start_global_value; +@start_global_value +10 +"Trying to set variable @@global.ROCKSDB_BLOCK_SIZE_DEVIATION to 444. It should fail because it is readonly." +SET @@global.ROCKSDB_BLOCK_SIZE_DEVIATION = 444; +ERROR HY000: Variable 'rocksdb_block_size_deviation' is a read only variable diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_bulk_load_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_bulk_load_basic.result new file mode 100644 index 0000000000000..96b78cf669eb7 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_bulk_load_basic.result @@ -0,0 +1,100 @@ +CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO valid_values VALUES(1); +INSERT INTO valid_values VALUES(0); +INSERT INTO valid_values VALUES('on'); +CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO invalid_values VALUES('\'aaa\''); +INSERT INTO invalid_values VALUES('\'bbb\''); +SET @start_global_value = @@global.ROCKSDB_BULK_LOAD; +SELECT @start_global_value; +@start_global_value +0 +SET @start_session_value = @@session.ROCKSDB_BULK_LOAD; +SELECT @start_session_value; +@start_session_value +0 +'# Setting to valid values in global scope#' +"Trying to set variable @@global.ROCKSDB_BULK_LOAD to 1" +SET @@global.ROCKSDB_BULK_LOAD = 1; +SELECT @@global.ROCKSDB_BULK_LOAD; +@@global.ROCKSDB_BULK_LOAD +1 +"Setting the global scope variable back to default" +SET 
@@global.ROCKSDB_BULK_LOAD = DEFAULT; +SELECT @@global.ROCKSDB_BULK_LOAD; +@@global.ROCKSDB_BULK_LOAD +0 +"Trying to set variable @@global.ROCKSDB_BULK_LOAD to 0" +SET @@global.ROCKSDB_BULK_LOAD = 0; +SELECT @@global.ROCKSDB_BULK_LOAD; +@@global.ROCKSDB_BULK_LOAD +0 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_BULK_LOAD = DEFAULT; +SELECT @@global.ROCKSDB_BULK_LOAD; +@@global.ROCKSDB_BULK_LOAD +0 +"Trying to set variable @@global.ROCKSDB_BULK_LOAD to on" +SET @@global.ROCKSDB_BULK_LOAD = on; +SELECT @@global.ROCKSDB_BULK_LOAD; +@@global.ROCKSDB_BULK_LOAD +1 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_BULK_LOAD = DEFAULT; +SELECT @@global.ROCKSDB_BULK_LOAD; +@@global.ROCKSDB_BULK_LOAD +0 +'# Setting to valid values in session scope#' +"Trying to set variable @@session.ROCKSDB_BULK_LOAD to 1" +SET @@session.ROCKSDB_BULK_LOAD = 1; +SELECT @@session.ROCKSDB_BULK_LOAD; +@@session.ROCKSDB_BULK_LOAD +1 +"Setting the session scope variable back to default" +SET @@session.ROCKSDB_BULK_LOAD = DEFAULT; +SELECT @@session.ROCKSDB_BULK_LOAD; +@@session.ROCKSDB_BULK_LOAD +0 +"Trying to set variable @@session.ROCKSDB_BULK_LOAD to 0" +SET @@session.ROCKSDB_BULK_LOAD = 0; +SELECT @@session.ROCKSDB_BULK_LOAD; +@@session.ROCKSDB_BULK_LOAD +0 +"Setting the session scope variable back to default" +SET @@session.ROCKSDB_BULK_LOAD = DEFAULT; +SELECT @@session.ROCKSDB_BULK_LOAD; +@@session.ROCKSDB_BULK_LOAD +0 +"Trying to set variable @@session.ROCKSDB_BULK_LOAD to on" +SET @@session.ROCKSDB_BULK_LOAD = on; +SELECT @@session.ROCKSDB_BULK_LOAD; +@@session.ROCKSDB_BULK_LOAD +1 +"Setting the session scope variable back to default" +SET @@session.ROCKSDB_BULK_LOAD = DEFAULT; +SELECT @@session.ROCKSDB_BULK_LOAD; +@@session.ROCKSDB_BULK_LOAD +0 +'# Testing with invalid values in global scope #' +"Trying to set variable @@global.ROCKSDB_BULK_LOAD to 'aaa'" +SET @@global.ROCKSDB_BULK_LOAD = 'aaa'; +Got one of the listed errors +SELECT 
@@global.ROCKSDB_BULK_LOAD; +@@global.ROCKSDB_BULK_LOAD +0 +"Trying to set variable @@global.ROCKSDB_BULK_LOAD to 'bbb'" +SET @@global.ROCKSDB_BULK_LOAD = 'bbb'; +Got one of the listed errors +SELECT @@global.ROCKSDB_BULK_LOAD; +@@global.ROCKSDB_BULK_LOAD +0 +SET @@global.ROCKSDB_BULK_LOAD = @start_global_value; +SELECT @@global.ROCKSDB_BULK_LOAD; +@@global.ROCKSDB_BULK_LOAD +0 +SET @@session.ROCKSDB_BULK_LOAD = @start_session_value; +SELECT @@session.ROCKSDB_BULK_LOAD; +@@session.ROCKSDB_BULK_LOAD +0 +DROP TABLE valid_values; +DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_bulk_load_size_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_bulk_load_size_basic.result new file mode 100644 index 0000000000000..40404d2fab5e4 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_bulk_load_size_basic.result @@ -0,0 +1,72 @@ +CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO valid_values VALUES(1); +INSERT INTO valid_values VALUES(1024); +CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO invalid_values VALUES('\'aaa\''); +SET @start_global_value = @@global.ROCKSDB_BULK_LOAD_SIZE; +SELECT @start_global_value; +@start_global_value +1000 +SET @start_session_value = @@session.ROCKSDB_BULK_LOAD_SIZE; +SELECT @start_session_value; +@start_session_value +1000 +'# Setting to valid values in global scope#' +"Trying to set variable @@global.ROCKSDB_BULK_LOAD_SIZE to 1" +SET @@global.ROCKSDB_BULK_LOAD_SIZE = 1; +SELECT @@global.ROCKSDB_BULK_LOAD_SIZE; +@@global.ROCKSDB_BULK_LOAD_SIZE +1 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_BULK_LOAD_SIZE = DEFAULT; +SELECT @@global.ROCKSDB_BULK_LOAD_SIZE; +@@global.ROCKSDB_BULK_LOAD_SIZE +1000 +"Trying to set variable @@global.ROCKSDB_BULK_LOAD_SIZE to 1024" +SET @@global.ROCKSDB_BULK_LOAD_SIZE = 1024; +SELECT @@global.ROCKSDB_BULK_LOAD_SIZE; +@@global.ROCKSDB_BULK_LOAD_SIZE 
+1024 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_BULK_LOAD_SIZE = DEFAULT; +SELECT @@global.ROCKSDB_BULK_LOAD_SIZE; +@@global.ROCKSDB_BULK_LOAD_SIZE +1000 +'# Setting to valid values in session scope#' +"Trying to set variable @@session.ROCKSDB_BULK_LOAD_SIZE to 1" +SET @@session.ROCKSDB_BULK_LOAD_SIZE = 1; +SELECT @@session.ROCKSDB_BULK_LOAD_SIZE; +@@session.ROCKSDB_BULK_LOAD_SIZE +1 +"Setting the session scope variable back to default" +SET @@session.ROCKSDB_BULK_LOAD_SIZE = DEFAULT; +SELECT @@session.ROCKSDB_BULK_LOAD_SIZE; +@@session.ROCKSDB_BULK_LOAD_SIZE +1000 +"Trying to set variable @@session.ROCKSDB_BULK_LOAD_SIZE to 1024" +SET @@session.ROCKSDB_BULK_LOAD_SIZE = 1024; +SELECT @@session.ROCKSDB_BULK_LOAD_SIZE; +@@session.ROCKSDB_BULK_LOAD_SIZE +1024 +"Setting the session scope variable back to default" +SET @@session.ROCKSDB_BULK_LOAD_SIZE = DEFAULT; +SELECT @@session.ROCKSDB_BULK_LOAD_SIZE; +@@session.ROCKSDB_BULK_LOAD_SIZE +1000 +'# Testing with invalid values in global scope #' +"Trying to set variable @@global.ROCKSDB_BULK_LOAD_SIZE to 'aaa'" +SET @@global.ROCKSDB_BULK_LOAD_SIZE = 'aaa'; +Got one of the listed errors +SELECT @@global.ROCKSDB_BULK_LOAD_SIZE; +@@global.ROCKSDB_BULK_LOAD_SIZE +1000 +SET @@global.ROCKSDB_BULK_LOAD_SIZE = @start_global_value; +SELECT @@global.ROCKSDB_BULK_LOAD_SIZE; +@@global.ROCKSDB_BULK_LOAD_SIZE +1000 +SET @@session.ROCKSDB_BULK_LOAD_SIZE = @start_session_value; +SELECT @@session.ROCKSDB_BULK_LOAD_SIZE; +@@session.ROCKSDB_BULK_LOAD_SIZE +1000 +DROP TABLE valid_values; +DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_bytes_per_sync_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_bytes_per_sync_basic.result new file mode 100644 index 0000000000000..ede02afcb6007 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_bytes_per_sync_basic.result @@ -0,0 +1,7 @@ +SET @start_global_value = 
@@global.ROCKSDB_BYTES_PER_SYNC; +SELECT @start_global_value; +@start_global_value +0 +"Trying to set variable @@global.ROCKSDB_BYTES_PER_SYNC to 444. It should fail because it is readonly." +SET @@global.ROCKSDB_BYTES_PER_SYNC = 444; +ERROR HY000: Variable 'rocksdb_bytes_per_sync' is a read only variable diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_cache_index_and_filter_blocks_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_cache_index_and_filter_blocks_basic.result new file mode 100644 index 0000000000000..12c25ad63dc4e --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_cache_index_and_filter_blocks_basic.result @@ -0,0 +1,7 @@ +SET @start_global_value = @@global.ROCKSDB_CACHE_INDEX_AND_FILTER_BLOCKS; +SELECT @start_global_value; +@start_global_value +1 +"Trying to set variable @@global.ROCKSDB_CACHE_INDEX_AND_FILTER_BLOCKS to 444. It should fail because it is readonly." +SET @@global.ROCKSDB_CACHE_INDEX_AND_FILTER_BLOCKS = 444; +ERROR HY000: Variable 'rocksdb_cache_index_and_filter_blocks' is a read only variable diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_checksums_pct_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_checksums_pct_basic.result new file mode 100644 index 0000000000000..694c9a4f1dc87 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_checksums_pct_basic.result @@ -0,0 +1,93 @@ +CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO valid_values VALUES(0); +INSERT INTO valid_values VALUES(1); +INSERT INTO valid_values VALUES(99); +CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO invalid_values VALUES('\'aaa\''); +SET @start_global_value = @@global.ROCKSDB_CHECKSUMS_PCT; +SELECT @start_global_value; +@start_global_value +100 +SET @start_session_value = @@session.ROCKSDB_CHECKSUMS_PCT; +SELECT @start_session_value; +@start_session_value +100 +'# Setting to valid values in 
global scope#' +"Trying to set variable @@global.ROCKSDB_CHECKSUMS_PCT to 0" +SET @@global.ROCKSDB_CHECKSUMS_PCT = 0; +SELECT @@global.ROCKSDB_CHECKSUMS_PCT; +@@global.ROCKSDB_CHECKSUMS_PCT +0 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_CHECKSUMS_PCT = DEFAULT; +SELECT @@global.ROCKSDB_CHECKSUMS_PCT; +@@global.ROCKSDB_CHECKSUMS_PCT +100 +"Trying to set variable @@global.ROCKSDB_CHECKSUMS_PCT to 1" +SET @@global.ROCKSDB_CHECKSUMS_PCT = 1; +SELECT @@global.ROCKSDB_CHECKSUMS_PCT; +@@global.ROCKSDB_CHECKSUMS_PCT +1 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_CHECKSUMS_PCT = DEFAULT; +SELECT @@global.ROCKSDB_CHECKSUMS_PCT; +@@global.ROCKSDB_CHECKSUMS_PCT +100 +"Trying to set variable @@global.ROCKSDB_CHECKSUMS_PCT to 99" +SET @@global.ROCKSDB_CHECKSUMS_PCT = 99; +SELECT @@global.ROCKSDB_CHECKSUMS_PCT; +@@global.ROCKSDB_CHECKSUMS_PCT +99 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_CHECKSUMS_PCT = DEFAULT; +SELECT @@global.ROCKSDB_CHECKSUMS_PCT; +@@global.ROCKSDB_CHECKSUMS_PCT +100 +'# Setting to valid values in session scope#' +"Trying to set variable @@session.ROCKSDB_CHECKSUMS_PCT to 0" +SET @@session.ROCKSDB_CHECKSUMS_PCT = 0; +SELECT @@session.ROCKSDB_CHECKSUMS_PCT; +@@session.ROCKSDB_CHECKSUMS_PCT +0 +"Setting the session scope variable back to default" +SET @@session.ROCKSDB_CHECKSUMS_PCT = DEFAULT; +SELECT @@session.ROCKSDB_CHECKSUMS_PCT; +@@session.ROCKSDB_CHECKSUMS_PCT +100 +"Trying to set variable @@session.ROCKSDB_CHECKSUMS_PCT to 1" +SET @@session.ROCKSDB_CHECKSUMS_PCT = 1; +SELECT @@session.ROCKSDB_CHECKSUMS_PCT; +@@session.ROCKSDB_CHECKSUMS_PCT +1 +"Setting the session scope variable back to default" +SET @@session.ROCKSDB_CHECKSUMS_PCT = DEFAULT; +SELECT @@session.ROCKSDB_CHECKSUMS_PCT; +@@session.ROCKSDB_CHECKSUMS_PCT +100 +"Trying to set variable @@session.ROCKSDB_CHECKSUMS_PCT to 99" +SET @@session.ROCKSDB_CHECKSUMS_PCT = 99; +SELECT 
@@session.ROCKSDB_CHECKSUMS_PCT; +@@session.ROCKSDB_CHECKSUMS_PCT +99 +"Setting the session scope variable back to default" +SET @@session.ROCKSDB_CHECKSUMS_PCT = DEFAULT; +SELECT @@session.ROCKSDB_CHECKSUMS_PCT; +@@session.ROCKSDB_CHECKSUMS_PCT +100 +'# Testing with invalid values in global scope #' +"Trying to set variable @@global.ROCKSDB_CHECKSUMS_PCT to 'aaa'" +SET @@global.ROCKSDB_CHECKSUMS_PCT = 'aaa'; +Got one of the listed errors +SELECT @@global.ROCKSDB_CHECKSUMS_PCT; +@@global.ROCKSDB_CHECKSUMS_PCT +100 +SET @@global.ROCKSDB_CHECKSUMS_PCT = @start_global_value; +SELECT @@global.ROCKSDB_CHECKSUMS_PCT; +@@global.ROCKSDB_CHECKSUMS_PCT +100 +SET @@session.ROCKSDB_CHECKSUMS_PCT = @start_session_value; +SELECT @@session.ROCKSDB_CHECKSUMS_PCT; +@@session.ROCKSDB_CHECKSUMS_PCT +100 +DROP TABLE valid_values; +DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_collect_sst_properties_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_collect_sst_properties_basic.result new file mode 100644 index 0000000000000..2f1019873325a --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_collect_sst_properties_basic.result @@ -0,0 +1,7 @@ +SET @start_global_value = @@global.ROCKSDB_COLLECT_SST_PROPERTIES; +SELECT @start_global_value; +@start_global_value +1 +"Trying to set variable @@global.ROCKSDB_COLLECT_SST_PROPERTIES to 444. It should fail because it is readonly." 
+SET @@global.ROCKSDB_COLLECT_SST_PROPERTIES = 444; +ERROR HY000: Variable 'rocksdb_collect_sst_properties' is a read only variable diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_commit_in_the_middle_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_commit_in_the_middle_basic.result new file mode 100644 index 0000000000000..4664ccb2b1e0e --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_commit_in_the_middle_basic.result @@ -0,0 +1,100 @@ +CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO valid_values VALUES(1); +INSERT INTO valid_values VALUES(0); +INSERT INTO valid_values VALUES('on'); +CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO invalid_values VALUES('\'aaa\''); +INSERT INTO invalid_values VALUES('\'bbb\''); +SET @start_global_value = @@global.ROCKSDB_COMMIT_IN_THE_MIDDLE; +SELECT @start_global_value; +@start_global_value +0 +SET @start_session_value = @@session.ROCKSDB_COMMIT_IN_THE_MIDDLE; +SELECT @start_session_value; +@start_session_value +0 +'# Setting to valid values in global scope#' +"Trying to set variable @@global.ROCKSDB_COMMIT_IN_THE_MIDDLE to 1" +SET @@global.ROCKSDB_COMMIT_IN_THE_MIDDLE = 1; +SELECT @@global.ROCKSDB_COMMIT_IN_THE_MIDDLE; +@@global.ROCKSDB_COMMIT_IN_THE_MIDDLE +1 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_COMMIT_IN_THE_MIDDLE = DEFAULT; +SELECT @@global.ROCKSDB_COMMIT_IN_THE_MIDDLE; +@@global.ROCKSDB_COMMIT_IN_THE_MIDDLE +0 +"Trying to set variable @@global.ROCKSDB_COMMIT_IN_THE_MIDDLE to 0" +SET @@global.ROCKSDB_COMMIT_IN_THE_MIDDLE = 0; +SELECT @@global.ROCKSDB_COMMIT_IN_THE_MIDDLE; +@@global.ROCKSDB_COMMIT_IN_THE_MIDDLE +0 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_COMMIT_IN_THE_MIDDLE = DEFAULT; +SELECT @@global.ROCKSDB_COMMIT_IN_THE_MIDDLE; +@@global.ROCKSDB_COMMIT_IN_THE_MIDDLE +0 +"Trying to set variable @@global.ROCKSDB_COMMIT_IN_THE_MIDDLE to 
on" +SET @@global.ROCKSDB_COMMIT_IN_THE_MIDDLE = on; +SELECT @@global.ROCKSDB_COMMIT_IN_THE_MIDDLE; +@@global.ROCKSDB_COMMIT_IN_THE_MIDDLE +1 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_COMMIT_IN_THE_MIDDLE = DEFAULT; +SELECT @@global.ROCKSDB_COMMIT_IN_THE_MIDDLE; +@@global.ROCKSDB_COMMIT_IN_THE_MIDDLE +0 +'# Setting to valid values in session scope#' +"Trying to set variable @@session.ROCKSDB_COMMIT_IN_THE_MIDDLE to 1" +SET @@session.ROCKSDB_COMMIT_IN_THE_MIDDLE = 1; +SELECT @@session.ROCKSDB_COMMIT_IN_THE_MIDDLE; +@@session.ROCKSDB_COMMIT_IN_THE_MIDDLE +1 +"Setting the session scope variable back to default" +SET @@session.ROCKSDB_COMMIT_IN_THE_MIDDLE = DEFAULT; +SELECT @@session.ROCKSDB_COMMIT_IN_THE_MIDDLE; +@@session.ROCKSDB_COMMIT_IN_THE_MIDDLE +0 +"Trying to set variable @@session.ROCKSDB_COMMIT_IN_THE_MIDDLE to 0" +SET @@session.ROCKSDB_COMMIT_IN_THE_MIDDLE = 0; +SELECT @@session.ROCKSDB_COMMIT_IN_THE_MIDDLE; +@@session.ROCKSDB_COMMIT_IN_THE_MIDDLE +0 +"Setting the session scope variable back to default" +SET @@session.ROCKSDB_COMMIT_IN_THE_MIDDLE = DEFAULT; +SELECT @@session.ROCKSDB_COMMIT_IN_THE_MIDDLE; +@@session.ROCKSDB_COMMIT_IN_THE_MIDDLE +0 +"Trying to set variable @@session.ROCKSDB_COMMIT_IN_THE_MIDDLE to on" +SET @@session.ROCKSDB_COMMIT_IN_THE_MIDDLE = on; +SELECT @@session.ROCKSDB_COMMIT_IN_THE_MIDDLE; +@@session.ROCKSDB_COMMIT_IN_THE_MIDDLE +1 +"Setting the session scope variable back to default" +SET @@session.ROCKSDB_COMMIT_IN_THE_MIDDLE = DEFAULT; +SELECT @@session.ROCKSDB_COMMIT_IN_THE_MIDDLE; +@@session.ROCKSDB_COMMIT_IN_THE_MIDDLE +0 +'# Testing with invalid values in global scope #' +"Trying to set variable @@global.ROCKSDB_COMMIT_IN_THE_MIDDLE to 'aaa'" +SET @@global.ROCKSDB_COMMIT_IN_THE_MIDDLE = 'aaa'; +Got one of the listed errors +SELECT @@global.ROCKSDB_COMMIT_IN_THE_MIDDLE; +@@global.ROCKSDB_COMMIT_IN_THE_MIDDLE +0 +"Trying to set variable @@global.ROCKSDB_COMMIT_IN_THE_MIDDLE to 'bbb'" +SET 
@@global.ROCKSDB_COMMIT_IN_THE_MIDDLE = 'bbb'; +Got one of the listed errors +SELECT @@global.ROCKSDB_COMMIT_IN_THE_MIDDLE; +@@global.ROCKSDB_COMMIT_IN_THE_MIDDLE +0 +SET @@global.ROCKSDB_COMMIT_IN_THE_MIDDLE = @start_global_value; +SELECT @@global.ROCKSDB_COMMIT_IN_THE_MIDDLE; +@@global.ROCKSDB_COMMIT_IN_THE_MIDDLE +0 +SET @@session.ROCKSDB_COMMIT_IN_THE_MIDDLE = @start_session_value; +SELECT @@session.ROCKSDB_COMMIT_IN_THE_MIDDLE; +@@session.ROCKSDB_COMMIT_IN_THE_MIDDLE +0 +DROP TABLE valid_values; +DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_compact_cf_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_compact_cf_basic.result new file mode 100644 index 0000000000000..b65ef65c8f0ef --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_compact_cf_basic.result @@ -0,0 +1,40 @@ +call mtr.add_suppression(" Column family '[a-z]*' not found."); +CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO valid_values VALUES('abc'); +INSERT INTO valid_values VALUES('def'); +CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; +SET @start_global_value = @@global.ROCKSDB_COMPACT_CF; +SELECT @start_global_value; +@start_global_value + +'# Setting to valid values in global scope#' +"Trying to set variable @@global.ROCKSDB_COMPACT_CF to abc" +SET @@global.ROCKSDB_COMPACT_CF = abc; +SELECT @@global.ROCKSDB_COMPACT_CF; +@@global.ROCKSDB_COMPACT_CF + +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_COMPACT_CF = DEFAULT; +SELECT @@global.ROCKSDB_COMPACT_CF; +@@global.ROCKSDB_COMPACT_CF + +"Trying to set variable @@global.ROCKSDB_COMPACT_CF to def" +SET @@global.ROCKSDB_COMPACT_CF = def; +SELECT @@global.ROCKSDB_COMPACT_CF; +@@global.ROCKSDB_COMPACT_CF + +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_COMPACT_CF = DEFAULT; +SELECT @@global.ROCKSDB_COMPACT_CF; +@@global.ROCKSDB_COMPACT_CF + +"Trying to set variable 
@@session.ROCKSDB_COMPACT_CF to 444. It should fail because it is not session." +SET @@session.ROCKSDB_COMPACT_CF = 444; +ERROR HY000: Variable 'rocksdb_compact_cf' is a GLOBAL variable and should be set with SET GLOBAL +'# Testing with invalid values in global scope #' +SET @@global.ROCKSDB_COMPACT_CF = @start_global_value; +SELECT @@global.ROCKSDB_COMPACT_CF; +@@global.ROCKSDB_COMPACT_CF + +DROP TABLE valid_values; +DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_compaction_readahead_size_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_compaction_readahead_size_basic.result new file mode 100644 index 0000000000000..d971396f9e8fd --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_compaction_readahead_size_basic.result @@ -0,0 +1,70 @@ +CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO valid_values VALUES(1); +INSERT INTO valid_values VALUES(0); +INSERT INTO valid_values VALUES(222333); +CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO invalid_values VALUES('\'aaa\''); +INSERT INTO invalid_values VALUES('\'bbb\''); +SET @@global.rocksdb_compaction_readahead_size = -1; +Warnings: +Warning 1292 Truncated incorrect rocksdb_compaction_readahead_siz value: '-1' +SELECT @@global.rocksdb_compaction_readahead_size; +@@global.rocksdb_compaction_readahead_size +0 +SET @start_global_value = @@global.ROCKSDB_COMPACTION_READAHEAD_SIZE; +SELECT @start_global_value; +@start_global_value +0 +'# Setting to valid values in global scope#' +"Trying to set variable @@global.ROCKSDB_COMPACTION_READAHEAD_SIZE to 1" +SET @@global.ROCKSDB_COMPACTION_READAHEAD_SIZE = 1; +SELECT @@global.ROCKSDB_COMPACTION_READAHEAD_SIZE; +@@global.ROCKSDB_COMPACTION_READAHEAD_SIZE +1 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_COMPACTION_READAHEAD_SIZE = DEFAULT; +SELECT @@global.ROCKSDB_COMPACTION_READAHEAD_SIZE; 
+@@global.ROCKSDB_COMPACTION_READAHEAD_SIZE +0 +"Trying to set variable @@global.ROCKSDB_COMPACTION_READAHEAD_SIZE to 0" +SET @@global.ROCKSDB_COMPACTION_READAHEAD_SIZE = 0; +SELECT @@global.ROCKSDB_COMPACTION_READAHEAD_SIZE; +@@global.ROCKSDB_COMPACTION_READAHEAD_SIZE +0 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_COMPACTION_READAHEAD_SIZE = DEFAULT; +SELECT @@global.ROCKSDB_COMPACTION_READAHEAD_SIZE; +@@global.ROCKSDB_COMPACTION_READAHEAD_SIZE +0 +"Trying to set variable @@global.ROCKSDB_COMPACTION_READAHEAD_SIZE to 222333" +SET @@global.ROCKSDB_COMPACTION_READAHEAD_SIZE = 222333; +SELECT @@global.ROCKSDB_COMPACTION_READAHEAD_SIZE; +@@global.ROCKSDB_COMPACTION_READAHEAD_SIZE +222333 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_COMPACTION_READAHEAD_SIZE = DEFAULT; +SELECT @@global.ROCKSDB_COMPACTION_READAHEAD_SIZE; +@@global.ROCKSDB_COMPACTION_READAHEAD_SIZE +0 +"Trying to set variable @@session.ROCKSDB_COMPACTION_READAHEAD_SIZE to 444. It should fail because it is not session." 
+SET @@session.ROCKSDB_COMPACTION_READAHEAD_SIZE = 444; +ERROR HY000: Variable 'rocksdb_compaction_readahead_size' is a GLOBAL variable and should be set with SET GLOBAL +'# Testing with invalid values in global scope #' +"Trying to set variable @@global.ROCKSDB_COMPACTION_READAHEAD_SIZE to 'aaa'" +SET @@global.ROCKSDB_COMPACTION_READAHEAD_SIZE = 'aaa'; +Got one of the listed errors +SELECT @@global.ROCKSDB_COMPACTION_READAHEAD_SIZE; +@@global.ROCKSDB_COMPACTION_READAHEAD_SIZE +0 +"Trying to set variable @@global.ROCKSDB_COMPACTION_READAHEAD_SIZE to 'bbb'" +SET @@global.ROCKSDB_COMPACTION_READAHEAD_SIZE = 'bbb'; +Got one of the listed errors +SELECT @@global.ROCKSDB_COMPACTION_READAHEAD_SIZE; +@@global.ROCKSDB_COMPACTION_READAHEAD_SIZE +0 +SET @@global.ROCKSDB_COMPACTION_READAHEAD_SIZE = @start_global_value; +SELECT @@global.ROCKSDB_COMPACTION_READAHEAD_SIZE; +@@global.ROCKSDB_COMPACTION_READAHEAD_SIZE +0 +DROP TABLE valid_values; +DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_compaction_sequential_deletes_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_compaction_sequential_deletes_basic.result new file mode 100644 index 0000000000000..311184a17d4c7 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_compaction_sequential_deletes_basic.result @@ -0,0 +1,64 @@ +CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO valid_values VALUES(1); +INSERT INTO valid_values VALUES(1024); +INSERT INTO valid_values VALUES(2000000); +CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO invalid_values VALUES('\'aaa\''); +INSERT INTO invalid_values VALUES('\'2000001\''); +SET @start_global_value = @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES; +SELECT @start_global_value; +@start_global_value +0 +'# Setting to valid values in global scope#' +"Trying to set variable @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES to 1" +SET 
@@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES = 1; +SELECT @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES; +@@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES +1 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES = DEFAULT; +SELECT @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES; +@@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES +0 +"Trying to set variable @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES to 1024" +SET @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES = 1024; +SELECT @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES; +@@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES +1024 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES = DEFAULT; +SELECT @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES; +@@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES +0 +"Trying to set variable @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES to 2000000" +SET @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES = 2000000; +SELECT @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES; +@@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES +2000000 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES = DEFAULT; +SELECT @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES; +@@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES +0 +"Trying to set variable @@session.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES to 444. It should fail because it is not session." 
+SET @@session.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES = 444; +ERROR HY000: Variable 'rocksdb_compaction_sequential_deletes' is a GLOBAL variable and should be set with SET GLOBAL +'# Testing with invalid values in global scope #' +"Trying to set variable @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES to 'aaa'" +SET @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES = 'aaa'; +Got one of the listed errors +SELECT @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES; +@@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES +0 +"Trying to set variable @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES to '2000001'" +SET @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES = '2000001'; +Got one of the listed errors +SELECT @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES; +@@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES +0 +SET @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES = @start_global_value; +SELECT @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES; +@@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES +0 +DROP TABLE valid_values; +DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_compaction_sequential_deletes_count_sd_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_compaction_sequential_deletes_count_sd_basic.result new file mode 100644 index 0000000000000..d4e7e28bebc32 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_compaction_sequential_deletes_count_sd_basic.result @@ -0,0 +1,64 @@ +CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO valid_values VALUES(1); +INSERT INTO valid_values VALUES(0); +INSERT INTO valid_values VALUES('on'); +CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO invalid_values VALUES('\'aaa\''); +INSERT INTO invalid_values VALUES('\'bbb\''); +SET @start_global_value = @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_COUNT_SD; +SELECT @start_global_value; +@start_global_value +0 +'# Setting to valid values in global scope#' +"Trying to set 
variable @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_COUNT_SD to 1" +SET @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_COUNT_SD = 1; +SELECT @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_COUNT_SD; +@@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_COUNT_SD +1 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_COUNT_SD = DEFAULT; +SELECT @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_COUNT_SD; +@@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_COUNT_SD +0 +"Trying to set variable @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_COUNT_SD to 0" +SET @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_COUNT_SD = 0; +SELECT @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_COUNT_SD; +@@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_COUNT_SD +0 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_COUNT_SD = DEFAULT; +SELECT @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_COUNT_SD; +@@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_COUNT_SD +0 +"Trying to set variable @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_COUNT_SD to on" +SET @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_COUNT_SD = on; +SELECT @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_COUNT_SD; +@@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_COUNT_SD +1 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_COUNT_SD = DEFAULT; +SELECT @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_COUNT_SD; +@@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_COUNT_SD +0 +"Trying to set variable @@session.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_COUNT_SD to 444. It should fail because it is not session." 
+SET @@session.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_COUNT_SD = 444; +ERROR HY000: Variable 'rocksdb_compaction_sequential_deletes_count_sd' is a GLOBAL variable and should be set with SET GLOBAL +'# Testing with invalid values in global scope #' +"Trying to set variable @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_COUNT_SD to 'aaa'" +SET @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_COUNT_SD = 'aaa'; +Got one of the listed errors +SELECT @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_COUNT_SD; +@@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_COUNT_SD +0 +"Trying to set variable @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_COUNT_SD to 'bbb'" +SET @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_COUNT_SD = 'bbb'; +Got one of the listed errors +SELECT @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_COUNT_SD; +@@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_COUNT_SD +0 +SET @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_COUNT_SD = @start_global_value; +SELECT @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_COUNT_SD; +@@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_COUNT_SD +0 +DROP TABLE valid_values; +DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_compaction_sequential_deletes_file_size_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_compaction_sequential_deletes_file_size_basic.result new file mode 100644 index 0000000000000..703e235ed18e3 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_compaction_sequential_deletes_file_size_basic.result @@ -0,0 +1,46 @@ +CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO valid_values VALUES(1); +INSERT INTO valid_values VALUES(1024); +CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO invalid_values VALUES('\'aaa\''); +SET @start_global_value = @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_FILE_SIZE; +SELECT @start_global_value; +@start_global_value +0 +'# Setting to valid values in global 
scope#' +"Trying to set variable @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_FILE_SIZE to 1" +SET @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_FILE_SIZE = 1; +SELECT @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_FILE_SIZE; +@@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_FILE_SIZE +1 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_FILE_SIZE = DEFAULT; +SELECT @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_FILE_SIZE; +@@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_FILE_SIZE +0 +"Trying to set variable @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_FILE_SIZE to 1024" +SET @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_FILE_SIZE = 1024; +SELECT @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_FILE_SIZE; +@@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_FILE_SIZE +1024 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_FILE_SIZE = DEFAULT; +SELECT @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_FILE_SIZE; +@@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_FILE_SIZE +0 +"Trying to set variable @@session.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_FILE_SIZE to 444. It should fail because it is not session." 
+SET @@session.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_FILE_SIZE = 444; +ERROR HY000: Variable 'rocksdb_compaction_sequential_deletes_file_size' is a GLOBAL variable and should be set with SET GLOBAL +'# Testing with invalid values in global scope #' +"Trying to set variable @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_FILE_SIZE to 'aaa'" +SET @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_FILE_SIZE = 'aaa'; +Got one of the listed errors +SELECT @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_FILE_SIZE; +@@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_FILE_SIZE +0 +SET @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_FILE_SIZE = @start_global_value; +SELECT @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_FILE_SIZE; +@@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_FILE_SIZE +0 +DROP TABLE valid_values; +DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_compaction_sequential_deletes_window_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_compaction_sequential_deletes_window_basic.result new file mode 100644 index 0000000000000..84436b65795d4 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_compaction_sequential_deletes_window_basic.result @@ -0,0 +1,64 @@ +CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO valid_values VALUES(1); +INSERT INTO valid_values VALUES(1024); +INSERT INTO valid_values VALUES(2000000); +CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO invalid_values VALUES('\'aaa\''); +INSERT INTO invalid_values VALUES('\'2000001\''); +SET @start_global_value = @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_WINDOW; +SELECT @start_global_value; +@start_global_value +0 +'# Setting to valid values in global scope#' +"Trying to set variable @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_WINDOW to 1" +SET @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_WINDOW = 1; +SELECT @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_WINDOW; 
+@@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_WINDOW +1 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_WINDOW = DEFAULT; +SELECT @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_WINDOW; +@@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_WINDOW +0 +"Trying to set variable @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_WINDOW to 1024" +SET @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_WINDOW = 1024; +SELECT @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_WINDOW; +@@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_WINDOW +1024 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_WINDOW = DEFAULT; +SELECT @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_WINDOW; +@@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_WINDOW +0 +"Trying to set variable @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_WINDOW to 2000000" +SET @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_WINDOW = 2000000; +SELECT @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_WINDOW; +@@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_WINDOW +2000000 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_WINDOW = DEFAULT; +SELECT @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_WINDOW; +@@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_WINDOW +0 +"Trying to set variable @@session.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_WINDOW to 444. It should fail because it is not session." 
+SET @@session.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_WINDOW = 444; +ERROR HY000: Variable 'rocksdb_compaction_sequential_deletes_window' is a GLOBAL variable and should be set with SET GLOBAL +'# Testing with invalid values in global scope #' +"Trying to set variable @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_WINDOW to 'aaa'" +SET @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_WINDOW = 'aaa'; +Got one of the listed errors +SELECT @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_WINDOW; +@@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_WINDOW +0 +"Trying to set variable @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_WINDOW to '2000001'" +SET @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_WINDOW = '2000001'; +Got one of the listed errors +SELECT @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_WINDOW; +@@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_WINDOW +0 +SET @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_WINDOW = @start_global_value; +SELECT @@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_WINDOW; +@@global.ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_WINDOW +0 +DROP TABLE valid_values; +DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_create_checkpoint_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_create_checkpoint_basic.result new file mode 100644 index 0000000000000..35e4d252e11ff --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_create_checkpoint_basic.result @@ -0,0 +1,15 @@ +SET @start_value = @@global.ROCKSDB_CREATE_CHECKPOINT; +SET @@global.ROCKSDB_CREATE_CHECKPOINT = 'TMP/abc'; +SELECT @@global.ROCKSDB_CREATE_CHECKPOINT; +@@global.ROCKSDB_CREATE_CHECKPOINT + +SET @@global.ROCKSDB_CREATE_CHECKPOINT = DEFAULT; +SET @@global.ROCKSDB_CREATE_CHECKPOINT = 'TMP/def'; +SELECT @@global.ROCKSDB_CREATE_CHECKPOINT; +@@global.ROCKSDB_CREATE_CHECKPOINT + +SET @@global.ROCKSDB_CREATE_CHECKPOINT = DEFAULT; +SET @@session.ROCKSDB_CREATE_CHECKPOINT = 444; +ERROR HY000: Variable 
'rocksdb_create_checkpoint' is a GLOBAL variable and should be set with SET GLOBAL +SET @@global.ROCKSDB_CREATE_CHECKPOINT = @start_value; +ERROR HY000: RocksDB: Failed to create checkpoint directory. status 5 IO error: .tmp: No such file or directory diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_create_if_missing_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_create_if_missing_basic.result new file mode 100644 index 0000000000000..26dd14fbb68bf --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_create_if_missing_basic.result @@ -0,0 +1,14 @@ +CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO valid_values VALUES(1); +INSERT INTO valid_values VALUES(1024); +CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO invalid_values VALUES('\'aaa\''); +SET @start_global_value = @@global.ROCKSDB_CREATE_IF_MISSING; +SELECT @start_global_value; +@start_global_value +1 +"Trying to set variable @@global.ROCKSDB_CREATE_IF_MISSING to 444. It should fail because it is readonly." 
+SET @@global.ROCKSDB_CREATE_IF_MISSING = 444; +ERROR HY000: Variable 'rocksdb_create_if_missing' is a read only variable +DROP TABLE valid_values; +DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_create_missing_column_families_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_create_missing_column_families_basic.result new file mode 100644 index 0000000000000..7debadc2bb170 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_create_missing_column_families_basic.result @@ -0,0 +1,14 @@ +CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO valid_values VALUES(1); +INSERT INTO valid_values VALUES(1024); +CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO invalid_values VALUES('\'aaa\''); +SET @start_global_value = @@global.ROCKSDB_CREATE_MISSING_COLUMN_FAMILIES; +SELECT @start_global_value; +@start_global_value +0 +"Trying to set variable @@global.ROCKSDB_CREATE_MISSING_COLUMN_FAMILIES to 444. It should fail because it is readonly." +SET @@global.ROCKSDB_CREATE_MISSING_COLUMN_FAMILIES = 444; +ERROR HY000: Variable 'rocksdb_create_missing_column_families' is a read only variable +DROP TABLE valid_values; +DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_datadir_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_datadir_basic.result new file mode 100644 index 0000000000000..a3f9eff6c1f53 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_datadir_basic.result @@ -0,0 +1,7 @@ +SET @start_global_value = @@global.ROCKSDB_DATADIR; +SELECT @start_global_value; +@start_global_value +./.rocksdb +"Trying to set variable @@global.ROCKSDB_DATADIR to 444. It should fail because it is readonly." 
+SET @@global.ROCKSDB_DATADIR = 444; +ERROR HY000: Variable 'rocksdb_datadir' is a read only variable diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_db_write_buffer_size_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_db_write_buffer_size_basic.result new file mode 100644 index 0000000000000..6c588b7e060ca --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_db_write_buffer_size_basic.result @@ -0,0 +1,7 @@ +SET @start_global_value = @@global.ROCKSDB_DB_WRITE_BUFFER_SIZE; +SELECT @start_global_value; +@start_global_value +0 +"Trying to set variable @@global.ROCKSDB_DB_WRITE_BUFFER_SIZE to 444. It should fail because it is readonly." +SET @@global.ROCKSDB_DB_WRITE_BUFFER_SIZE = 444; +ERROR HY000: Variable 'rocksdb_db_write_buffer_size' is a read only variable diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_deadlock_detect_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_deadlock_detect_basic.result new file mode 100644 index 0000000000000..f200105b54202 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_deadlock_detect_basic.result @@ -0,0 +1,121 @@ +CREATE TABLE valid_values (value varchar(255)); +INSERT INTO valid_values VALUES(1); +INSERT INTO valid_values VALUES(0); +INSERT INTO valid_values VALUES('on'); +INSERT INTO valid_values VALUES('off'); +CREATE TABLE invalid_values (value varchar(255)); +INSERT INTO invalid_values VALUES('\'aaa\''); +INSERT INTO invalid_values VALUES('\'bbb\''); +SET @start_global_value = @@global.ROCKSDB_DEADLOCK_DETECT; +SELECT @start_global_value; +@start_global_value +0 +SET @start_session_value = @@session.ROCKSDB_DEADLOCK_DETECT; +SELECT @start_session_value; +@start_session_value +0 +'# Setting to valid values in global scope#' +"Trying to set variable @@global.ROCKSDB_DEADLOCK_DETECT to 1" +SET @@global.ROCKSDB_DEADLOCK_DETECT = 1; +SELECT @@global.ROCKSDB_DEADLOCK_DETECT; +@@global.ROCKSDB_DEADLOCK_DETECT 
+1 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_DEADLOCK_DETECT = DEFAULT; +SELECT @@global.ROCKSDB_DEADLOCK_DETECT; +@@global.ROCKSDB_DEADLOCK_DETECT +0 +"Trying to set variable @@global.ROCKSDB_DEADLOCK_DETECT to 0" +SET @@global.ROCKSDB_DEADLOCK_DETECT = 0; +SELECT @@global.ROCKSDB_DEADLOCK_DETECT; +@@global.ROCKSDB_DEADLOCK_DETECT +0 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_DEADLOCK_DETECT = DEFAULT; +SELECT @@global.ROCKSDB_DEADLOCK_DETECT; +@@global.ROCKSDB_DEADLOCK_DETECT +0 +"Trying to set variable @@global.ROCKSDB_DEADLOCK_DETECT to on" +SET @@global.ROCKSDB_DEADLOCK_DETECT = on; +SELECT @@global.ROCKSDB_DEADLOCK_DETECT; +@@global.ROCKSDB_DEADLOCK_DETECT +1 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_DEADLOCK_DETECT = DEFAULT; +SELECT @@global.ROCKSDB_DEADLOCK_DETECT; +@@global.ROCKSDB_DEADLOCK_DETECT +0 +"Trying to set variable @@global.ROCKSDB_DEADLOCK_DETECT to off" +SET @@global.ROCKSDB_DEADLOCK_DETECT = off; +SELECT @@global.ROCKSDB_DEADLOCK_DETECT; +@@global.ROCKSDB_DEADLOCK_DETECT +0 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_DEADLOCK_DETECT = DEFAULT; +SELECT @@global.ROCKSDB_DEADLOCK_DETECT; +@@global.ROCKSDB_DEADLOCK_DETECT +0 +'# Setting to valid values in session scope#' +"Trying to set variable @@session.ROCKSDB_DEADLOCK_DETECT to 1" +SET @@session.ROCKSDB_DEADLOCK_DETECT = 1; +SELECT @@session.ROCKSDB_DEADLOCK_DETECT; +@@session.ROCKSDB_DEADLOCK_DETECT +1 +"Setting the session scope variable back to default" +SET @@session.ROCKSDB_DEADLOCK_DETECT = DEFAULT; +SELECT @@session.ROCKSDB_DEADLOCK_DETECT; +@@session.ROCKSDB_DEADLOCK_DETECT +0 +"Trying to set variable @@session.ROCKSDB_DEADLOCK_DETECT to 0" +SET @@session.ROCKSDB_DEADLOCK_DETECT = 0; +SELECT @@session.ROCKSDB_DEADLOCK_DETECT; +@@session.ROCKSDB_DEADLOCK_DETECT +0 +"Setting the session scope variable back to default" +SET @@session.ROCKSDB_DEADLOCK_DETECT = 
DEFAULT; +SELECT @@session.ROCKSDB_DEADLOCK_DETECT; +@@session.ROCKSDB_DEADLOCK_DETECT +0 +"Trying to set variable @@session.ROCKSDB_DEADLOCK_DETECT to on" +SET @@session.ROCKSDB_DEADLOCK_DETECT = on; +SELECT @@session.ROCKSDB_DEADLOCK_DETECT; +@@session.ROCKSDB_DEADLOCK_DETECT +1 +"Setting the session scope variable back to default" +SET @@session.ROCKSDB_DEADLOCK_DETECT = DEFAULT; +SELECT @@session.ROCKSDB_DEADLOCK_DETECT; +@@session.ROCKSDB_DEADLOCK_DETECT +0 +"Trying to set variable @@session.ROCKSDB_DEADLOCK_DETECT to off" +SET @@session.ROCKSDB_DEADLOCK_DETECT = off; +SELECT @@session.ROCKSDB_DEADLOCK_DETECT; +@@session.ROCKSDB_DEADLOCK_DETECT +0 +"Setting the session scope variable back to default" +SET @@session.ROCKSDB_DEADLOCK_DETECT = DEFAULT; +SELECT @@session.ROCKSDB_DEADLOCK_DETECT; +@@session.ROCKSDB_DEADLOCK_DETECT +0 +'# Testing with invalid values in global scope #' +"Trying to set variable @@global.ROCKSDB_DEADLOCK_DETECT to 'aaa'" +SET @@global.ROCKSDB_DEADLOCK_DETECT = 'aaa'; +Got one of the listed errors +SELECT @@global.ROCKSDB_DEADLOCK_DETECT; +@@global.ROCKSDB_DEADLOCK_DETECT +0 +"Trying to set variable @@global.ROCKSDB_DEADLOCK_DETECT to 'bbb'" +SET @@global.ROCKSDB_DEADLOCK_DETECT = 'bbb'; +Got one of the listed errors +SELECT @@global.ROCKSDB_DEADLOCK_DETECT; +@@global.ROCKSDB_DEADLOCK_DETECT +0 +SET @@global.ROCKSDB_DEADLOCK_DETECT = @start_global_value; +SELECT @@global.ROCKSDB_DEADLOCK_DETECT; +@@global.ROCKSDB_DEADLOCK_DETECT +0 +SET @@session.ROCKSDB_DEADLOCK_DETECT = @start_session_value; +SELECT @@session.ROCKSDB_DEADLOCK_DETECT; +@@session.ROCKSDB_DEADLOCK_DETECT +0 +DROP TABLE valid_values; +DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_debug_optimizer_no_zero_cardinality_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_debug_optimizer_no_zero_cardinality_basic.result new file mode 100644 index 0000000000000..5e64ccc69c372 --- /dev/null +++ 
b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_debug_optimizer_no_zero_cardinality_basic.result @@ -0,0 +1,64 @@ +CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO valid_values VALUES(1); +INSERT INTO valid_values VALUES(0); +INSERT INTO valid_values VALUES('on'); +CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO invalid_values VALUES('\'aaa\''); +INSERT INTO invalid_values VALUES('\'bbb\''); +SET @start_global_value = @@global.ROCKSDB_DEBUG_OPTIMIZER_NO_ZERO_CARDINALITY; +SELECT @start_global_value; +@start_global_value +1 +'# Setting to valid values in global scope#' +"Trying to set variable @@global.ROCKSDB_DEBUG_OPTIMIZER_NO_ZERO_CARDINALITY to 1" +SET @@global.ROCKSDB_DEBUG_OPTIMIZER_NO_ZERO_CARDINALITY = 1; +SELECT @@global.ROCKSDB_DEBUG_OPTIMIZER_NO_ZERO_CARDINALITY; +@@global.ROCKSDB_DEBUG_OPTIMIZER_NO_ZERO_CARDINALITY +1 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_DEBUG_OPTIMIZER_NO_ZERO_CARDINALITY = DEFAULT; +SELECT @@global.ROCKSDB_DEBUG_OPTIMIZER_NO_ZERO_CARDINALITY; +@@global.ROCKSDB_DEBUG_OPTIMIZER_NO_ZERO_CARDINALITY +1 +"Trying to set variable @@global.ROCKSDB_DEBUG_OPTIMIZER_NO_ZERO_CARDINALITY to 0" +SET @@global.ROCKSDB_DEBUG_OPTIMIZER_NO_ZERO_CARDINALITY = 0; +SELECT @@global.ROCKSDB_DEBUG_OPTIMIZER_NO_ZERO_CARDINALITY; +@@global.ROCKSDB_DEBUG_OPTIMIZER_NO_ZERO_CARDINALITY +0 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_DEBUG_OPTIMIZER_NO_ZERO_CARDINALITY = DEFAULT; +SELECT @@global.ROCKSDB_DEBUG_OPTIMIZER_NO_ZERO_CARDINALITY; +@@global.ROCKSDB_DEBUG_OPTIMIZER_NO_ZERO_CARDINALITY +1 +"Trying to set variable @@global.ROCKSDB_DEBUG_OPTIMIZER_NO_ZERO_CARDINALITY to on" +SET @@global.ROCKSDB_DEBUG_OPTIMIZER_NO_ZERO_CARDINALITY = on; +SELECT @@global.ROCKSDB_DEBUG_OPTIMIZER_NO_ZERO_CARDINALITY; +@@global.ROCKSDB_DEBUG_OPTIMIZER_NO_ZERO_CARDINALITY +1 +"Setting the global scope variable back to default" +SET 
@@global.ROCKSDB_DEBUG_OPTIMIZER_NO_ZERO_CARDINALITY = DEFAULT; +SELECT @@global.ROCKSDB_DEBUG_OPTIMIZER_NO_ZERO_CARDINALITY; +@@global.ROCKSDB_DEBUG_OPTIMIZER_NO_ZERO_CARDINALITY +1 +"Trying to set variable @@session.ROCKSDB_DEBUG_OPTIMIZER_NO_ZERO_CARDINALITY to 444. It should fail because it is not session." +SET @@session.ROCKSDB_DEBUG_OPTIMIZER_NO_ZERO_CARDINALITY = 444; +ERROR HY000: Variable 'rocksdb_debug_optimizer_no_zero_cardinality' is a GLOBAL variable and should be set with SET GLOBAL +'# Testing with invalid values in global scope #' +"Trying to set variable @@global.ROCKSDB_DEBUG_OPTIMIZER_NO_ZERO_CARDINALITY to 'aaa'" +SET @@global.ROCKSDB_DEBUG_OPTIMIZER_NO_ZERO_CARDINALITY = 'aaa'; +Got one of the listed errors +SELECT @@global.ROCKSDB_DEBUG_OPTIMIZER_NO_ZERO_CARDINALITY; +@@global.ROCKSDB_DEBUG_OPTIMIZER_NO_ZERO_CARDINALITY +1 +"Trying to set variable @@global.ROCKSDB_DEBUG_OPTIMIZER_NO_ZERO_CARDINALITY to 'bbb'" +SET @@global.ROCKSDB_DEBUG_OPTIMIZER_NO_ZERO_CARDINALITY = 'bbb'; +Got one of the listed errors +SELECT @@global.ROCKSDB_DEBUG_OPTIMIZER_NO_ZERO_CARDINALITY; +@@global.ROCKSDB_DEBUG_OPTIMIZER_NO_ZERO_CARDINALITY +1 +SET @@global.ROCKSDB_DEBUG_OPTIMIZER_NO_ZERO_CARDINALITY = @start_global_value; +SELECT @@global.ROCKSDB_DEBUG_OPTIMIZER_NO_ZERO_CARDINALITY; +@@global.ROCKSDB_DEBUG_OPTIMIZER_NO_ZERO_CARDINALITY +1 +DROP TABLE valid_values; +DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_default_cf_options_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_default_cf_options_basic.result new file mode 100644 index 0000000000000..b2b1c0e4c97ef --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_default_cf_options_basic.result @@ -0,0 +1,7 @@ +SET @start_global_value = @@global.ROCKSDB_DEFAULT_CF_OPTIONS; +SELECT @start_global_value; +@start_global_value + +"Trying to set variable @@global.ROCKSDB_DEFAULT_CF_OPTIONS to 444. 
It should fail because it is readonly." +SET @@global.ROCKSDB_DEFAULT_CF_OPTIONS = 444; +ERROR HY000: Variable 'rocksdb_default_cf_options' is a read only variable diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_delayed_write_rate_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_delayed_write_rate_basic.result new file mode 100644 index 0000000000000..3eefd822e69a8 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_delayed_write_rate_basic.result @@ -0,0 +1,85 @@ +CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO valid_values VALUES(100); +INSERT INTO valid_values VALUES(1); +INSERT INTO valid_values VALUES(0); +CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO invalid_values VALUES('\'aaa\''); +INSERT INTO invalid_values VALUES('\'bbb\''); +INSERT INTO invalid_values VALUES('\'-1\''); +INSERT INTO invalid_values VALUES('\'101\''); +INSERT INTO invalid_values VALUES('\'484436\''); +SET @start_global_value = @@global.ROCKSDB_DELAYED_WRITE_RATE; +SELECT @start_global_value; +@start_global_value +16777216 +'# Setting to valid values in global scope#' +"Trying to set variable @@global.ROCKSDB_DELAYED_WRITE_RATE to 100" +SET @@global.ROCKSDB_DELAYED_WRITE_RATE = 100; +SELECT @@global.ROCKSDB_DELAYED_WRITE_RATE; +@@global.ROCKSDB_DELAYED_WRITE_RATE +100 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_DELAYED_WRITE_RATE = DEFAULT; +SELECT @@global.ROCKSDB_DELAYED_WRITE_RATE; +@@global.ROCKSDB_DELAYED_WRITE_RATE +16777216 +"Trying to set variable @@global.ROCKSDB_DELAYED_WRITE_RATE to 1" +SET @@global.ROCKSDB_DELAYED_WRITE_RATE = 1; +SELECT @@global.ROCKSDB_DELAYED_WRITE_RATE; +@@global.ROCKSDB_DELAYED_WRITE_RATE +1 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_DELAYED_WRITE_RATE = DEFAULT; +SELECT @@global.ROCKSDB_DELAYED_WRITE_RATE; +@@global.ROCKSDB_DELAYED_WRITE_RATE +16777216 +"Trying to set variable 
@@global.ROCKSDB_DELAYED_WRITE_RATE to 0" +SET @@global.ROCKSDB_DELAYED_WRITE_RATE = 0; +SELECT @@global.ROCKSDB_DELAYED_WRITE_RATE; +@@global.ROCKSDB_DELAYED_WRITE_RATE +0 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_DELAYED_WRITE_RATE = DEFAULT; +SELECT @@global.ROCKSDB_DELAYED_WRITE_RATE; +@@global.ROCKSDB_DELAYED_WRITE_RATE +16777216 +"Trying to set variable @@session.ROCKSDB_DELAYED_WRITE_RATE to 444. It should fail because it is not session." +SET @@session.ROCKSDB_DELAYED_WRITE_RATE = 444; +ERROR HY000: Variable 'rocksdb_delayed_write_rate' is a GLOBAL variable and should be set with SET GLOBAL +'# Testing with invalid values in global scope #' +"Trying to set variable @@global.ROCKSDB_DELAYED_WRITE_RATE to 'aaa'" +SET @@global.ROCKSDB_DELAYED_WRITE_RATE = 'aaa'; +Got one of the listed errors +SELECT @@global.ROCKSDB_DELAYED_WRITE_RATE; +@@global.ROCKSDB_DELAYED_WRITE_RATE +16777216 +"Trying to set variable @@global.ROCKSDB_DELAYED_WRITE_RATE to 'bbb'" +SET @@global.ROCKSDB_DELAYED_WRITE_RATE = 'bbb'; +Got one of the listed errors +SELECT @@global.ROCKSDB_DELAYED_WRITE_RATE; +@@global.ROCKSDB_DELAYED_WRITE_RATE +16777216 +"Trying to set variable @@global.ROCKSDB_DELAYED_WRITE_RATE to '-1'" +SET @@global.ROCKSDB_DELAYED_WRITE_RATE = '-1'; +Got one of the listed errors +SELECT @@global.ROCKSDB_DELAYED_WRITE_RATE; +@@global.ROCKSDB_DELAYED_WRITE_RATE +16777216 +"Trying to set variable @@global.ROCKSDB_DELAYED_WRITE_RATE to '101'" +SET @@global.ROCKSDB_DELAYED_WRITE_RATE = '101'; +Got one of the listed errors +SELECT @@global.ROCKSDB_DELAYED_WRITE_RATE; +@@global.ROCKSDB_DELAYED_WRITE_RATE +16777216 +"Trying to set variable @@global.ROCKSDB_DELAYED_WRITE_RATE to '484436'" +SET @@global.ROCKSDB_DELAYED_WRITE_RATE = '484436'; +Got one of the listed errors +SELECT @@global.ROCKSDB_DELAYED_WRITE_RATE; +@@global.ROCKSDB_DELAYED_WRITE_RATE +16777216 +SET @@global.ROCKSDB_DELAYED_WRITE_RATE = @start_global_value; +SELECT 
@@global.ROCKSDB_DELAYED_WRITE_RATE; +@@global.ROCKSDB_DELAYED_WRITE_RATE +16777216 +DROP TABLE valid_values; +DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_delete_obsolete_files_period_micros_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_delete_obsolete_files_period_micros_basic.result new file mode 100644 index 0000000000000..2dc220fbe2078 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_delete_obsolete_files_period_micros_basic.result @@ -0,0 +1,7 @@ +SET @start_global_value = @@global.ROCKSDB_DELETE_OBSOLETE_FILES_PERIOD_MICROS; +SELECT @start_global_value; +@start_global_value +21600000000 +"Trying to set variable @@global.ROCKSDB_DELETE_OBSOLETE_FILES_PERIOD_MICROS to 444. It should fail because it is readonly." +SET @@global.ROCKSDB_DELETE_OBSOLETE_FILES_PERIOD_MICROS = 444; +ERROR HY000: Variable 'rocksdb_delete_obsolete_files_period_micros' is a read only variable diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_disable_2pc_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_disable_2pc_basic.result new file mode 100644 index 0000000000000..686f8bcd39aaa --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_disable_2pc_basic.result @@ -0,0 +1,75 @@ +CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO valid_values VALUES(1); +INSERT INTO valid_values VALUES(0); +INSERT INTO valid_values VALUES('on'); +INSERT INTO valid_values VALUES('off'); +CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO invalid_values VALUES('\'aaa\''); +INSERT INTO invalid_values VALUES('\'bbb\''); +SET @start_global_value = @@global.ROCKSDB_ENABLE_2PC; +SELECT @start_global_value; +@start_global_value +1 +'# Setting to valid values in global scope#' +"Trying to set variable @@global.ROCKSDB_ENABLE_2PC to 1" +SET @@global.ROCKSDB_ENABLE_2PC = 1; +SELECT @@global.ROCKSDB_ENABLE_2PC; 
+@@global.ROCKSDB_ENABLE_2PC +1 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_ENABLE_2PC = DEFAULT; +SELECT @@global.ROCKSDB_ENABLE_2PC; +@@global.ROCKSDB_ENABLE_2PC +1 +"Trying to set variable @@global.ROCKSDB_ENABLE_2PC to 0" +SET @@global.ROCKSDB_ENABLE_2PC = 0; +SELECT @@global.ROCKSDB_ENABLE_2PC; +@@global.ROCKSDB_ENABLE_2PC +0 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_ENABLE_2PC = DEFAULT; +SELECT @@global.ROCKSDB_ENABLE_2PC; +@@global.ROCKSDB_ENABLE_2PC +1 +"Trying to set variable @@global.ROCKSDB_ENABLE_2PC to on" +SET @@global.ROCKSDB_ENABLE_2PC = on; +SELECT @@global.ROCKSDB_ENABLE_2PC; +@@global.ROCKSDB_ENABLE_2PC +1 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_ENABLE_2PC = DEFAULT; +SELECT @@global.ROCKSDB_ENABLE_2PC; +@@global.ROCKSDB_ENABLE_2PC +1 +"Trying to set variable @@global.ROCKSDB_ENABLE_2PC to off" +SET @@global.ROCKSDB_ENABLE_2PC = off; +SELECT @@global.ROCKSDB_ENABLE_2PC; +@@global.ROCKSDB_ENABLE_2PC +0 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_ENABLE_2PC = DEFAULT; +SELECT @@global.ROCKSDB_ENABLE_2PC; +@@global.ROCKSDB_ENABLE_2PC +1 +"Trying to set variable @@session.ROCKSDB_ENABLE_2PC to 444. It should fail because it is not session." 
+SET @@session.ROCKSDB_ENABLE_2PC = 444; +ERROR HY000: Variable 'rocksdb_enable_2pc' is a GLOBAL variable and should be set with SET GLOBAL +'# Testing with invalid values in global scope #' +"Trying to set variable @@global.ROCKSDB_ENABLE_2PC to 'aaa'" +SET @@global.ROCKSDB_ENABLE_2PC = 'aaa'; +Got one of the listed errors +SELECT @@global.ROCKSDB_ENABLE_2PC; +@@global.ROCKSDB_ENABLE_2PC +1 +"Trying to set variable @@global.ROCKSDB_ENABLE_2PC to 'bbb'" +SET @@global.ROCKSDB_ENABLE_2PC = 'bbb'; +Got one of the listed errors +SELECT @@global.ROCKSDB_ENABLE_2PC; +@@global.ROCKSDB_ENABLE_2PC +1 +SET @@global.ROCKSDB_ENABLE_2PC = @start_global_value; +SELECT @@global.ROCKSDB_ENABLE_2PC; +@@global.ROCKSDB_ENABLE_2PC +1 +DROP TABLE valid_values; +DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_enable_bulk_load_api_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_enable_bulk_load_api_basic.result new file mode 100644 index 0000000000000..2c0ff289d8a3e --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_enable_bulk_load_api_basic.result @@ -0,0 +1,14 @@ +CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO valid_values VALUES(1); +INSERT INTO valid_values VALUES(1024); +CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO invalid_values VALUES('\'aaa\''); +SET @start_global_value = @@global.ROCKSDB_ENABLE_BULK_LOAD_API; +SELECT @start_global_value; +@start_global_value +1 +"Trying to set variable @@global.ROCKSDB_ENABLE_BULK_LOAD_API to 444. It should fail because it is readonly." 
+SET @@global.ROCKSDB_ENABLE_BULK_LOAD_API = 444; +ERROR HY000: Variable 'rocksdb_enable_bulk_load_api' is a read only variable +DROP TABLE valid_values; +DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_enable_thread_tracking_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_enable_thread_tracking_basic.result new file mode 100644 index 0000000000000..f12e39fff93e1 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_enable_thread_tracking_basic.result @@ -0,0 +1,7 @@ +SET @start_global_value = @@global.ROCKSDB_ENABLE_THREAD_TRACKING; +SELECT @start_global_value; +@start_global_value +0 +"Trying to set variable @@global.ROCKSDB_ENABLE_THREAD_TRACKING to 444. It should fail because it is readonly." +SET @@global.ROCKSDB_ENABLE_THREAD_TRACKING = 444; +ERROR HY000: Variable 'rocksdb_enable_thread_tracking' is a read only variable diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_enable_write_thread_adaptive_yield_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_enable_write_thread_adaptive_yield_basic.result new file mode 100644 index 0000000000000..c93152c475672 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_enable_write_thread_adaptive_yield_basic.result @@ -0,0 +1,64 @@ +CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO valid_values VALUES(1); +INSERT INTO valid_values VALUES(0); +INSERT INTO valid_values VALUES('on'); +CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO invalid_values VALUES('\'aaa\''); +INSERT INTO invalid_values VALUES('\'bbb\''); +SET @start_global_value = @@global.ROCKSDB_ENABLE_WRITE_THREAD_ADAPTIVE_YIELD; +SELECT @start_global_value; +@start_global_value +0 +'# Setting to valid values in global scope#' +"Trying to set variable @@global.ROCKSDB_ENABLE_WRITE_THREAD_ADAPTIVE_YIELD to 1" +SET @@global.ROCKSDB_ENABLE_WRITE_THREAD_ADAPTIVE_YIELD = 1; 
+SELECT @@global.ROCKSDB_ENABLE_WRITE_THREAD_ADAPTIVE_YIELD; +@@global.ROCKSDB_ENABLE_WRITE_THREAD_ADAPTIVE_YIELD +1 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_ENABLE_WRITE_THREAD_ADAPTIVE_YIELD = DEFAULT; +SELECT @@global.ROCKSDB_ENABLE_WRITE_THREAD_ADAPTIVE_YIELD; +@@global.ROCKSDB_ENABLE_WRITE_THREAD_ADAPTIVE_YIELD +0 +"Trying to set variable @@global.ROCKSDB_ENABLE_WRITE_THREAD_ADAPTIVE_YIELD to 0" +SET @@global.ROCKSDB_ENABLE_WRITE_THREAD_ADAPTIVE_YIELD = 0; +SELECT @@global.ROCKSDB_ENABLE_WRITE_THREAD_ADAPTIVE_YIELD; +@@global.ROCKSDB_ENABLE_WRITE_THREAD_ADAPTIVE_YIELD +0 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_ENABLE_WRITE_THREAD_ADAPTIVE_YIELD = DEFAULT; +SELECT @@global.ROCKSDB_ENABLE_WRITE_THREAD_ADAPTIVE_YIELD; +@@global.ROCKSDB_ENABLE_WRITE_THREAD_ADAPTIVE_YIELD +0 +"Trying to set variable @@global.ROCKSDB_ENABLE_WRITE_THREAD_ADAPTIVE_YIELD to on" +SET @@global.ROCKSDB_ENABLE_WRITE_THREAD_ADAPTIVE_YIELD = on; +SELECT @@global.ROCKSDB_ENABLE_WRITE_THREAD_ADAPTIVE_YIELD; +@@global.ROCKSDB_ENABLE_WRITE_THREAD_ADAPTIVE_YIELD +1 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_ENABLE_WRITE_THREAD_ADAPTIVE_YIELD = DEFAULT; +SELECT @@global.ROCKSDB_ENABLE_WRITE_THREAD_ADAPTIVE_YIELD; +@@global.ROCKSDB_ENABLE_WRITE_THREAD_ADAPTIVE_YIELD +0 +"Trying to set variable @@session.ROCKSDB_ENABLE_WRITE_THREAD_ADAPTIVE_YIELD to 444. It should fail because it is not session." 
+SET @@session.ROCKSDB_ENABLE_WRITE_THREAD_ADAPTIVE_YIELD = 444; +ERROR HY000: Variable 'rocksdb_enable_write_thread_adaptive_yield' is a GLOBAL variable and should be set with SET GLOBAL +'# Testing with invalid values in global scope #' +"Trying to set variable @@global.ROCKSDB_ENABLE_WRITE_THREAD_ADAPTIVE_YIELD to 'aaa'" +SET @@global.ROCKSDB_ENABLE_WRITE_THREAD_ADAPTIVE_YIELD = 'aaa'; +Got one of the listed errors +SELECT @@global.ROCKSDB_ENABLE_WRITE_THREAD_ADAPTIVE_YIELD; +@@global.ROCKSDB_ENABLE_WRITE_THREAD_ADAPTIVE_YIELD +0 +"Trying to set variable @@global.ROCKSDB_ENABLE_WRITE_THREAD_ADAPTIVE_YIELD to 'bbb'" +SET @@global.ROCKSDB_ENABLE_WRITE_THREAD_ADAPTIVE_YIELD = 'bbb'; +Got one of the listed errors +SELECT @@global.ROCKSDB_ENABLE_WRITE_THREAD_ADAPTIVE_YIELD; +@@global.ROCKSDB_ENABLE_WRITE_THREAD_ADAPTIVE_YIELD +0 +SET @@global.ROCKSDB_ENABLE_WRITE_THREAD_ADAPTIVE_YIELD = @start_global_value; +SELECT @@global.ROCKSDB_ENABLE_WRITE_THREAD_ADAPTIVE_YIELD; +@@global.ROCKSDB_ENABLE_WRITE_THREAD_ADAPTIVE_YIELD +0 +DROP TABLE valid_values; +DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_error_if_exists_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_error_if_exists_basic.result new file mode 100644 index 0000000000000..650e2956e2336 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_error_if_exists_basic.result @@ -0,0 +1,14 @@ +CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO valid_values VALUES(1); +INSERT INTO valid_values VALUES(1024); +CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO invalid_values VALUES('\'aaa\''); +SET @start_global_value = @@global.ROCKSDB_ERROR_IF_EXISTS; +SELECT @start_global_value; +@start_global_value +0 +"Trying to set variable @@global.ROCKSDB_ERROR_IF_EXISTS to 444. It should fail because it is readonly." 
+SET @@global.ROCKSDB_ERROR_IF_EXISTS = 444; +ERROR HY000: Variable 'rocksdb_error_if_exists' is a read only variable +DROP TABLE valid_values; +DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_flush_log_at_trx_commit_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_flush_log_at_trx_commit_basic.result new file mode 100644 index 0000000000000..19be4e3ad5def --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_flush_log_at_trx_commit_basic.result @@ -0,0 +1,93 @@ +CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO valid_values VALUES(2); +INSERT INTO valid_values VALUES(1); +INSERT INTO valid_values VALUES(0); +CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO invalid_values VALUES('\'aaa\''); +SET @start_global_value = @@global.ROCKSDB_FLUSH_LOG_AT_TRX_COMMIT; +SELECT @start_global_value; +@start_global_value +1 +SET @start_session_value = @@session.ROCKSDB_FLUSH_LOG_AT_TRX_COMMIT; +SELECT @start_session_value; +@start_session_value +1 +'# Setting to valid values in global scope#' +"Trying to set variable @@global.ROCKSDB_FLUSH_LOG_AT_TRX_COMMIT to 2" +SET @@global.ROCKSDB_FLUSH_LOG_AT_TRX_COMMIT = 2; +SELECT @@global.ROCKSDB_FLUSH_LOG_AT_TRX_COMMIT; +@@global.ROCKSDB_FLUSH_LOG_AT_TRX_COMMIT +2 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_FLUSH_LOG_AT_TRX_COMMIT = DEFAULT; +SELECT @@global.ROCKSDB_FLUSH_LOG_AT_TRX_COMMIT; +@@global.ROCKSDB_FLUSH_LOG_AT_TRX_COMMIT +1 +"Trying to set variable @@global.ROCKSDB_FLUSH_LOG_AT_TRX_COMMIT to 1" +SET @@global.ROCKSDB_FLUSH_LOG_AT_TRX_COMMIT = 1; +SELECT @@global.ROCKSDB_FLUSH_LOG_AT_TRX_COMMIT; +@@global.ROCKSDB_FLUSH_LOG_AT_TRX_COMMIT +1 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_FLUSH_LOG_AT_TRX_COMMIT = DEFAULT; +SELECT @@global.ROCKSDB_FLUSH_LOG_AT_TRX_COMMIT; +@@global.ROCKSDB_FLUSH_LOG_AT_TRX_COMMIT +1 +"Trying to set 
variable @@global.ROCKSDB_FLUSH_LOG_AT_TRX_COMMIT to 0" +SET @@global.ROCKSDB_FLUSH_LOG_AT_TRX_COMMIT = 0; +SELECT @@global.ROCKSDB_FLUSH_LOG_AT_TRX_COMMIT; +@@global.ROCKSDB_FLUSH_LOG_AT_TRX_COMMIT +0 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_FLUSH_LOG_AT_TRX_COMMIT = DEFAULT; +SELECT @@global.ROCKSDB_FLUSH_LOG_AT_TRX_COMMIT; +@@global.ROCKSDB_FLUSH_LOG_AT_TRX_COMMIT +1 +'# Setting to valid values in session scope#' +"Trying to set variable @@session.ROCKSDB_FLUSH_LOG_AT_TRX_COMMIT to 2" +SET @@session.ROCKSDB_FLUSH_LOG_AT_TRX_COMMIT = 2; +SELECT @@session.ROCKSDB_FLUSH_LOG_AT_TRX_COMMIT; +@@session.ROCKSDB_FLUSH_LOG_AT_TRX_COMMIT +2 +"Setting the session scope variable back to default" +SET @@session.ROCKSDB_FLUSH_LOG_AT_TRX_COMMIT = DEFAULT; +SELECT @@session.ROCKSDB_FLUSH_LOG_AT_TRX_COMMIT; +@@session.ROCKSDB_FLUSH_LOG_AT_TRX_COMMIT +1 +"Trying to set variable @@session.ROCKSDB_FLUSH_LOG_AT_TRX_COMMIT to 1" +SET @@session.ROCKSDB_FLUSH_LOG_AT_TRX_COMMIT = 1; +SELECT @@session.ROCKSDB_FLUSH_LOG_AT_TRX_COMMIT; +@@session.ROCKSDB_FLUSH_LOG_AT_TRX_COMMIT +1 +"Setting the session scope variable back to default" +SET @@session.ROCKSDB_FLUSH_LOG_AT_TRX_COMMIT = DEFAULT; +SELECT @@session.ROCKSDB_FLUSH_LOG_AT_TRX_COMMIT; +@@session.ROCKSDB_FLUSH_LOG_AT_TRX_COMMIT +1 +"Trying to set variable @@session.ROCKSDB_FLUSH_LOG_AT_TRX_COMMIT to 0" +SET @@session.ROCKSDB_FLUSH_LOG_AT_TRX_COMMIT = 0; +SELECT @@session.ROCKSDB_FLUSH_LOG_AT_TRX_COMMIT; +@@session.ROCKSDB_FLUSH_LOG_AT_TRX_COMMIT +0 +"Setting the session scope variable back to default" +SET @@session.ROCKSDB_FLUSH_LOG_AT_TRX_COMMIT = DEFAULT; +SELECT @@session.ROCKSDB_FLUSH_LOG_AT_TRX_COMMIT; +@@session.ROCKSDB_FLUSH_LOG_AT_TRX_COMMIT +1 +'# Testing with invalid values in global scope #' +"Trying to set variable @@global.ROCKSDB_FLUSH_LOG_AT_TRX_COMMIT to 'aaa'" +SET @@global.ROCKSDB_FLUSH_LOG_AT_TRX_COMMIT = 'aaa'; +Got one of the listed errors +SELECT 
@@global.ROCKSDB_FLUSH_LOG_AT_TRX_COMMIT; +@@global.ROCKSDB_FLUSH_LOG_AT_TRX_COMMIT +1 +SET @@global.ROCKSDB_FLUSH_LOG_AT_TRX_COMMIT = @start_global_value; +SELECT @@global.ROCKSDB_FLUSH_LOG_AT_TRX_COMMIT; +@@global.ROCKSDB_FLUSH_LOG_AT_TRX_COMMIT +1 +SET @@session.ROCKSDB_FLUSH_LOG_AT_TRX_COMMIT = @start_session_value; +SELECT @@session.ROCKSDB_FLUSH_LOG_AT_TRX_COMMIT; +@@session.ROCKSDB_FLUSH_LOG_AT_TRX_COMMIT +1 +DROP TABLE valid_values; +DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_flush_memtable_on_analyze_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_flush_memtable_on_analyze_basic.result new file mode 100644 index 0000000000000..165f3811f8489 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_flush_memtable_on_analyze_basic.result @@ -0,0 +1,58 @@ +drop table if exists t1; +CREATE TABLE t1 (a INT AUTO_INCREMENT, b INT, PRIMARY KEY(a)) ENGINE=rocksdb; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` int(11) NOT NULL AUTO_INCREMENT, + `b` int(11) DEFAULT NULL, + PRIMARY KEY (`a`) +) ENGINE=ROCKSDB DEFAULT CHARSET=latin1 +INSERT INTO t1 (b) VALUES (1); +INSERT INTO t1 (b) VALUES (2); +INSERT INTO t1 (b) VALUES (3); +SELECT * FROM t1; +a b +1 1 +2 2 +3 3 +set session rocksdb_flush_memtable_on_analyze=off; +ANALYZE TABLE t1; +Table Op Msg_type Msg_text +test.t1 analyze status OK +SHOW INDEXES FROM t1; +Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment +t1 0 PRIMARY 1 a A 3 NULL NULL LSMTREE +set session rocksdb_flush_memtable_on_analyze=on; +ANALYZE TABLE t1; +Table Op Msg_type Msg_text +test.t1 analyze status OK +SHOW INDEXES FROM t1; +Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment +t1 0 PRIMARY 1 a A 3 NULL NULL LSMTREE +DROP TABLE t1; +CREATE TABLE t1 (a INT AUTO_INCREMENT, b INT, PRIMARY 
KEY(a)) ENGINE=rocksdb; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` int(11) NOT NULL AUTO_INCREMENT, + `b` int(11) DEFAULT NULL, + PRIMARY KEY (`a`) +) ENGINE=ROCKSDB DEFAULT CHARSET=latin1 +INSERT INTO t1 (b) VALUES (1); +INSERT INTO t1 (b) VALUES (2); +INSERT INTO t1 (b) VALUES (3); +SELECT * FROM t1; +a b +1 1 +2 2 +3 3 +SHOW TABLE STATUS LIKE 't1'; +Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length Index_length Data_free Auto_increment Create_time Update_time Check_time Collation Checksum Create_options Comment +t1 ROCKSDB 10 Fixed # # 69 0 0 0 4 NULL NULL NULL latin1_swedish_ci NULL +ANALYZE TABLE t1; +Table Op Msg_type Msg_text +test.t1 analyze status OK +SHOW TABLE STATUS LIKE 't1'; +Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length Index_length Data_free Auto_increment Create_time Update_time Check_time Collation Checksum Create_options Comment +t1 ROCKSDB 10 Fixed # # 24 0 0 0 4 NULL NULL NULL latin1_swedish_ci NULL +DROP TABLE t1; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_force_compute_memtable_stats_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_force_compute_memtable_stats_basic.result new file mode 100644 index 0000000000000..a1c4d3caaa4dd --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_force_compute_memtable_stats_basic.result @@ -0,0 +1,15 @@ +DROP TABLE IF EXISTS t; +CREATE TABLE t (a INT PRIMARY KEY, b CHAR(8)) ENGINE=rocksdb; +INSERT INTO t (a,b) VALUES (1,'bar'),(2,'foo'); +SET @ORIG_PAUSE_BACKGROUND_WORK = @@rocksdb_force_compute_memtable_stats; +set global rocksdb_force_flush_memtable_now = true; +INSERT INTO t (a,b) VALUES (3,'dead'),(4,'beef'),(5,'a'),(6,'bbb'),(7,'c'),(8,'d'); +set global rocksdb_force_compute_memtable_stats=0; +SELECT TABLE_ROWS INTO @ROWS_EXCLUDE_MEMTABLE FROM information_schema.TABLES WHERE table_name = 't'; +set global rocksdb_force_compute_memtable_stats=1; 
+SELECT TABLE_ROWS INTO @ROWS_INCLUDE_MEMTABLE FROM information_schema.TABLES WHERE table_name = 't'; +select case when @ROWS_INCLUDE_MEMTABLE-@ROWS_EXCLUDE_MEMTABLE > 0 then 'true' else 'false' end; +case when @ROWS_INCLUDE_MEMTABLE-@ROWS_EXCLUDE_MEMTABLE > 0 then 'true' else 'false' end +true +DROP TABLE t; +set global rocksdb_force_compute_memtable_stats = @ORIG_PAUSE_BACKGROUND_WORK; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_force_flush_memtable_now_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_force_flush_memtable_now_basic.result new file mode 100644 index 0000000000000..30444e26d98d3 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_force_flush_memtable_now_basic.result @@ -0,0 +1,50 @@ +CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO valid_values VALUES(1); +INSERT INTO valid_values VALUES(0); +INSERT INTO valid_values VALUES('on'); +CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; +SET @start_global_value = @@global.ROCKSDB_FORCE_FLUSH_MEMTABLE_NOW; +SELECT @start_global_value; +@start_global_value +0 +'# Setting to valid values in global scope#' +"Trying to set variable @@global.ROCKSDB_FORCE_FLUSH_MEMTABLE_NOW to 1" +SET @@global.ROCKSDB_FORCE_FLUSH_MEMTABLE_NOW = 1; +SELECT @@global.ROCKSDB_FORCE_FLUSH_MEMTABLE_NOW; +@@global.ROCKSDB_FORCE_FLUSH_MEMTABLE_NOW +0 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_FORCE_FLUSH_MEMTABLE_NOW = DEFAULT; +SELECT @@global.ROCKSDB_FORCE_FLUSH_MEMTABLE_NOW; +@@global.ROCKSDB_FORCE_FLUSH_MEMTABLE_NOW +0 +"Trying to set variable @@global.ROCKSDB_FORCE_FLUSH_MEMTABLE_NOW to 0" +SET @@global.ROCKSDB_FORCE_FLUSH_MEMTABLE_NOW = 0; +SELECT @@global.ROCKSDB_FORCE_FLUSH_MEMTABLE_NOW; +@@global.ROCKSDB_FORCE_FLUSH_MEMTABLE_NOW +0 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_FORCE_FLUSH_MEMTABLE_NOW = DEFAULT; +SELECT @@global.ROCKSDB_FORCE_FLUSH_MEMTABLE_NOW; 
+@@global.ROCKSDB_FORCE_FLUSH_MEMTABLE_NOW +0 +"Trying to set variable @@global.ROCKSDB_FORCE_FLUSH_MEMTABLE_NOW to on" +SET @@global.ROCKSDB_FORCE_FLUSH_MEMTABLE_NOW = on; +SELECT @@global.ROCKSDB_FORCE_FLUSH_MEMTABLE_NOW; +@@global.ROCKSDB_FORCE_FLUSH_MEMTABLE_NOW +0 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_FORCE_FLUSH_MEMTABLE_NOW = DEFAULT; +SELECT @@global.ROCKSDB_FORCE_FLUSH_MEMTABLE_NOW; +@@global.ROCKSDB_FORCE_FLUSH_MEMTABLE_NOW +0 +"Trying to set variable @@session.ROCKSDB_FORCE_FLUSH_MEMTABLE_NOW to 444. It should fail because it is not session." +SET @@session.ROCKSDB_FORCE_FLUSH_MEMTABLE_NOW = 444; +ERROR HY000: Variable 'rocksdb_force_flush_memtable_now' is a GLOBAL variable and should be set with SET GLOBAL +'# Testing with invalid values in global scope #' +SET @@global.ROCKSDB_FORCE_FLUSH_MEMTABLE_NOW = @start_global_value; +SELECT @@global.ROCKSDB_FORCE_FLUSH_MEMTABLE_NOW; +@@global.ROCKSDB_FORCE_FLUSH_MEMTABLE_NOW +0 +DROP TABLE valid_values; +DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_force_index_records_in_range_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_force_index_records_in_range_basic.result new file mode 100644 index 0000000000000..1a7a21c3a9f55 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_force_index_records_in_range_basic.result @@ -0,0 +1,106 @@ +CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO valid_values VALUES(1); +INSERT INTO valid_values VALUES(0); +INSERT INTO valid_values VALUES(222333); +CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO invalid_values VALUES('\'aaa\''); +INSERT INTO invalid_values VALUES('\'bbb\''); +SET @@session.rocksdb_force_index_records_in_range = -1; +Warnings: +Warning 1292 Truncated incorrect rocksdb_force_index_records_in_r value: '-1' +SELECT @@session.rocksdb_force_index_records_in_range; 
+@@session.rocksdb_force_index_records_in_range +0 +SET @start_global_value = @@global.ROCKSDB_FORCE_INDEX_RECORDS_IN_RANGE; +SELECT @start_global_value; +@start_global_value +0 +SET @start_session_value = @@session.ROCKSDB_FORCE_INDEX_RECORDS_IN_RANGE; +SELECT @start_session_value; +@start_session_value +0 +'# Setting to valid values in global scope#' +"Trying to set variable @@global.ROCKSDB_FORCE_INDEX_RECORDS_IN_RANGE to 1" +SET @@global.ROCKSDB_FORCE_INDEX_RECORDS_IN_RANGE = 1; +SELECT @@global.ROCKSDB_FORCE_INDEX_RECORDS_IN_RANGE; +@@global.ROCKSDB_FORCE_INDEX_RECORDS_IN_RANGE +1 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_FORCE_INDEX_RECORDS_IN_RANGE = DEFAULT; +SELECT @@global.ROCKSDB_FORCE_INDEX_RECORDS_IN_RANGE; +@@global.ROCKSDB_FORCE_INDEX_RECORDS_IN_RANGE +0 +"Trying to set variable @@global.ROCKSDB_FORCE_INDEX_RECORDS_IN_RANGE to 0" +SET @@global.ROCKSDB_FORCE_INDEX_RECORDS_IN_RANGE = 0; +SELECT @@global.ROCKSDB_FORCE_INDEX_RECORDS_IN_RANGE; +@@global.ROCKSDB_FORCE_INDEX_RECORDS_IN_RANGE +0 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_FORCE_INDEX_RECORDS_IN_RANGE = DEFAULT; +SELECT @@global.ROCKSDB_FORCE_INDEX_RECORDS_IN_RANGE; +@@global.ROCKSDB_FORCE_INDEX_RECORDS_IN_RANGE +0 +"Trying to set variable @@global.ROCKSDB_FORCE_INDEX_RECORDS_IN_RANGE to 222333" +SET @@global.ROCKSDB_FORCE_INDEX_RECORDS_IN_RANGE = 222333; +SELECT @@global.ROCKSDB_FORCE_INDEX_RECORDS_IN_RANGE; +@@global.ROCKSDB_FORCE_INDEX_RECORDS_IN_RANGE +222333 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_FORCE_INDEX_RECORDS_IN_RANGE = DEFAULT; +SELECT @@global.ROCKSDB_FORCE_INDEX_RECORDS_IN_RANGE; +@@global.ROCKSDB_FORCE_INDEX_RECORDS_IN_RANGE +0 +'# Setting to valid values in session scope#' +"Trying to set variable @@session.ROCKSDB_FORCE_INDEX_RECORDS_IN_RANGE to 1" +SET @@session.ROCKSDB_FORCE_INDEX_RECORDS_IN_RANGE = 1; +SELECT @@session.ROCKSDB_FORCE_INDEX_RECORDS_IN_RANGE; 
+@@session.ROCKSDB_FORCE_INDEX_RECORDS_IN_RANGE +1 +"Setting the session scope variable back to default" +SET @@session.ROCKSDB_FORCE_INDEX_RECORDS_IN_RANGE = DEFAULT; +SELECT @@session.ROCKSDB_FORCE_INDEX_RECORDS_IN_RANGE; +@@session.ROCKSDB_FORCE_INDEX_RECORDS_IN_RANGE +0 +"Trying to set variable @@session.ROCKSDB_FORCE_INDEX_RECORDS_IN_RANGE to 0" +SET @@session.ROCKSDB_FORCE_INDEX_RECORDS_IN_RANGE = 0; +SELECT @@session.ROCKSDB_FORCE_INDEX_RECORDS_IN_RANGE; +@@session.ROCKSDB_FORCE_INDEX_RECORDS_IN_RANGE +0 +"Setting the session scope variable back to default" +SET @@session.ROCKSDB_FORCE_INDEX_RECORDS_IN_RANGE = DEFAULT; +SELECT @@session.ROCKSDB_FORCE_INDEX_RECORDS_IN_RANGE; +@@session.ROCKSDB_FORCE_INDEX_RECORDS_IN_RANGE +0 +"Trying to set variable @@session.ROCKSDB_FORCE_INDEX_RECORDS_IN_RANGE to 222333" +SET @@session.ROCKSDB_FORCE_INDEX_RECORDS_IN_RANGE = 222333; +SELECT @@session.ROCKSDB_FORCE_INDEX_RECORDS_IN_RANGE; +@@session.ROCKSDB_FORCE_INDEX_RECORDS_IN_RANGE +222333 +"Setting the session scope variable back to default" +SET @@session.ROCKSDB_FORCE_INDEX_RECORDS_IN_RANGE = DEFAULT; +SELECT @@session.ROCKSDB_FORCE_INDEX_RECORDS_IN_RANGE; +@@session.ROCKSDB_FORCE_INDEX_RECORDS_IN_RANGE +0 +'# Testing with invalid values in global scope #' +"Trying to set variable @@global.ROCKSDB_FORCE_INDEX_RECORDS_IN_RANGE to 'aaa'" +SET @@global.ROCKSDB_FORCE_INDEX_RECORDS_IN_RANGE = 'aaa'; +Got one of the listed errors +SELECT @@global.ROCKSDB_FORCE_INDEX_RECORDS_IN_RANGE; +@@global.ROCKSDB_FORCE_INDEX_RECORDS_IN_RANGE +0 +"Trying to set variable @@global.ROCKSDB_FORCE_INDEX_RECORDS_IN_RANGE to 'bbb'" +SET @@global.ROCKSDB_FORCE_INDEX_RECORDS_IN_RANGE = 'bbb'; +Got one of the listed errors +SELECT @@global.ROCKSDB_FORCE_INDEX_RECORDS_IN_RANGE; +@@global.ROCKSDB_FORCE_INDEX_RECORDS_IN_RANGE +0 +SET @@global.ROCKSDB_FORCE_INDEX_RECORDS_IN_RANGE = @start_global_value; +SELECT @@global.ROCKSDB_FORCE_INDEX_RECORDS_IN_RANGE; 
+@@global.ROCKSDB_FORCE_INDEX_RECORDS_IN_RANGE +0 +SET @@session.ROCKSDB_FORCE_INDEX_RECORDS_IN_RANGE = @start_session_value; +SELECT @@session.ROCKSDB_FORCE_INDEX_RECORDS_IN_RANGE; +@@session.ROCKSDB_FORCE_INDEX_RECORDS_IN_RANGE +0 +DROP TABLE valid_values; +DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_hash_index_allow_collision_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_hash_index_allow_collision_basic.result new file mode 100644 index 0000000000000..34deca6ce85f6 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_hash_index_allow_collision_basic.result @@ -0,0 +1,7 @@ +SET @start_global_value = @@global.ROCKSDB_HASH_INDEX_ALLOW_COLLISION; +SELECT @start_global_value; +@start_global_value +1 +"Trying to set variable @@global.ROCKSDB_HASH_INDEX_ALLOW_COLLISION to 444. It should fail because it is readonly." +SET @@global.ROCKSDB_HASH_INDEX_ALLOW_COLLISION = 444; +ERROR HY000: Variable 'rocksdb_hash_index_allow_collision' is a read only variable diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_index_type_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_index_type_basic.result new file mode 100644 index 0000000000000..97c6ed84de797 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_index_type_basic.result @@ -0,0 +1,7 @@ +SET @start_global_value = @@global.ROCKSDB_INDEX_TYPE; +SELECT @start_global_value; +@start_global_value +kBinarySearch +"Trying to set variable @@global.ROCKSDB_INDEX_TYPE to 444. It should fail because it is readonly." 
+SET @@global.ROCKSDB_INDEX_TYPE = 444; +ERROR HY000: Variable 'rocksdb_index_type' is a read only variable diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_info_log_level_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_info_log_level_basic.result new file mode 100644 index 0000000000000..1509f9ae95d6d --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_info_log_level_basic.result @@ -0,0 +1,93 @@ +CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO valid_values VALUES('fatal_level'); +INSERT INTO valid_values VALUES('error_level'); +INSERT INTO valid_values VALUES('warn_level'); +INSERT INTO valid_values VALUES('info_level'); +INSERT INTO valid_values VALUES('debug_level'); +CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO invalid_values VALUES(5); +INSERT INTO invalid_values VALUES(6); +INSERT INTO invalid_values VALUES('foo'); +SET @start_global_value = @@global.ROCKSDB_INFO_LOG_LEVEL; +SELECT @start_global_value; +@start_global_value +error_level +'# Setting to valid values in global scope#' +"Trying to set variable @@global.ROCKSDB_INFO_LOG_LEVEL to fatal_level" +SET @@global.ROCKSDB_INFO_LOG_LEVEL = fatal_level; +SELECT @@global.ROCKSDB_INFO_LOG_LEVEL; +@@global.ROCKSDB_INFO_LOG_LEVEL +fatal_level +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_INFO_LOG_LEVEL = DEFAULT; +SELECT @@global.ROCKSDB_INFO_LOG_LEVEL; +@@global.ROCKSDB_INFO_LOG_LEVEL +error_level +"Trying to set variable @@global.ROCKSDB_INFO_LOG_LEVEL to error_level" +SET @@global.ROCKSDB_INFO_LOG_LEVEL = error_level; +SELECT @@global.ROCKSDB_INFO_LOG_LEVEL; +@@global.ROCKSDB_INFO_LOG_LEVEL +error_level +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_INFO_LOG_LEVEL = DEFAULT; +SELECT @@global.ROCKSDB_INFO_LOG_LEVEL; +@@global.ROCKSDB_INFO_LOG_LEVEL +error_level +"Trying to set variable @@global.ROCKSDB_INFO_LOG_LEVEL to warn_level" +SET 
@@global.ROCKSDB_INFO_LOG_LEVEL = warn_level; +SELECT @@global.ROCKSDB_INFO_LOG_LEVEL; +@@global.ROCKSDB_INFO_LOG_LEVEL +warn_level +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_INFO_LOG_LEVEL = DEFAULT; +SELECT @@global.ROCKSDB_INFO_LOG_LEVEL; +@@global.ROCKSDB_INFO_LOG_LEVEL +error_level +"Trying to set variable @@global.ROCKSDB_INFO_LOG_LEVEL to info_level" +SET @@global.ROCKSDB_INFO_LOG_LEVEL = info_level; +SELECT @@global.ROCKSDB_INFO_LOG_LEVEL; +@@global.ROCKSDB_INFO_LOG_LEVEL +info_level +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_INFO_LOG_LEVEL = DEFAULT; +SELECT @@global.ROCKSDB_INFO_LOG_LEVEL; +@@global.ROCKSDB_INFO_LOG_LEVEL +error_level +"Trying to set variable @@global.ROCKSDB_INFO_LOG_LEVEL to debug_level" +SET @@global.ROCKSDB_INFO_LOG_LEVEL = debug_level; +SELECT @@global.ROCKSDB_INFO_LOG_LEVEL; +@@global.ROCKSDB_INFO_LOG_LEVEL +debug_level +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_INFO_LOG_LEVEL = DEFAULT; +SELECT @@global.ROCKSDB_INFO_LOG_LEVEL; +@@global.ROCKSDB_INFO_LOG_LEVEL +error_level +"Trying to set variable @@session.ROCKSDB_INFO_LOG_LEVEL to 444. It should fail because it is not session." 
+SET @@session.ROCKSDB_INFO_LOG_LEVEL = 444; +ERROR HY000: Variable 'rocksdb_info_log_level' is a GLOBAL variable and should be set with SET GLOBAL +'# Testing with invalid values in global scope #' +"Trying to set variable @@global.ROCKSDB_INFO_LOG_LEVEL to 5" +SET @@global.ROCKSDB_INFO_LOG_LEVEL = 5; +Got one of the listed errors +SELECT @@global.ROCKSDB_INFO_LOG_LEVEL; +@@global.ROCKSDB_INFO_LOG_LEVEL +error_level +"Trying to set variable @@global.ROCKSDB_INFO_LOG_LEVEL to 6" +SET @@global.ROCKSDB_INFO_LOG_LEVEL = 6; +Got one of the listed errors +SELECT @@global.ROCKSDB_INFO_LOG_LEVEL; +@@global.ROCKSDB_INFO_LOG_LEVEL +error_level +"Trying to set variable @@global.ROCKSDB_INFO_LOG_LEVEL to foo" +SET @@global.ROCKSDB_INFO_LOG_LEVEL = foo; +Got one of the listed errors +SELECT @@global.ROCKSDB_INFO_LOG_LEVEL; +@@global.ROCKSDB_INFO_LOG_LEVEL +error_level +SET @@global.ROCKSDB_INFO_LOG_LEVEL = @start_global_value; +SELECT @@global.ROCKSDB_INFO_LOG_LEVEL; +@@global.ROCKSDB_INFO_LOG_LEVEL +error_level +DROP TABLE valid_values; +DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_is_fd_close_on_exec_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_is_fd_close_on_exec_basic.result new file mode 100644 index 0000000000000..87dd0e90511a5 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_is_fd_close_on_exec_basic.result @@ -0,0 +1,7 @@ +SET @start_global_value = @@global.ROCKSDB_IS_FD_CLOSE_ON_EXEC; +SELECT @start_global_value; +@start_global_value +1 +"Trying to set variable @@global.ROCKSDB_IS_FD_CLOSE_ON_EXEC to 444. It should fail because it is readonly." 
+SET @@global.ROCKSDB_IS_FD_CLOSE_ON_EXEC = 444; +ERROR HY000: Variable 'rocksdb_is_fd_close_on_exec' is a read only variable diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_keep_log_file_num_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_keep_log_file_num_basic.result new file mode 100644 index 0000000000000..3a0c5060d007d --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_keep_log_file_num_basic.result @@ -0,0 +1,7 @@ +SET @start_global_value = @@global.ROCKSDB_KEEP_LOG_FILE_NUM; +SELECT @start_global_value; +@start_global_value +1000 +"Trying to set variable @@global.ROCKSDB_KEEP_LOG_FILE_NUM to 444. It should fail because it is readonly." +SET @@global.ROCKSDB_KEEP_LOG_FILE_NUM = 444; +ERROR HY000: Variable 'rocksdb_keep_log_file_num' is a read only variable diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_lock_scanned_rows_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_lock_scanned_rows_basic.result new file mode 100644 index 0000000000000..eff9e619967c6 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_lock_scanned_rows_basic.result @@ -0,0 +1,170 @@ +CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO valid_values VALUES(1); +INSERT INTO valid_values VALUES(0); +INSERT INTO valid_values VALUES('on'); +INSERT INTO valid_values VALUES('off'); +INSERT INTO valid_values VALUES('true'); +INSERT INTO valid_values VALUES('false'); +CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO invalid_values VALUES('\'aaa\''); +INSERT INTO invalid_values VALUES(2); +INSERT INTO invalid_values VALUES(1000); +SET @start_global_value = @@global.ROCKSDB_LOCK_SCANNED_ROWS; +SELECT @start_global_value; +@start_global_value +0 +SET @start_session_value = @@session.ROCKSDB_LOCK_SCANNED_ROWS; +SELECT @start_session_value; +@start_session_value +0 +'# Setting to valid values in global scope#' +"Trying to set 
variable @@global.ROCKSDB_LOCK_SCANNED_ROWS to 1" +SET @@global.ROCKSDB_LOCK_SCANNED_ROWS = 1; +SELECT @@global.ROCKSDB_LOCK_SCANNED_ROWS; +@@global.ROCKSDB_LOCK_SCANNED_ROWS +1 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_LOCK_SCANNED_ROWS = DEFAULT; +SELECT @@global.ROCKSDB_LOCK_SCANNED_ROWS; +@@global.ROCKSDB_LOCK_SCANNED_ROWS +0 +"Trying to set variable @@global.ROCKSDB_LOCK_SCANNED_ROWS to 0" +SET @@global.ROCKSDB_LOCK_SCANNED_ROWS = 0; +SELECT @@global.ROCKSDB_LOCK_SCANNED_ROWS; +@@global.ROCKSDB_LOCK_SCANNED_ROWS +0 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_LOCK_SCANNED_ROWS = DEFAULT; +SELECT @@global.ROCKSDB_LOCK_SCANNED_ROWS; +@@global.ROCKSDB_LOCK_SCANNED_ROWS +0 +"Trying to set variable @@global.ROCKSDB_LOCK_SCANNED_ROWS to on" +SET @@global.ROCKSDB_LOCK_SCANNED_ROWS = on; +SELECT @@global.ROCKSDB_LOCK_SCANNED_ROWS; +@@global.ROCKSDB_LOCK_SCANNED_ROWS +1 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_LOCK_SCANNED_ROWS = DEFAULT; +SELECT @@global.ROCKSDB_LOCK_SCANNED_ROWS; +@@global.ROCKSDB_LOCK_SCANNED_ROWS +0 +"Trying to set variable @@global.ROCKSDB_LOCK_SCANNED_ROWS to off" +SET @@global.ROCKSDB_LOCK_SCANNED_ROWS = off; +SELECT @@global.ROCKSDB_LOCK_SCANNED_ROWS; +@@global.ROCKSDB_LOCK_SCANNED_ROWS +0 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_LOCK_SCANNED_ROWS = DEFAULT; +SELECT @@global.ROCKSDB_LOCK_SCANNED_ROWS; +@@global.ROCKSDB_LOCK_SCANNED_ROWS +0 +"Trying to set variable @@global.ROCKSDB_LOCK_SCANNED_ROWS to true" +SET @@global.ROCKSDB_LOCK_SCANNED_ROWS = true; +SELECT @@global.ROCKSDB_LOCK_SCANNED_ROWS; +@@global.ROCKSDB_LOCK_SCANNED_ROWS +1 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_LOCK_SCANNED_ROWS = DEFAULT; +SELECT @@global.ROCKSDB_LOCK_SCANNED_ROWS; +@@global.ROCKSDB_LOCK_SCANNED_ROWS +0 +"Trying to set variable @@global.ROCKSDB_LOCK_SCANNED_ROWS to false" +SET 
@@global.ROCKSDB_LOCK_SCANNED_ROWS = false; +SELECT @@global.ROCKSDB_LOCK_SCANNED_ROWS; +@@global.ROCKSDB_LOCK_SCANNED_ROWS +0 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_LOCK_SCANNED_ROWS = DEFAULT; +SELECT @@global.ROCKSDB_LOCK_SCANNED_ROWS; +@@global.ROCKSDB_LOCK_SCANNED_ROWS +0 +'# Setting to valid values in session scope#' +"Trying to set variable @@session.ROCKSDB_LOCK_SCANNED_ROWS to 1" +SET @@session.ROCKSDB_LOCK_SCANNED_ROWS = 1; +SELECT @@session.ROCKSDB_LOCK_SCANNED_ROWS; +@@session.ROCKSDB_LOCK_SCANNED_ROWS +1 +"Setting the session scope variable back to default" +SET @@session.ROCKSDB_LOCK_SCANNED_ROWS = DEFAULT; +SELECT @@session.ROCKSDB_LOCK_SCANNED_ROWS; +@@session.ROCKSDB_LOCK_SCANNED_ROWS +0 +"Trying to set variable @@session.ROCKSDB_LOCK_SCANNED_ROWS to 0" +SET @@session.ROCKSDB_LOCK_SCANNED_ROWS = 0; +SELECT @@session.ROCKSDB_LOCK_SCANNED_ROWS; +@@session.ROCKSDB_LOCK_SCANNED_ROWS +0 +"Setting the session scope variable back to default" +SET @@session.ROCKSDB_LOCK_SCANNED_ROWS = DEFAULT; +SELECT @@session.ROCKSDB_LOCK_SCANNED_ROWS; +@@session.ROCKSDB_LOCK_SCANNED_ROWS +0 +"Trying to set variable @@session.ROCKSDB_LOCK_SCANNED_ROWS to on" +SET @@session.ROCKSDB_LOCK_SCANNED_ROWS = on; +SELECT @@session.ROCKSDB_LOCK_SCANNED_ROWS; +@@session.ROCKSDB_LOCK_SCANNED_ROWS +1 +"Setting the session scope variable back to default" +SET @@session.ROCKSDB_LOCK_SCANNED_ROWS = DEFAULT; +SELECT @@session.ROCKSDB_LOCK_SCANNED_ROWS; +@@session.ROCKSDB_LOCK_SCANNED_ROWS +0 +"Trying to set variable @@session.ROCKSDB_LOCK_SCANNED_ROWS to off" +SET @@session.ROCKSDB_LOCK_SCANNED_ROWS = off; +SELECT @@session.ROCKSDB_LOCK_SCANNED_ROWS; +@@session.ROCKSDB_LOCK_SCANNED_ROWS +0 +"Setting the session scope variable back to default" +SET @@session.ROCKSDB_LOCK_SCANNED_ROWS = DEFAULT; +SELECT @@session.ROCKSDB_LOCK_SCANNED_ROWS; +@@session.ROCKSDB_LOCK_SCANNED_ROWS +0 +"Trying to set variable @@session.ROCKSDB_LOCK_SCANNED_ROWS to true" +SET 
@@session.ROCKSDB_LOCK_SCANNED_ROWS = true; +SELECT @@session.ROCKSDB_LOCK_SCANNED_ROWS; +@@session.ROCKSDB_LOCK_SCANNED_ROWS +1 +"Setting the session scope variable back to default" +SET @@session.ROCKSDB_LOCK_SCANNED_ROWS = DEFAULT; +SELECT @@session.ROCKSDB_LOCK_SCANNED_ROWS; +@@session.ROCKSDB_LOCK_SCANNED_ROWS +0 +"Trying to set variable @@session.ROCKSDB_LOCK_SCANNED_ROWS to false" +SET @@session.ROCKSDB_LOCK_SCANNED_ROWS = false; +SELECT @@session.ROCKSDB_LOCK_SCANNED_ROWS; +@@session.ROCKSDB_LOCK_SCANNED_ROWS +0 +"Setting the session scope variable back to default" +SET @@session.ROCKSDB_LOCK_SCANNED_ROWS = DEFAULT; +SELECT @@session.ROCKSDB_LOCK_SCANNED_ROWS; +@@session.ROCKSDB_LOCK_SCANNED_ROWS +0 +'# Testing with invalid values in global scope #' +"Trying to set variable @@global.ROCKSDB_LOCK_SCANNED_ROWS to 'aaa'" +SET @@global.ROCKSDB_LOCK_SCANNED_ROWS = 'aaa'; +Got one of the listed errors +SELECT @@global.ROCKSDB_LOCK_SCANNED_ROWS; +@@global.ROCKSDB_LOCK_SCANNED_ROWS +0 +"Trying to set variable @@global.ROCKSDB_LOCK_SCANNED_ROWS to 2" +SET @@global.ROCKSDB_LOCK_SCANNED_ROWS = 2; +Got one of the listed errors +SELECT @@global.ROCKSDB_LOCK_SCANNED_ROWS; +@@global.ROCKSDB_LOCK_SCANNED_ROWS +0 +"Trying to set variable @@global.ROCKSDB_LOCK_SCANNED_ROWS to 1000" +SET @@global.ROCKSDB_LOCK_SCANNED_ROWS = 1000; +Got one of the listed errors +SELECT @@global.ROCKSDB_LOCK_SCANNED_ROWS; +@@global.ROCKSDB_LOCK_SCANNED_ROWS +0 +SET @@global.ROCKSDB_LOCK_SCANNED_ROWS = @start_global_value; +SELECT @@global.ROCKSDB_LOCK_SCANNED_ROWS; +@@global.ROCKSDB_LOCK_SCANNED_ROWS +0 +SET @@session.ROCKSDB_LOCK_SCANNED_ROWS = @start_session_value; +SELECT @@session.ROCKSDB_LOCK_SCANNED_ROWS; +@@session.ROCKSDB_LOCK_SCANNED_ROWS +0 +DROP TABLE valid_values; +DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_lock_wait_timeout_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_lock_wait_timeout_basic.result new file 
mode 100644 index 0000000000000..38df58202980c --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_lock_wait_timeout_basic.result @@ -0,0 +1,72 @@ +CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO valid_values VALUES(1); +INSERT INTO valid_values VALUES(1024); +CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO invalid_values VALUES('\'aaa\''); +SET @start_global_value = @@global.ROCKSDB_LOCK_WAIT_TIMEOUT; +SELECT @start_global_value; +@start_global_value +1 +SET @start_session_value = @@session.ROCKSDB_LOCK_WAIT_TIMEOUT; +SELECT @start_session_value; +@start_session_value +1 +'# Setting to valid values in global scope#' +"Trying to set variable @@global.ROCKSDB_LOCK_WAIT_TIMEOUT to 1" +SET @@global.ROCKSDB_LOCK_WAIT_TIMEOUT = 1; +SELECT @@global.ROCKSDB_LOCK_WAIT_TIMEOUT; +@@global.ROCKSDB_LOCK_WAIT_TIMEOUT +1 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_LOCK_WAIT_TIMEOUT = DEFAULT; +SELECT @@global.ROCKSDB_LOCK_WAIT_TIMEOUT; +@@global.ROCKSDB_LOCK_WAIT_TIMEOUT +1 +"Trying to set variable @@global.ROCKSDB_LOCK_WAIT_TIMEOUT to 1024" +SET @@global.ROCKSDB_LOCK_WAIT_TIMEOUT = 1024; +SELECT @@global.ROCKSDB_LOCK_WAIT_TIMEOUT; +@@global.ROCKSDB_LOCK_WAIT_TIMEOUT +1024 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_LOCK_WAIT_TIMEOUT = DEFAULT; +SELECT @@global.ROCKSDB_LOCK_WAIT_TIMEOUT; +@@global.ROCKSDB_LOCK_WAIT_TIMEOUT +1 +'# Setting to valid values in session scope#' +"Trying to set variable @@session.ROCKSDB_LOCK_WAIT_TIMEOUT to 1" +SET @@session.ROCKSDB_LOCK_WAIT_TIMEOUT = 1; +SELECT @@session.ROCKSDB_LOCK_WAIT_TIMEOUT; +@@session.ROCKSDB_LOCK_WAIT_TIMEOUT +1 +"Setting the session scope variable back to default" +SET @@session.ROCKSDB_LOCK_WAIT_TIMEOUT = DEFAULT; +SELECT @@session.ROCKSDB_LOCK_WAIT_TIMEOUT; +@@session.ROCKSDB_LOCK_WAIT_TIMEOUT +1 +"Trying to set variable @@session.ROCKSDB_LOCK_WAIT_TIMEOUT to 1024" +SET 
@@session.ROCKSDB_LOCK_WAIT_TIMEOUT = 1024; +SELECT @@session.ROCKSDB_LOCK_WAIT_TIMEOUT; +@@session.ROCKSDB_LOCK_WAIT_TIMEOUT +1024 +"Setting the session scope variable back to default" +SET @@session.ROCKSDB_LOCK_WAIT_TIMEOUT = DEFAULT; +SELECT @@session.ROCKSDB_LOCK_WAIT_TIMEOUT; +@@session.ROCKSDB_LOCK_WAIT_TIMEOUT +1 +'# Testing with invalid values in global scope #' +"Trying to set variable @@global.ROCKSDB_LOCK_WAIT_TIMEOUT to 'aaa'" +SET @@global.ROCKSDB_LOCK_WAIT_TIMEOUT = 'aaa'; +Got one of the listed errors +SELECT @@global.ROCKSDB_LOCK_WAIT_TIMEOUT; +@@global.ROCKSDB_LOCK_WAIT_TIMEOUT +1 +SET @@global.ROCKSDB_LOCK_WAIT_TIMEOUT = @start_global_value; +SELECT @@global.ROCKSDB_LOCK_WAIT_TIMEOUT; +@@global.ROCKSDB_LOCK_WAIT_TIMEOUT +1 +SET @@session.ROCKSDB_LOCK_WAIT_TIMEOUT = @start_session_value; +SELECT @@session.ROCKSDB_LOCK_WAIT_TIMEOUT; +@@session.ROCKSDB_LOCK_WAIT_TIMEOUT +1 +DROP TABLE valid_values; +DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_log_file_time_to_roll_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_log_file_time_to_roll_basic.result new file mode 100644 index 0000000000000..24cff58426a70 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_log_file_time_to_roll_basic.result @@ -0,0 +1,7 @@ +SET @start_global_value = @@global.ROCKSDB_LOG_FILE_TIME_TO_ROLL; +SELECT @start_global_value; +@start_global_value +0 +"Trying to set variable @@global.ROCKSDB_LOG_FILE_TIME_TO_ROLL to 444. It should fail because it is readonly." 
+SET @@global.ROCKSDB_LOG_FILE_TIME_TO_ROLL = 444; +ERROR HY000: Variable 'rocksdb_log_file_time_to_roll' is a read only variable diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_manifest_preallocation_size_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_manifest_preallocation_size_basic.result new file mode 100644 index 0000000000000..dbb331d235db3 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_manifest_preallocation_size_basic.result @@ -0,0 +1,7 @@ +SET @start_global_value = @@global.ROCKSDB_MANIFEST_PREALLOCATION_SIZE; +SELECT @start_global_value; +@start_global_value +4194304 +"Trying to set variable @@global.ROCKSDB_MANIFEST_PREALLOCATION_SIZE to 444. It should fail because it is readonly." +SET @@global.ROCKSDB_MANIFEST_PREALLOCATION_SIZE = 444; +ERROR HY000: Variable 'rocksdb_manifest_preallocation_size' is a read only variable diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_master_skip_tx_api_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_master_skip_tx_api_basic.result new file mode 100644 index 0000000000000..3f50772ded511 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_master_skip_tx_api_basic.result @@ -0,0 +1,100 @@ +CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO valid_values VALUES(1); +INSERT INTO valid_values VALUES(0); +INSERT INTO valid_values VALUES('on'); +CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO invalid_values VALUES('\'aaa\''); +INSERT INTO invalid_values VALUES('\'bbb\''); +SET @start_global_value = @@global.ROCKSDB_MASTER_SKIP_TX_API; +SELECT @start_global_value; +@start_global_value +0 +SET @start_session_value = @@session.ROCKSDB_MASTER_SKIP_TX_API; +SELECT @start_session_value; +@start_session_value +0 +'# Setting to valid values in global scope#' +"Trying to set variable @@global.ROCKSDB_MASTER_SKIP_TX_API to 1" +SET 
@@global.ROCKSDB_MASTER_SKIP_TX_API = 1; +SELECT @@global.ROCKSDB_MASTER_SKIP_TX_API; +@@global.ROCKSDB_MASTER_SKIP_TX_API +1 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_MASTER_SKIP_TX_API = DEFAULT; +SELECT @@global.ROCKSDB_MASTER_SKIP_TX_API; +@@global.ROCKSDB_MASTER_SKIP_TX_API +0 +"Trying to set variable @@global.ROCKSDB_MASTER_SKIP_TX_API to 0" +SET @@global.ROCKSDB_MASTER_SKIP_TX_API = 0; +SELECT @@global.ROCKSDB_MASTER_SKIP_TX_API; +@@global.ROCKSDB_MASTER_SKIP_TX_API +0 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_MASTER_SKIP_TX_API = DEFAULT; +SELECT @@global.ROCKSDB_MASTER_SKIP_TX_API; +@@global.ROCKSDB_MASTER_SKIP_TX_API +0 +"Trying to set variable @@global.ROCKSDB_MASTER_SKIP_TX_API to on" +SET @@global.ROCKSDB_MASTER_SKIP_TX_API = on; +SELECT @@global.ROCKSDB_MASTER_SKIP_TX_API; +@@global.ROCKSDB_MASTER_SKIP_TX_API +1 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_MASTER_SKIP_TX_API = DEFAULT; +SELECT @@global.ROCKSDB_MASTER_SKIP_TX_API; +@@global.ROCKSDB_MASTER_SKIP_TX_API +0 +'# Setting to valid values in session scope#' +"Trying to set variable @@session.ROCKSDB_MASTER_SKIP_TX_API to 1" +SET @@session.ROCKSDB_MASTER_SKIP_TX_API = 1; +SELECT @@session.ROCKSDB_MASTER_SKIP_TX_API; +@@session.ROCKSDB_MASTER_SKIP_TX_API +1 +"Setting the session scope variable back to default" +SET @@session.ROCKSDB_MASTER_SKIP_TX_API = DEFAULT; +SELECT @@session.ROCKSDB_MASTER_SKIP_TX_API; +@@session.ROCKSDB_MASTER_SKIP_TX_API +0 +"Trying to set variable @@session.ROCKSDB_MASTER_SKIP_TX_API to 0" +SET @@session.ROCKSDB_MASTER_SKIP_TX_API = 0; +SELECT @@session.ROCKSDB_MASTER_SKIP_TX_API; +@@session.ROCKSDB_MASTER_SKIP_TX_API +0 +"Setting the session scope variable back to default" +SET @@session.ROCKSDB_MASTER_SKIP_TX_API = DEFAULT; +SELECT @@session.ROCKSDB_MASTER_SKIP_TX_API; +@@session.ROCKSDB_MASTER_SKIP_TX_API +0 +"Trying to set variable @@session.ROCKSDB_MASTER_SKIP_TX_API to 
on" +SET @@session.ROCKSDB_MASTER_SKIP_TX_API = on; +SELECT @@session.ROCKSDB_MASTER_SKIP_TX_API; +@@session.ROCKSDB_MASTER_SKIP_TX_API +1 +"Setting the session scope variable back to default" +SET @@session.ROCKSDB_MASTER_SKIP_TX_API = DEFAULT; +SELECT @@session.ROCKSDB_MASTER_SKIP_TX_API; +@@session.ROCKSDB_MASTER_SKIP_TX_API +0 +'# Testing with invalid values in global scope #' +"Trying to set variable @@global.ROCKSDB_MASTER_SKIP_TX_API to 'aaa'" +SET @@global.ROCKSDB_MASTER_SKIP_TX_API = 'aaa'; +Got one of the listed errors +SELECT @@global.ROCKSDB_MASTER_SKIP_TX_API; +@@global.ROCKSDB_MASTER_SKIP_TX_API +0 +"Trying to set variable @@global.ROCKSDB_MASTER_SKIP_TX_API to 'bbb'" +SET @@global.ROCKSDB_MASTER_SKIP_TX_API = 'bbb'; +Got one of the listed errors +SELECT @@global.ROCKSDB_MASTER_SKIP_TX_API; +@@global.ROCKSDB_MASTER_SKIP_TX_API +0 +SET @@global.ROCKSDB_MASTER_SKIP_TX_API = @start_global_value; +SELECT @@global.ROCKSDB_MASTER_SKIP_TX_API; +@@global.ROCKSDB_MASTER_SKIP_TX_API +0 +SET @@session.ROCKSDB_MASTER_SKIP_TX_API = @start_session_value; +SELECT @@session.ROCKSDB_MASTER_SKIP_TX_API; +@@session.ROCKSDB_MASTER_SKIP_TX_API +0 +DROP TABLE valid_values; +DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_max_background_compactions_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_max_background_compactions_basic.result new file mode 100644 index 0000000000000..714f21011272e --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_max_background_compactions_basic.result @@ -0,0 +1,46 @@ +CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO valid_values VALUES(1); +INSERT INTO valid_values VALUES(64); +CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO invalid_values VALUES('\'abc\''); +SET @start_global_value = @@global.ROCKSDB_MAX_BACKGROUND_COMPACTIONS; +SELECT @start_global_value; +@start_global_value +1 +'# Setting to valid 
values in global scope#' +"Trying to set variable @@global.ROCKSDB_MAX_BACKGROUND_COMPACTIONS to 1" +SET @@global.ROCKSDB_MAX_BACKGROUND_COMPACTIONS = 1; +SELECT @@global.ROCKSDB_MAX_BACKGROUND_COMPACTIONS; +@@global.ROCKSDB_MAX_BACKGROUND_COMPACTIONS +1 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_MAX_BACKGROUND_COMPACTIONS = DEFAULT; +SELECT @@global.ROCKSDB_MAX_BACKGROUND_COMPACTIONS; +@@global.ROCKSDB_MAX_BACKGROUND_COMPACTIONS +1 +"Trying to set variable @@global.ROCKSDB_MAX_BACKGROUND_COMPACTIONS to 64" +SET @@global.ROCKSDB_MAX_BACKGROUND_COMPACTIONS = 64; +SELECT @@global.ROCKSDB_MAX_BACKGROUND_COMPACTIONS; +@@global.ROCKSDB_MAX_BACKGROUND_COMPACTIONS +64 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_MAX_BACKGROUND_COMPACTIONS = DEFAULT; +SELECT @@global.ROCKSDB_MAX_BACKGROUND_COMPACTIONS; +@@global.ROCKSDB_MAX_BACKGROUND_COMPACTIONS +1 +"Trying to set variable @@session.ROCKSDB_MAX_BACKGROUND_COMPACTIONS to 444. It should fail because it is not session." 
+SET @@session.ROCKSDB_MAX_BACKGROUND_COMPACTIONS = 444; +ERROR HY000: Variable 'rocksdb_max_background_compactions' is a GLOBAL variable and should be set with SET GLOBAL +'# Testing with invalid values in global scope #' +"Trying to set variable @@global.ROCKSDB_MAX_BACKGROUND_COMPACTIONS to 'abc'" +SET @@global.ROCKSDB_MAX_BACKGROUND_COMPACTIONS = 'abc'; +Got one of the listed errors +SELECT @@global.ROCKSDB_MAX_BACKGROUND_COMPACTIONS; +@@global.ROCKSDB_MAX_BACKGROUND_COMPACTIONS +1 +SET @@global.ROCKSDB_MAX_BACKGROUND_COMPACTIONS = @start_global_value; +SELECT @@global.ROCKSDB_MAX_BACKGROUND_COMPACTIONS; +@@global.ROCKSDB_MAX_BACKGROUND_COMPACTIONS +1 +DROP TABLE valid_values; +DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_max_background_flushes_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_max_background_flushes_basic.result new file mode 100644 index 0000000000000..ff8f2b5997bcf --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_max_background_flushes_basic.result @@ -0,0 +1,7 @@ +SET @start_global_value = @@global.ROCKSDB_MAX_BACKGROUND_FLUSHES; +SELECT @start_global_value; +@start_global_value +1 +"Trying to set variable @@global.ROCKSDB_MAX_BACKGROUND_FLUSHES to 444. It should fail because it is readonly." 
+SET @@global.ROCKSDB_MAX_BACKGROUND_FLUSHES = 444; +ERROR HY000: Variable 'rocksdb_max_background_flushes' is a read only variable diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_max_log_file_size_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_max_log_file_size_basic.result new file mode 100644 index 0000000000000..4359ee725d4aa --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_max_log_file_size_basic.result @@ -0,0 +1,7 @@ +SET @start_global_value = @@global.ROCKSDB_MAX_LOG_FILE_SIZE; +SELECT @start_global_value; +@start_global_value +0 +"Trying to set variable @@global.ROCKSDB_MAX_LOG_FILE_SIZE to 444. It should fail because it is readonly." +SET @@global.ROCKSDB_MAX_LOG_FILE_SIZE = 444; +ERROR HY000: Variable 'rocksdb_max_log_file_size' is a read only variable diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_max_manifest_file_size_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_max_manifest_file_size_basic.result new file mode 100644 index 0000000000000..27cddc9f60a3c --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_max_manifest_file_size_basic.result @@ -0,0 +1,7 @@ +SET @start_global_value = @@global.ROCKSDB_MAX_MANIFEST_FILE_SIZE; +SELECT @start_global_value; +@start_global_value +18446744073709551615 +"Trying to set variable @@global.ROCKSDB_MAX_MANIFEST_FILE_SIZE to 444. It should fail because it is readonly." 
+SET @@global.ROCKSDB_MAX_MANIFEST_FILE_SIZE = 444; +ERROR HY000: Variable 'rocksdb_max_manifest_file_size' is a read only variable diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_max_open_files_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_max_open_files_basic.result new file mode 100644 index 0000000000000..b058ebf05f8f7 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_max_open_files_basic.result @@ -0,0 +1,7 @@ +SET @start_global_value = @@global.ROCKSDB_MAX_OPEN_FILES; +SELECT @start_global_value; +@start_global_value +-1 +"Trying to set variable @@global.ROCKSDB_MAX_OPEN_FILES to 444. It should fail because it is readonly." +SET @@global.ROCKSDB_MAX_OPEN_FILES = 444; +ERROR HY000: Variable 'rocksdb_max_open_files' is a read only variable diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_max_row_locks_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_max_row_locks_basic.result new file mode 100644 index 0000000000000..e417e4d5c4e94 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_max_row_locks_basic.result @@ -0,0 +1,72 @@ +CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO valid_values VALUES(1); +INSERT INTO valid_values VALUES(1024); +CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO invalid_values VALUES('\'aaa\''); +SET @start_global_value = @@global.ROCKSDB_MAX_ROW_LOCKS; +SELECT @start_global_value; +@start_global_value +1073741824 +SET @start_session_value = @@session.ROCKSDB_MAX_ROW_LOCKS; +SELECT @start_session_value; +@start_session_value +1073741824 +'# Setting to valid values in global scope#' +"Trying to set variable @@global.ROCKSDB_MAX_ROW_LOCKS to 1" +SET @@global.ROCKSDB_MAX_ROW_LOCKS = 1; +SELECT @@global.ROCKSDB_MAX_ROW_LOCKS; +@@global.ROCKSDB_MAX_ROW_LOCKS +1 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_MAX_ROW_LOCKS = DEFAULT; 
+SELECT @@global.ROCKSDB_MAX_ROW_LOCKS; +@@global.ROCKSDB_MAX_ROW_LOCKS +1073741824 +"Trying to set variable @@global.ROCKSDB_MAX_ROW_LOCKS to 1024" +SET @@global.ROCKSDB_MAX_ROW_LOCKS = 1024; +SELECT @@global.ROCKSDB_MAX_ROW_LOCKS; +@@global.ROCKSDB_MAX_ROW_LOCKS +1024 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_MAX_ROW_LOCKS = DEFAULT; +SELECT @@global.ROCKSDB_MAX_ROW_LOCKS; +@@global.ROCKSDB_MAX_ROW_LOCKS +1073741824 +'# Setting to valid values in session scope#' +"Trying to set variable @@session.ROCKSDB_MAX_ROW_LOCKS to 1" +SET @@session.ROCKSDB_MAX_ROW_LOCKS = 1; +SELECT @@session.ROCKSDB_MAX_ROW_LOCKS; +@@session.ROCKSDB_MAX_ROW_LOCKS +1 +"Setting the session scope variable back to default" +SET @@session.ROCKSDB_MAX_ROW_LOCKS = DEFAULT; +SELECT @@session.ROCKSDB_MAX_ROW_LOCKS; +@@session.ROCKSDB_MAX_ROW_LOCKS +1073741824 +"Trying to set variable @@session.ROCKSDB_MAX_ROW_LOCKS to 1024" +SET @@session.ROCKSDB_MAX_ROW_LOCKS = 1024; +SELECT @@session.ROCKSDB_MAX_ROW_LOCKS; +@@session.ROCKSDB_MAX_ROW_LOCKS +1024 +"Setting the session scope variable back to default" +SET @@session.ROCKSDB_MAX_ROW_LOCKS = DEFAULT; +SELECT @@session.ROCKSDB_MAX_ROW_LOCKS; +@@session.ROCKSDB_MAX_ROW_LOCKS +1073741824 +'# Testing with invalid values in global scope #' +"Trying to set variable @@global.ROCKSDB_MAX_ROW_LOCKS to 'aaa'" +SET @@global.ROCKSDB_MAX_ROW_LOCKS = 'aaa'; +Got one of the listed errors +SELECT @@global.ROCKSDB_MAX_ROW_LOCKS; +@@global.ROCKSDB_MAX_ROW_LOCKS +1073741824 +SET @@global.ROCKSDB_MAX_ROW_LOCKS = @start_global_value; +SELECT @@global.ROCKSDB_MAX_ROW_LOCKS; +@@global.ROCKSDB_MAX_ROW_LOCKS +1073741824 +SET @@session.ROCKSDB_MAX_ROW_LOCKS = @start_session_value; +SELECT @@session.ROCKSDB_MAX_ROW_LOCKS; +@@session.ROCKSDB_MAX_ROW_LOCKS +1073741824 +DROP TABLE valid_values; +DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_max_subcompactions_basic.result 
b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_max_subcompactions_basic.result new file mode 100644 index 0000000000000..58452f580f252 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_max_subcompactions_basic.result @@ -0,0 +1,7 @@ +SET @start_global_value = @@global.ROCKSDB_MAX_SUBCOMPACTIONS; +SELECT @start_global_value; +@start_global_value +1 +"Trying to set variable @@global.ROCKSDB_MAX_SUBCOMPACTIONS to 444. It should fail because it is readonly." +SET @@global.ROCKSDB_MAX_SUBCOMPACTIONS = 444; +ERROR HY000: Variable 'rocksdb_max_subcompactions' is a read only variable diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_max_total_wal_size_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_max_total_wal_size_basic.result new file mode 100644 index 0000000000000..22c17c24e19e4 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_max_total_wal_size_basic.result @@ -0,0 +1,7 @@ +SET @start_global_value = @@global.ROCKSDB_MAX_TOTAL_WAL_SIZE; +SELECT @start_global_value; +@start_global_value +0 +"Trying to set variable @@global.ROCKSDB_MAX_TOTAL_WAL_SIZE to 444. It should fail because it is readonly." 
+SET @@global.ROCKSDB_MAX_TOTAL_WAL_SIZE = 444; +ERROR HY000: Variable 'rocksdb_max_total_wal_size' is a read only variable diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_merge_buf_size_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_merge_buf_size_basic.result new file mode 100644 index 0000000000000..5715b198d5a6b --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_merge_buf_size_basic.result @@ -0,0 +1,43 @@ +drop table if exists t1; +set session rocksdb_merge_buf_size=250; +set session rocksdb_merge_combine_read_size=1000; +CREATE TABLE t1 (i INT, j INT, PRIMARY KEY (i)) ENGINE = ROCKSDB; +ALTER TABLE t1 ADD INDEX kj(j), ALGORITHM=INPLACE; +ALTER TABLE t1 ADD INDEX kij(i,j), ALGORITHM=INPLACE; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `i` int(11) NOT NULL, + `j` int(11) DEFAULT NULL, + PRIMARY KEY (`i`), + KEY `kj` (`j`), + KEY `kij` (`i`,`j`) +) ENGINE=ROCKSDB DEFAULT CHARSET=latin1 +DROP INDEX kj on t1; +DROP INDEX kij ON t1; +ALTER TABLE t1 ADD INDEX kj(j), ADD INDEX kij(i,j), ADD INDEX kji(j,i), ALGORITHM=INPLACE; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `i` int(11) NOT NULL, + `j` int(11) DEFAULT NULL, + PRIMARY KEY (`i`), + KEY `kj` (`j`), + KEY `kij` (`i`,`j`), + KEY `kji` (`j`,`i`) +) ENGINE=ROCKSDB DEFAULT CHARSET=latin1 +DROP TABLE t1; +CREATE TABLE t1 (a INT PRIMARY KEY, b INT) ENGINE=RocksDB; +ALTER TABLE t1 ADD INDEX kb(b) comment 'rev:cf1', ALGORITHM=INPLACE; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` int(11) NOT NULL, + `b` int(11) DEFAULT NULL, + PRIMARY KEY (`a`), + KEY `kb` (`b`) COMMENT 'rev:cf1' +) ENGINE=ROCKSDB DEFAULT CHARSET=latin1 +SELECT COUNT(*) FROM t1 FORCE INDEX(kb); +COUNT(*) +100 +DROP TABLE t1; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_merge_combine_read_size_basic.result 
b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_merge_combine_read_size_basic.result new file mode 100644 index 0000000000000..5b73305cd9ead --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_merge_combine_read_size_basic.result @@ -0,0 +1,29 @@ +drop table if exists t1; +set session rocksdb_merge_buf_size=250; +set session rocksdb_merge_combine_read_size=1000; +CREATE TABLE t1 (i INT, j INT, PRIMARY KEY (i)) ENGINE = ROCKSDB; +ALTER TABLE t1 ADD INDEX kj(j), ALGORITHM=INPLACE; +ALTER TABLE t1 ADD INDEX kij(i,j), ALGORITHM=INPLACE; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `i` int(11) NOT NULL, + `j` int(11) DEFAULT NULL, + PRIMARY KEY (`i`), + KEY `kj` (`j`), + KEY `kij` (`i`,`j`) +) ENGINE=ROCKSDB DEFAULT CHARSET=latin1 +DROP INDEX kj on t1; +DROP INDEX kij ON t1; +ALTER TABLE t1 ADD INDEX kj(j), ADD INDEX kij(i,j), ADD INDEX kji(j,i), ALGORITHM=INPLACE; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `i` int(11) NOT NULL, + `j` int(11) DEFAULT NULL, + PRIMARY KEY (`i`), + KEY `kj` (`j`), + KEY `kij` (`i`,`j`), + KEY `kji` (`j`,`i`) +) ENGINE=ROCKSDB DEFAULT CHARSET=latin1 +DROP TABLE t1; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_new_table_reader_for_compaction_inputs_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_new_table_reader_for_compaction_inputs_basic.result new file mode 100644 index 0000000000000..c2daec327a2a2 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_new_table_reader_for_compaction_inputs_basic.result @@ -0,0 +1,7 @@ +SET @start_global_value = @@global.ROCKSDB_NEW_TABLE_READER_FOR_COMPACTION_INPUTS; +SELECT @start_global_value; +@start_global_value +0 +"Trying to set variable @@global.ROCKSDB_NEW_TABLE_READER_FOR_COMPACTION_INPUTS to 444. It should fail because it is readonly." 
+SET @@global.ROCKSDB_NEW_TABLE_READER_FOR_COMPACTION_INPUTS = 444; +ERROR HY000: Variable 'rocksdb_new_table_reader_for_compaction_inputs' is a read only variable diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_no_block_cache_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_no_block_cache_basic.result new file mode 100644 index 0000000000000..7bd32950303b1 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_no_block_cache_basic.result @@ -0,0 +1,7 @@ +SET @start_global_value = @@global.ROCKSDB_NO_BLOCK_CACHE; +SELECT @start_global_value; +@start_global_value +0 +"Trying to set variable @@global.ROCKSDB_NO_BLOCK_CACHE to 444. It should fail because it is readonly." +SET @@global.ROCKSDB_NO_BLOCK_CACHE = 444; +ERROR HY000: Variable 'rocksdb_no_block_cache' is a read only variable diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_override_cf_options_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_override_cf_options_basic.result new file mode 100644 index 0000000000000..59042124dc867 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_override_cf_options_basic.result @@ -0,0 +1,7 @@ +SET @start_global_value = @@global.ROCKSDB_OVERRIDE_CF_OPTIONS; +SELECT @start_global_value; +@start_global_value + +"Trying to set variable @@global.ROCKSDB_OVERRIDE_CF_OPTIONS to 444. It should fail because it is readonly." 
+SET @@global.ROCKSDB_OVERRIDE_CF_OPTIONS = 444; +ERROR HY000: Variable 'rocksdb_override_cf_options' is a read only variable diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_paranoid_checks_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_paranoid_checks_basic.result new file mode 100644 index 0000000000000..102d4926e6517 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_paranoid_checks_basic.result @@ -0,0 +1,7 @@ +SET @start_global_value = @@global.ROCKSDB_PARANOID_CHECKS; +SELECT @start_global_value; +@start_global_value +1 +"Trying to set variable @@global.ROCKSDB_PARANOID_CHECKS to 444. It should fail because it is readonly." +SET @@global.ROCKSDB_PARANOID_CHECKS = 444; +ERROR HY000: Variable 'rocksdb_paranoid_checks' is a read only variable diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_pause_background_work_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_pause_background_work_basic.result new file mode 100644 index 0000000000000..5849fe09a20b6 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_pause_background_work_basic.result @@ -0,0 +1,75 @@ +CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO valid_values VALUES(1); +INSERT INTO valid_values VALUES(0); +INSERT INTO valid_values VALUES('on'); +INSERT INTO valid_values VALUES('off'); +CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO invalid_values VALUES('\'aaa\''); +INSERT INTO invalid_values VALUES('\'bbb\''); +SET @start_global_value = @@global.ROCKSDB_PAUSE_BACKGROUND_WORK; +SELECT @start_global_value; +@start_global_value +0 +'# Setting to valid values in global scope#' +"Trying to set variable @@global.ROCKSDB_PAUSE_BACKGROUND_WORK to 1" +SET @@global.ROCKSDB_PAUSE_BACKGROUND_WORK = 1; +SELECT @@global.ROCKSDB_PAUSE_BACKGROUND_WORK; +@@global.ROCKSDB_PAUSE_BACKGROUND_WORK +1 +"Setting the global scope variable back to 
default" +SET @@global.ROCKSDB_PAUSE_BACKGROUND_WORK = DEFAULT; +SELECT @@global.ROCKSDB_PAUSE_BACKGROUND_WORK; +@@global.ROCKSDB_PAUSE_BACKGROUND_WORK +0 +"Trying to set variable @@global.ROCKSDB_PAUSE_BACKGROUND_WORK to 0" +SET @@global.ROCKSDB_PAUSE_BACKGROUND_WORK = 0; +SELECT @@global.ROCKSDB_PAUSE_BACKGROUND_WORK; +@@global.ROCKSDB_PAUSE_BACKGROUND_WORK +0 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_PAUSE_BACKGROUND_WORK = DEFAULT; +SELECT @@global.ROCKSDB_PAUSE_BACKGROUND_WORK; +@@global.ROCKSDB_PAUSE_BACKGROUND_WORK +0 +"Trying to set variable @@global.ROCKSDB_PAUSE_BACKGROUND_WORK to on" +SET @@global.ROCKSDB_PAUSE_BACKGROUND_WORK = on; +SELECT @@global.ROCKSDB_PAUSE_BACKGROUND_WORK; +@@global.ROCKSDB_PAUSE_BACKGROUND_WORK +1 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_PAUSE_BACKGROUND_WORK = DEFAULT; +SELECT @@global.ROCKSDB_PAUSE_BACKGROUND_WORK; +@@global.ROCKSDB_PAUSE_BACKGROUND_WORK +0 +"Trying to set variable @@global.ROCKSDB_PAUSE_BACKGROUND_WORK to off" +SET @@global.ROCKSDB_PAUSE_BACKGROUND_WORK = off; +SELECT @@global.ROCKSDB_PAUSE_BACKGROUND_WORK; +@@global.ROCKSDB_PAUSE_BACKGROUND_WORK +0 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_PAUSE_BACKGROUND_WORK = DEFAULT; +SELECT @@global.ROCKSDB_PAUSE_BACKGROUND_WORK; +@@global.ROCKSDB_PAUSE_BACKGROUND_WORK +0 +"Trying to set variable @@session.ROCKSDB_PAUSE_BACKGROUND_WORK to 444. It should fail because it is not session." 
+SET @@session.ROCKSDB_PAUSE_BACKGROUND_WORK = 444; +ERROR HY000: Variable 'rocksdb_pause_background_work' is a GLOBAL variable and should be set with SET GLOBAL +'# Testing with invalid values in global scope #' +"Trying to set variable @@global.ROCKSDB_PAUSE_BACKGROUND_WORK to 'aaa'" +SET @@global.ROCKSDB_PAUSE_BACKGROUND_WORK = 'aaa'; +Got one of the listed errors +SELECT @@global.ROCKSDB_PAUSE_BACKGROUND_WORK; +@@global.ROCKSDB_PAUSE_BACKGROUND_WORK +0 +"Trying to set variable @@global.ROCKSDB_PAUSE_BACKGROUND_WORK to 'bbb'" +SET @@global.ROCKSDB_PAUSE_BACKGROUND_WORK = 'bbb'; +Got one of the listed errors +SELECT @@global.ROCKSDB_PAUSE_BACKGROUND_WORK; +@@global.ROCKSDB_PAUSE_BACKGROUND_WORK +0 +SET @@global.ROCKSDB_PAUSE_BACKGROUND_WORK = @start_global_value; +SELECT @@global.ROCKSDB_PAUSE_BACKGROUND_WORK; +@@global.ROCKSDB_PAUSE_BACKGROUND_WORK +0 +DROP TABLE valid_values; +DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_perf_context_level_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_perf_context_level_basic.result new file mode 100644 index 0000000000000..292ba58a3a308 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_perf_context_level_basic.result @@ -0,0 +1,114 @@ +CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO valid_values VALUES(1); +INSERT INTO valid_values VALUES(2); +INSERT INTO valid_values VALUES(3); +INSERT INTO valid_values VALUES(4); +CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO invalid_values VALUES('\'aaa\''); +SET @start_global_value = @@global.ROCKSDB_PERF_CONTEXT_LEVEL; +SELECT @start_global_value; +@start_global_value +0 +SET @start_session_value = @@session.ROCKSDB_PERF_CONTEXT_LEVEL; +SELECT @start_session_value; +@start_session_value +0 +'# Setting to valid values in global scope#' +"Trying to set variable @@global.ROCKSDB_PERF_CONTEXT_LEVEL to 1" +SET 
@@global.ROCKSDB_PERF_CONTEXT_LEVEL = 1; +SELECT @@global.ROCKSDB_PERF_CONTEXT_LEVEL; +@@global.ROCKSDB_PERF_CONTEXT_LEVEL +1 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_PERF_CONTEXT_LEVEL = DEFAULT; +SELECT @@global.ROCKSDB_PERF_CONTEXT_LEVEL; +@@global.ROCKSDB_PERF_CONTEXT_LEVEL +0 +"Trying to set variable @@global.ROCKSDB_PERF_CONTEXT_LEVEL to 2" +SET @@global.ROCKSDB_PERF_CONTEXT_LEVEL = 2; +SELECT @@global.ROCKSDB_PERF_CONTEXT_LEVEL; +@@global.ROCKSDB_PERF_CONTEXT_LEVEL +2 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_PERF_CONTEXT_LEVEL = DEFAULT; +SELECT @@global.ROCKSDB_PERF_CONTEXT_LEVEL; +@@global.ROCKSDB_PERF_CONTEXT_LEVEL +0 +"Trying to set variable @@global.ROCKSDB_PERF_CONTEXT_LEVEL to 3" +SET @@global.ROCKSDB_PERF_CONTEXT_LEVEL = 3; +SELECT @@global.ROCKSDB_PERF_CONTEXT_LEVEL; +@@global.ROCKSDB_PERF_CONTEXT_LEVEL +3 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_PERF_CONTEXT_LEVEL = DEFAULT; +SELECT @@global.ROCKSDB_PERF_CONTEXT_LEVEL; +@@global.ROCKSDB_PERF_CONTEXT_LEVEL +0 +"Trying to set variable @@global.ROCKSDB_PERF_CONTEXT_LEVEL to 4" +SET @@global.ROCKSDB_PERF_CONTEXT_LEVEL = 4; +SELECT @@global.ROCKSDB_PERF_CONTEXT_LEVEL; +@@global.ROCKSDB_PERF_CONTEXT_LEVEL +4 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_PERF_CONTEXT_LEVEL = DEFAULT; +SELECT @@global.ROCKSDB_PERF_CONTEXT_LEVEL; +@@global.ROCKSDB_PERF_CONTEXT_LEVEL +0 +'# Setting to valid values in session scope#' +"Trying to set variable @@session.ROCKSDB_PERF_CONTEXT_LEVEL to 1" +SET @@session.ROCKSDB_PERF_CONTEXT_LEVEL = 1; +SELECT @@session.ROCKSDB_PERF_CONTEXT_LEVEL; +@@session.ROCKSDB_PERF_CONTEXT_LEVEL +1 +"Setting the session scope variable back to default" +SET @@session.ROCKSDB_PERF_CONTEXT_LEVEL = DEFAULT; +SELECT @@session.ROCKSDB_PERF_CONTEXT_LEVEL; +@@session.ROCKSDB_PERF_CONTEXT_LEVEL +0 +"Trying to set variable @@session.ROCKSDB_PERF_CONTEXT_LEVEL to 2" +SET 
@@session.ROCKSDB_PERF_CONTEXT_LEVEL = 2; +SELECT @@session.ROCKSDB_PERF_CONTEXT_LEVEL; +@@session.ROCKSDB_PERF_CONTEXT_LEVEL +2 +"Setting the session scope variable back to default" +SET @@session.ROCKSDB_PERF_CONTEXT_LEVEL = DEFAULT; +SELECT @@session.ROCKSDB_PERF_CONTEXT_LEVEL; +@@session.ROCKSDB_PERF_CONTEXT_LEVEL +0 +"Trying to set variable @@session.ROCKSDB_PERF_CONTEXT_LEVEL to 3" +SET @@session.ROCKSDB_PERF_CONTEXT_LEVEL = 3; +SELECT @@session.ROCKSDB_PERF_CONTEXT_LEVEL; +@@session.ROCKSDB_PERF_CONTEXT_LEVEL +3 +"Setting the session scope variable back to default" +SET @@session.ROCKSDB_PERF_CONTEXT_LEVEL = DEFAULT; +SELECT @@session.ROCKSDB_PERF_CONTEXT_LEVEL; +@@session.ROCKSDB_PERF_CONTEXT_LEVEL +0 +"Trying to set variable @@session.ROCKSDB_PERF_CONTEXT_LEVEL to 4" +SET @@session.ROCKSDB_PERF_CONTEXT_LEVEL = 4; +SELECT @@session.ROCKSDB_PERF_CONTEXT_LEVEL; +@@session.ROCKSDB_PERF_CONTEXT_LEVEL +4 +"Setting the session scope variable back to default" +SET @@session.ROCKSDB_PERF_CONTEXT_LEVEL = DEFAULT; +SELECT @@session.ROCKSDB_PERF_CONTEXT_LEVEL; +@@session.ROCKSDB_PERF_CONTEXT_LEVEL +0 +'# Testing with invalid values in global scope #' +"Trying to set variable @@global.ROCKSDB_PERF_CONTEXT_LEVEL to 'aaa'" +SET @@global.ROCKSDB_PERF_CONTEXT_LEVEL = 'aaa'; +Got one of the listed errors +SELECT @@global.ROCKSDB_PERF_CONTEXT_LEVEL; +@@global.ROCKSDB_PERF_CONTEXT_LEVEL +0 +SET @@global.ROCKSDB_PERF_CONTEXT_LEVEL = @start_global_value; +SELECT @@global.ROCKSDB_PERF_CONTEXT_LEVEL; +@@global.ROCKSDB_PERF_CONTEXT_LEVEL +0 +SET @@session.ROCKSDB_PERF_CONTEXT_LEVEL = @start_session_value; +SELECT @@session.ROCKSDB_PERF_CONTEXT_LEVEL; +@@session.ROCKSDB_PERF_CONTEXT_LEVEL +0 +DROP TABLE valid_values; +DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_persistent_cache_path_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_persistent_cache_path_basic.result new file mode 100644 index 
0000000000000..10b187d44e9cd --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_persistent_cache_path_basic.result @@ -0,0 +1,13 @@ +CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO valid_values VALUES('abc'); +INSERT INTO valid_values VALUES('def'); +CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; +SET @start_global_value = @@global.ROCKSDB_PERSISTENT_CACHE_PATH; +SELECT @start_global_value; +@start_global_value + +"Trying to set variable @@global.ROCKSDB_PERSISTENT_CACHE_PATH to 444. It should fail because it is readonly." +SET @@global.ROCKSDB_PERSISTENT_CACHE_PATH = 444; +ERROR HY000: Variable 'rocksdb_persistent_cache_path' is a read only variable +DROP TABLE valid_values; +DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_persistent_cache_size_mb_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_persistent_cache_size_mb_basic.result new file mode 100644 index 0000000000000..d097192545beb --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_persistent_cache_size_mb_basic.result @@ -0,0 +1,14 @@ +CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO valid_values VALUES(1); +INSERT INTO valid_values VALUES(1024); +CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO invalid_values VALUES('\'aaa\''); +SET @start_global_value = @@global.ROCKSDB_PERSISTENT_CACHE_SIZE_MB; +SELECT @start_global_value; +@start_global_value +0 +"Trying to set variable @@global.ROCKSDB_PERSISTENT_CACHE_SIZE_MB to 444. It should fail because it is readonly." 
+SET @@global.ROCKSDB_PERSISTENT_CACHE_SIZE_MB = 444; +ERROR HY000: Variable 'rocksdb_persistent_cache_size_mb' is a read only variable +DROP TABLE valid_values; +DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_pin_l0_filter_and_index_blocks_in_cache_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_pin_l0_filter_and_index_blocks_in_cache_basic.result new file mode 100644 index 0000000000000..c152ecf1e5ac1 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_pin_l0_filter_and_index_blocks_in_cache_basic.result @@ -0,0 +1,7 @@ +SET @start_global_value = @@global.ROCKSDB_PIN_L0_FILTER_AND_INDEX_BLOCKS_IN_CACHE; +SELECT @start_global_value; +@start_global_value +1 +"Trying to set variable @@global.ROCKSDB_PIN_L0_FILTER_AND_INDEX_BLOCKS_IN_CACHE to 444. It should fail because it is readonly." +SET @@global.ROCKSDB_PIN_L0_FILTER_AND_INDEX_BLOCKS_IN_CACHE = 444; +ERROR HY000: Variable 'rocksdb_pin_l0_filter_and_index_blocks_in_cache' is a read only variable diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_print_snapshot_conflict_queries_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_print_snapshot_conflict_queries_basic.result new file mode 100644 index 0000000000000..02a4b4040d744 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_print_snapshot_conflict_queries_basic.result @@ -0,0 +1,64 @@ +CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO valid_values VALUES(1); +INSERT INTO valid_values VALUES(0); +INSERT INTO valid_values VALUES('on'); +CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO invalid_values VALUES('\'aaa\''); +INSERT INTO invalid_values VALUES('\'bbb\''); +SET @start_global_value = @@global.ROCKSDB_PRINT_SNAPSHOT_CONFLICT_QUERIES; +SELECT @start_global_value; +@start_global_value +0 +'# Setting to valid values in global scope#' +"Trying to set variable 
@@global.ROCKSDB_PRINT_SNAPSHOT_CONFLICT_QUERIES to 1" +SET @@global.ROCKSDB_PRINT_SNAPSHOT_CONFLICT_QUERIES = 1; +SELECT @@global.ROCKSDB_PRINT_SNAPSHOT_CONFLICT_QUERIES; +@@global.ROCKSDB_PRINT_SNAPSHOT_CONFLICT_QUERIES +1 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_PRINT_SNAPSHOT_CONFLICT_QUERIES = DEFAULT; +SELECT @@global.ROCKSDB_PRINT_SNAPSHOT_CONFLICT_QUERIES; +@@global.ROCKSDB_PRINT_SNAPSHOT_CONFLICT_QUERIES +0 +"Trying to set variable @@global.ROCKSDB_PRINT_SNAPSHOT_CONFLICT_QUERIES to 0" +SET @@global.ROCKSDB_PRINT_SNAPSHOT_CONFLICT_QUERIES = 0; +SELECT @@global.ROCKSDB_PRINT_SNAPSHOT_CONFLICT_QUERIES; +@@global.ROCKSDB_PRINT_SNAPSHOT_CONFLICT_QUERIES +0 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_PRINT_SNAPSHOT_CONFLICT_QUERIES = DEFAULT; +SELECT @@global.ROCKSDB_PRINT_SNAPSHOT_CONFLICT_QUERIES; +@@global.ROCKSDB_PRINT_SNAPSHOT_CONFLICT_QUERIES +0 +"Trying to set variable @@global.ROCKSDB_PRINT_SNAPSHOT_CONFLICT_QUERIES to on" +SET @@global.ROCKSDB_PRINT_SNAPSHOT_CONFLICT_QUERIES = on; +SELECT @@global.ROCKSDB_PRINT_SNAPSHOT_CONFLICT_QUERIES; +@@global.ROCKSDB_PRINT_SNAPSHOT_CONFLICT_QUERIES +1 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_PRINT_SNAPSHOT_CONFLICT_QUERIES = DEFAULT; +SELECT @@global.ROCKSDB_PRINT_SNAPSHOT_CONFLICT_QUERIES; +@@global.ROCKSDB_PRINT_SNAPSHOT_CONFLICT_QUERIES +0 +"Trying to set variable @@session.ROCKSDB_PRINT_SNAPSHOT_CONFLICT_QUERIES to 444. It should fail because it is not session." 
+SET @@session.ROCKSDB_PRINT_SNAPSHOT_CONFLICT_QUERIES = 444; +ERROR HY000: Variable 'rocksdb_print_snapshot_conflict_queries' is a GLOBAL variable and should be set with SET GLOBAL +'# Testing with invalid values in global scope #' +"Trying to set variable @@global.ROCKSDB_PRINT_SNAPSHOT_CONFLICT_QUERIES to 'aaa'" +SET @@global.ROCKSDB_PRINT_SNAPSHOT_CONFLICT_QUERIES = 'aaa'; +Got one of the listed errors +SELECT @@global.ROCKSDB_PRINT_SNAPSHOT_CONFLICT_QUERIES; +@@global.ROCKSDB_PRINT_SNAPSHOT_CONFLICT_QUERIES +0 +"Trying to set variable @@global.ROCKSDB_PRINT_SNAPSHOT_CONFLICT_QUERIES to 'bbb'" +SET @@global.ROCKSDB_PRINT_SNAPSHOT_CONFLICT_QUERIES = 'bbb'; +Got one of the listed errors +SELECT @@global.ROCKSDB_PRINT_SNAPSHOT_CONFLICT_QUERIES; +@@global.ROCKSDB_PRINT_SNAPSHOT_CONFLICT_QUERIES +0 +SET @@global.ROCKSDB_PRINT_SNAPSHOT_CONFLICT_QUERIES = @start_global_value; +SELECT @@global.ROCKSDB_PRINT_SNAPSHOT_CONFLICT_QUERIES; +@@global.ROCKSDB_PRINT_SNAPSHOT_CONFLICT_QUERIES +0 +DROP TABLE valid_values; +DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_rate_limiter_bytes_per_sec_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_rate_limiter_bytes_per_sec_basic.result new file mode 100644 index 0000000000000..94eb9e340579a --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_rate_limiter_bytes_per_sec_basic.result @@ -0,0 +1,101 @@ +SET @@global.rocksdb_rate_limiter_bytes_per_sec = 10000; +Warnings: +Warning 1210 RocksDB: rocksdb_rate_limiter_bytes_per_sec cannot be dynamically changed to or from 0. Do a clean shutdown if you want to change it from or to 0. 
+CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO valid_values VALUES(1), (1000), (1000000), (1000000000), (1000000000000); +CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO invalid_values VALUES('\'aaa\''), (3.14); +SET @start_global_value = @@global.ROCKSDB_RATE_LIMITER_BYTES_PER_SEC; +SELECT @start_global_value; +@start_global_value +10000 +'# Setting to valid values in global scope#' +"Trying to set variable @@global.ROCKSDB_RATE_LIMITER_BYTES_PER_SEC to 1" +SET @@global.ROCKSDB_RATE_LIMITER_BYTES_PER_SEC = 1; +SELECT @@global.ROCKSDB_RATE_LIMITER_BYTES_PER_SEC; +@@global.ROCKSDB_RATE_LIMITER_BYTES_PER_SEC +1 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_RATE_LIMITER_BYTES_PER_SEC = DEFAULT; +Warnings: +Warning 1210 RocksDB: rocksdb_rate_limiter_bytes_per_sec cannot be dynamically changed to or from 0. Do a clean shutdown if you want to change it from or to 0. +SELECT @@global.ROCKSDB_RATE_LIMITER_BYTES_PER_SEC; +@@global.ROCKSDB_RATE_LIMITER_BYTES_PER_SEC +1 +"Trying to set variable @@global.ROCKSDB_RATE_LIMITER_BYTES_PER_SEC to 1000" +SET @@global.ROCKSDB_RATE_LIMITER_BYTES_PER_SEC = 1000; +SELECT @@global.ROCKSDB_RATE_LIMITER_BYTES_PER_SEC; +@@global.ROCKSDB_RATE_LIMITER_BYTES_PER_SEC +1000 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_RATE_LIMITER_BYTES_PER_SEC = DEFAULT; +Warnings: +Warning 1210 RocksDB: rocksdb_rate_limiter_bytes_per_sec cannot be dynamically changed to or from 0. Do a clean shutdown if you want to change it from or to 0. 
+SELECT @@global.ROCKSDB_RATE_LIMITER_BYTES_PER_SEC; +@@global.ROCKSDB_RATE_LIMITER_BYTES_PER_SEC +1000 +"Trying to set variable @@global.ROCKSDB_RATE_LIMITER_BYTES_PER_SEC to 1000000" +SET @@global.ROCKSDB_RATE_LIMITER_BYTES_PER_SEC = 1000000; +SELECT @@global.ROCKSDB_RATE_LIMITER_BYTES_PER_SEC; +@@global.ROCKSDB_RATE_LIMITER_BYTES_PER_SEC +1000000 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_RATE_LIMITER_BYTES_PER_SEC = DEFAULT; +Warnings: +Warning 1210 RocksDB: rocksdb_rate_limiter_bytes_per_sec cannot be dynamically changed to or from 0. Do a clean shutdown if you want to change it from or to 0. +SELECT @@global.ROCKSDB_RATE_LIMITER_BYTES_PER_SEC; +@@global.ROCKSDB_RATE_LIMITER_BYTES_PER_SEC +1000000 +"Trying to set variable @@global.ROCKSDB_RATE_LIMITER_BYTES_PER_SEC to 1000000000" +SET @@global.ROCKSDB_RATE_LIMITER_BYTES_PER_SEC = 1000000000; +SELECT @@global.ROCKSDB_RATE_LIMITER_BYTES_PER_SEC; +@@global.ROCKSDB_RATE_LIMITER_BYTES_PER_SEC +1000000000 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_RATE_LIMITER_BYTES_PER_SEC = DEFAULT; +Warnings: +Warning 1210 RocksDB: rocksdb_rate_limiter_bytes_per_sec cannot be dynamically changed to or from 0. Do a clean shutdown if you want to change it from or to 0. +SELECT @@global.ROCKSDB_RATE_LIMITER_BYTES_PER_SEC; +@@global.ROCKSDB_RATE_LIMITER_BYTES_PER_SEC +1000000000 +"Trying to set variable @@global.ROCKSDB_RATE_LIMITER_BYTES_PER_SEC to 1000000000000" +SET @@global.ROCKSDB_RATE_LIMITER_BYTES_PER_SEC = 1000000000000; +SELECT @@global.ROCKSDB_RATE_LIMITER_BYTES_PER_SEC; +@@global.ROCKSDB_RATE_LIMITER_BYTES_PER_SEC +1000000000000 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_RATE_LIMITER_BYTES_PER_SEC = DEFAULT; +Warnings: +Warning 1210 RocksDB: rocksdb_rate_limiter_bytes_per_sec cannot be dynamically changed to or from 0. Do a clean shutdown if you want to change it from or to 0. 
+SELECT @@global.ROCKSDB_RATE_LIMITER_BYTES_PER_SEC; +@@global.ROCKSDB_RATE_LIMITER_BYTES_PER_SEC +1000000000000 +"Trying to set variable @@session.ROCKSDB_RATE_LIMITER_BYTES_PER_SEC to 444. It should fail because it is not session." +SET @@session.ROCKSDB_RATE_LIMITER_BYTES_PER_SEC = 444; +ERROR HY000: Variable 'rocksdb_rate_limiter_bytes_per_sec' is a GLOBAL variable and should be set with SET GLOBAL +'# Testing with invalid values in global scope #' +"Trying to set variable @@global.ROCKSDB_RATE_LIMITER_BYTES_PER_SEC to 'aaa'" +SET @@global.ROCKSDB_RATE_LIMITER_BYTES_PER_SEC = 'aaa'; +Got one of the listed errors +SELECT @@global.ROCKSDB_RATE_LIMITER_BYTES_PER_SEC; +@@global.ROCKSDB_RATE_LIMITER_BYTES_PER_SEC +1000000000000 +"Trying to set variable @@global.ROCKSDB_RATE_LIMITER_BYTES_PER_SEC to 3.14" +SET @@global.ROCKSDB_RATE_LIMITER_BYTES_PER_SEC = 3.14; +Got one of the listed errors +SELECT @@global.ROCKSDB_RATE_LIMITER_BYTES_PER_SEC; +@@global.ROCKSDB_RATE_LIMITER_BYTES_PER_SEC +1000000000000 +SET @@global.ROCKSDB_RATE_LIMITER_BYTES_PER_SEC = @start_global_value; +SELECT @@global.ROCKSDB_RATE_LIMITER_BYTES_PER_SEC; +@@global.ROCKSDB_RATE_LIMITER_BYTES_PER_SEC +10000 +DROP TABLE valid_values; +DROP TABLE invalid_values; +SET @@global.rocksdb_rate_limiter_bytes_per_sec = 0; +Warnings: +Warning 1210 RocksDB: rocksdb_rate_limiter_bytes_per_sec cannot be dynamically changed to or from 0. Do a clean shutdown if you want to change it from or to 0. +SET @@global.rocksdb_rate_limiter_bytes_per_sec = -1; +Warnings: +Warning 1292 Truncated incorrect rocksdb_rate_limiter_bytes_per_s value: '-1' +Warning 1210 RocksDB: rocksdb_rate_limiter_bytes_per_sec cannot be dynamically changed to or from 0. Do a clean shutdown if you want to change it from or to 0. 
diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_read_free_rpl_tables_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_read_free_rpl_tables_basic.result new file mode 100644 index 0000000000000..b218fe034aaf6 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_read_free_rpl_tables_basic.result @@ -0,0 +1,65 @@ +CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO valid_values VALUES('a'); +INSERT INTO valid_values VALUES('b'); +CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; +SET @start_global_value = @@global.ROCKSDB_READ_FREE_RPL_TABLES; +SELECT @start_global_value; +@start_global_value + +SET @start_session_value = @@session.ROCKSDB_READ_FREE_RPL_TABLES; +SELECT @start_session_value; +@start_session_value + +'# Setting to valid values in global scope#' +"Trying to set variable @@global.ROCKSDB_READ_FREE_RPL_TABLES to a" +SET @@global.ROCKSDB_READ_FREE_RPL_TABLES = a; +SELECT @@global.ROCKSDB_READ_FREE_RPL_TABLES; +@@global.ROCKSDB_READ_FREE_RPL_TABLES +a +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_READ_FREE_RPL_TABLES = DEFAULT; +SELECT @@global.ROCKSDB_READ_FREE_RPL_TABLES; +@@global.ROCKSDB_READ_FREE_RPL_TABLES + +"Trying to set variable @@global.ROCKSDB_READ_FREE_RPL_TABLES to b" +SET @@global.ROCKSDB_READ_FREE_RPL_TABLES = b; +SELECT @@global.ROCKSDB_READ_FREE_RPL_TABLES; +@@global.ROCKSDB_READ_FREE_RPL_TABLES +b +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_READ_FREE_RPL_TABLES = DEFAULT; +SELECT @@global.ROCKSDB_READ_FREE_RPL_TABLES; +@@global.ROCKSDB_READ_FREE_RPL_TABLES + +'# Setting to valid values in session scope#' +"Trying to set variable @@session.ROCKSDB_READ_FREE_RPL_TABLES to a" +SET @@session.ROCKSDB_READ_FREE_RPL_TABLES = a; +SELECT @@session.ROCKSDB_READ_FREE_RPL_TABLES; +@@session.ROCKSDB_READ_FREE_RPL_TABLES +a +"Setting the session scope variable back to default" +SET 
@@session.ROCKSDB_READ_FREE_RPL_TABLES = DEFAULT; +SELECT @@session.ROCKSDB_READ_FREE_RPL_TABLES; +@@session.ROCKSDB_READ_FREE_RPL_TABLES + +"Trying to set variable @@session.ROCKSDB_READ_FREE_RPL_TABLES to b" +SET @@session.ROCKSDB_READ_FREE_RPL_TABLES = b; +SELECT @@session.ROCKSDB_READ_FREE_RPL_TABLES; +@@session.ROCKSDB_READ_FREE_RPL_TABLES +b +"Setting the session scope variable back to default" +SET @@session.ROCKSDB_READ_FREE_RPL_TABLES = DEFAULT; +SELECT @@session.ROCKSDB_READ_FREE_RPL_TABLES; +@@session.ROCKSDB_READ_FREE_RPL_TABLES + +'# Testing with invalid values in global scope #' +SET @@global.ROCKSDB_READ_FREE_RPL_TABLES = @start_global_value; +SELECT @@global.ROCKSDB_READ_FREE_RPL_TABLES; +@@global.ROCKSDB_READ_FREE_RPL_TABLES + +SET @@session.ROCKSDB_READ_FREE_RPL_TABLES = @start_session_value; +SELECT @@session.ROCKSDB_READ_FREE_RPL_TABLES; +@@session.ROCKSDB_READ_FREE_RPL_TABLES + +DROP TABLE valid_values; +DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_records_in_range_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_records_in_range_basic.result new file mode 100644 index 0000000000000..e866787efe00b --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_records_in_range_basic.result @@ -0,0 +1,100 @@ +CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO valid_values VALUES(1); +INSERT INTO valid_values VALUES(0); +INSERT INTO valid_values VALUES(222333); +CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO invalid_values VALUES('\'aaa\''); +INSERT INTO invalid_values VALUES('\'bbb\''); +SET @start_global_value = @@global.ROCKSDB_RECORDS_IN_RANGE; +SELECT @start_global_value; +@start_global_value +0 +SET @start_session_value = @@session.ROCKSDB_RECORDS_IN_RANGE; +SELECT @start_session_value; +@start_session_value +0 +'# Setting to valid values in global scope#' +"Trying to set variable 
@@global.ROCKSDB_RECORDS_IN_RANGE to 1" +SET @@global.ROCKSDB_RECORDS_IN_RANGE = 1; +SELECT @@global.ROCKSDB_RECORDS_IN_RANGE; +@@global.ROCKSDB_RECORDS_IN_RANGE +1 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_RECORDS_IN_RANGE = DEFAULT; +SELECT @@global.ROCKSDB_RECORDS_IN_RANGE; +@@global.ROCKSDB_RECORDS_IN_RANGE +0 +"Trying to set variable @@global.ROCKSDB_RECORDS_IN_RANGE to 0" +SET @@global.ROCKSDB_RECORDS_IN_RANGE = 0; +SELECT @@global.ROCKSDB_RECORDS_IN_RANGE; +@@global.ROCKSDB_RECORDS_IN_RANGE +0 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_RECORDS_IN_RANGE = DEFAULT; +SELECT @@global.ROCKSDB_RECORDS_IN_RANGE; +@@global.ROCKSDB_RECORDS_IN_RANGE +0 +"Trying to set variable @@global.ROCKSDB_RECORDS_IN_RANGE to 222333" +SET @@global.ROCKSDB_RECORDS_IN_RANGE = 222333; +SELECT @@global.ROCKSDB_RECORDS_IN_RANGE; +@@global.ROCKSDB_RECORDS_IN_RANGE +222333 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_RECORDS_IN_RANGE = DEFAULT; +SELECT @@global.ROCKSDB_RECORDS_IN_RANGE; +@@global.ROCKSDB_RECORDS_IN_RANGE +0 +'# Setting to valid values in session scope#' +"Trying to set variable @@session.ROCKSDB_RECORDS_IN_RANGE to 1" +SET @@session.ROCKSDB_RECORDS_IN_RANGE = 1; +SELECT @@session.ROCKSDB_RECORDS_IN_RANGE; +@@session.ROCKSDB_RECORDS_IN_RANGE +1 +"Setting the session scope variable back to default" +SET @@session.ROCKSDB_RECORDS_IN_RANGE = DEFAULT; +SELECT @@session.ROCKSDB_RECORDS_IN_RANGE; +@@session.ROCKSDB_RECORDS_IN_RANGE +0 +"Trying to set variable @@session.ROCKSDB_RECORDS_IN_RANGE to 0" +SET @@session.ROCKSDB_RECORDS_IN_RANGE = 0; +SELECT @@session.ROCKSDB_RECORDS_IN_RANGE; +@@session.ROCKSDB_RECORDS_IN_RANGE +0 +"Setting the session scope variable back to default" +SET @@session.ROCKSDB_RECORDS_IN_RANGE = DEFAULT; +SELECT @@session.ROCKSDB_RECORDS_IN_RANGE; +@@session.ROCKSDB_RECORDS_IN_RANGE +0 +"Trying to set variable @@session.ROCKSDB_RECORDS_IN_RANGE to 222333" +SET 
@@session.ROCKSDB_RECORDS_IN_RANGE = 222333; +SELECT @@session.ROCKSDB_RECORDS_IN_RANGE; +@@session.ROCKSDB_RECORDS_IN_RANGE +222333 +"Setting the session scope variable back to default" +SET @@session.ROCKSDB_RECORDS_IN_RANGE = DEFAULT; +SELECT @@session.ROCKSDB_RECORDS_IN_RANGE; +@@session.ROCKSDB_RECORDS_IN_RANGE +0 +'# Testing with invalid values in global scope #' +"Trying to set variable @@global.ROCKSDB_RECORDS_IN_RANGE to 'aaa'" +SET @@global.ROCKSDB_RECORDS_IN_RANGE = 'aaa'; +Got one of the listed errors +SELECT @@global.ROCKSDB_RECORDS_IN_RANGE; +@@global.ROCKSDB_RECORDS_IN_RANGE +0 +"Trying to set variable @@global.ROCKSDB_RECORDS_IN_RANGE to 'bbb'" +SET @@global.ROCKSDB_RECORDS_IN_RANGE = 'bbb'; +Got one of the listed errors +SELECT @@global.ROCKSDB_RECORDS_IN_RANGE; +@@global.ROCKSDB_RECORDS_IN_RANGE +0 +SET @@global.ROCKSDB_RECORDS_IN_RANGE = @start_global_value; +SELECT @@global.ROCKSDB_RECORDS_IN_RANGE; +@@global.ROCKSDB_RECORDS_IN_RANGE +0 +SET @@session.ROCKSDB_RECORDS_IN_RANGE = @start_session_value; +SELECT @@session.ROCKSDB_RECORDS_IN_RANGE; +@@session.ROCKSDB_RECORDS_IN_RANGE +0 +DROP TABLE valid_values; +DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_seconds_between_stat_computes_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_seconds_between_stat_computes_basic.result new file mode 100644 index 0000000000000..ea80d88f653c4 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_seconds_between_stat_computes_basic.result @@ -0,0 +1,64 @@ +CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO valid_values VALUES(1); +INSERT INTO valid_values VALUES(0); +INSERT INTO valid_values VALUES(1024); +CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO invalid_values VALUES('\'aaa\''); +INSERT INTO invalid_values VALUES('\'bbb\''); +SET @start_global_value = @@global.ROCKSDB_SECONDS_BETWEEN_STAT_COMPUTES; +SELECT 
@start_global_value; +@start_global_value +3600 +'# Setting to valid values in global scope#' +"Trying to set variable @@global.ROCKSDB_SECONDS_BETWEEN_STAT_COMPUTES to 1" +SET @@global.ROCKSDB_SECONDS_BETWEEN_STAT_COMPUTES = 1; +SELECT @@global.ROCKSDB_SECONDS_BETWEEN_STAT_COMPUTES; +@@global.ROCKSDB_SECONDS_BETWEEN_STAT_COMPUTES +1 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_SECONDS_BETWEEN_STAT_COMPUTES = DEFAULT; +SELECT @@global.ROCKSDB_SECONDS_BETWEEN_STAT_COMPUTES; +@@global.ROCKSDB_SECONDS_BETWEEN_STAT_COMPUTES +3600 +"Trying to set variable @@global.ROCKSDB_SECONDS_BETWEEN_STAT_COMPUTES to 0" +SET @@global.ROCKSDB_SECONDS_BETWEEN_STAT_COMPUTES = 0; +SELECT @@global.ROCKSDB_SECONDS_BETWEEN_STAT_COMPUTES; +@@global.ROCKSDB_SECONDS_BETWEEN_STAT_COMPUTES +0 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_SECONDS_BETWEEN_STAT_COMPUTES = DEFAULT; +SELECT @@global.ROCKSDB_SECONDS_BETWEEN_STAT_COMPUTES; +@@global.ROCKSDB_SECONDS_BETWEEN_STAT_COMPUTES +3600 +"Trying to set variable @@global.ROCKSDB_SECONDS_BETWEEN_STAT_COMPUTES to 1024" +SET @@global.ROCKSDB_SECONDS_BETWEEN_STAT_COMPUTES = 1024; +SELECT @@global.ROCKSDB_SECONDS_BETWEEN_STAT_COMPUTES; +@@global.ROCKSDB_SECONDS_BETWEEN_STAT_COMPUTES +1024 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_SECONDS_BETWEEN_STAT_COMPUTES = DEFAULT; +SELECT @@global.ROCKSDB_SECONDS_BETWEEN_STAT_COMPUTES; +@@global.ROCKSDB_SECONDS_BETWEEN_STAT_COMPUTES +3600 +"Trying to set variable @@session.ROCKSDB_SECONDS_BETWEEN_STAT_COMPUTES to 444. It should fail because it is not session." 
+SET @@session.ROCKSDB_SECONDS_BETWEEN_STAT_COMPUTES = 444; +ERROR HY000: Variable 'rocksdb_seconds_between_stat_computes' is a GLOBAL variable and should be set with SET GLOBAL +'# Testing with invalid values in global scope #' +"Trying to set variable @@global.ROCKSDB_SECONDS_BETWEEN_STAT_COMPUTES to 'aaa'" +SET @@global.ROCKSDB_SECONDS_BETWEEN_STAT_COMPUTES = 'aaa'; +Got one of the listed errors +SELECT @@global.ROCKSDB_SECONDS_BETWEEN_STAT_COMPUTES; +@@global.ROCKSDB_SECONDS_BETWEEN_STAT_COMPUTES +3600 +"Trying to set variable @@global.ROCKSDB_SECONDS_BETWEEN_STAT_COMPUTES to 'bbb'" +SET @@global.ROCKSDB_SECONDS_BETWEEN_STAT_COMPUTES = 'bbb'; +Got one of the listed errors +SELECT @@global.ROCKSDB_SECONDS_BETWEEN_STAT_COMPUTES; +@@global.ROCKSDB_SECONDS_BETWEEN_STAT_COMPUTES +3600 +SET @@global.ROCKSDB_SECONDS_BETWEEN_STAT_COMPUTES = @start_global_value; +SELECT @@global.ROCKSDB_SECONDS_BETWEEN_STAT_COMPUTES; +@@global.ROCKSDB_SECONDS_BETWEEN_STAT_COMPUTES +3600 +DROP TABLE valid_values; +DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_signal_drop_index_thread_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_signal_drop_index_thread_basic.result new file mode 100644 index 0000000000000..94a15275900d7 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_signal_drop_index_thread_basic.result @@ -0,0 +1,64 @@ +CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO valid_values VALUES(1); +INSERT INTO valid_values VALUES(0); +INSERT INTO valid_values VALUES('on'); +CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO invalid_values VALUES('\'aaa\''); +INSERT INTO invalid_values VALUES('\'bbb\''); +SET @start_global_value = @@global.ROCKSDB_SIGNAL_DROP_INDEX_THREAD; +SELECT @start_global_value; +@start_global_value +0 +'# Setting to valid values in global scope#' +"Trying to set variable @@global.ROCKSDB_SIGNAL_DROP_INDEX_THREAD to 1" 
+SET @@global.ROCKSDB_SIGNAL_DROP_INDEX_THREAD = 1; +SELECT @@global.ROCKSDB_SIGNAL_DROP_INDEX_THREAD; +@@global.ROCKSDB_SIGNAL_DROP_INDEX_THREAD +0 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_SIGNAL_DROP_INDEX_THREAD = DEFAULT; +SELECT @@global.ROCKSDB_SIGNAL_DROP_INDEX_THREAD; +@@global.ROCKSDB_SIGNAL_DROP_INDEX_THREAD +0 +"Trying to set variable @@global.ROCKSDB_SIGNAL_DROP_INDEX_THREAD to 0" +SET @@global.ROCKSDB_SIGNAL_DROP_INDEX_THREAD = 0; +SELECT @@global.ROCKSDB_SIGNAL_DROP_INDEX_THREAD; +@@global.ROCKSDB_SIGNAL_DROP_INDEX_THREAD +0 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_SIGNAL_DROP_INDEX_THREAD = DEFAULT; +SELECT @@global.ROCKSDB_SIGNAL_DROP_INDEX_THREAD; +@@global.ROCKSDB_SIGNAL_DROP_INDEX_THREAD +0 +"Trying to set variable @@global.ROCKSDB_SIGNAL_DROP_INDEX_THREAD to on" +SET @@global.ROCKSDB_SIGNAL_DROP_INDEX_THREAD = on; +SELECT @@global.ROCKSDB_SIGNAL_DROP_INDEX_THREAD; +@@global.ROCKSDB_SIGNAL_DROP_INDEX_THREAD +0 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_SIGNAL_DROP_INDEX_THREAD = DEFAULT; +SELECT @@global.ROCKSDB_SIGNAL_DROP_INDEX_THREAD; +@@global.ROCKSDB_SIGNAL_DROP_INDEX_THREAD +0 +"Trying to set variable @@session.ROCKSDB_SIGNAL_DROP_INDEX_THREAD to 444. It should fail because it is not session." 
+SET @@session.ROCKSDB_SIGNAL_DROP_INDEX_THREAD = 444; +ERROR HY000: Variable 'rocksdb_signal_drop_index_thread' is a GLOBAL variable and should be set with SET GLOBAL +'# Testing with invalid values in global scope #' +"Trying to set variable @@global.ROCKSDB_SIGNAL_DROP_INDEX_THREAD to 'aaa'" +SET @@global.ROCKSDB_SIGNAL_DROP_INDEX_THREAD = 'aaa'; +Got one of the listed errors +SELECT @@global.ROCKSDB_SIGNAL_DROP_INDEX_THREAD; +@@global.ROCKSDB_SIGNAL_DROP_INDEX_THREAD +0 +"Trying to set variable @@global.ROCKSDB_SIGNAL_DROP_INDEX_THREAD to 'bbb'" +SET @@global.ROCKSDB_SIGNAL_DROP_INDEX_THREAD = 'bbb'; +Got one of the listed errors +SELECT @@global.ROCKSDB_SIGNAL_DROP_INDEX_THREAD; +@@global.ROCKSDB_SIGNAL_DROP_INDEX_THREAD +0 +SET @@global.ROCKSDB_SIGNAL_DROP_INDEX_THREAD = @start_global_value; +SELECT @@global.ROCKSDB_SIGNAL_DROP_INDEX_THREAD; +@@global.ROCKSDB_SIGNAL_DROP_INDEX_THREAD +0 +DROP TABLE valid_values; +DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_skip_bloom_filter_on_read_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_skip_bloom_filter_on_read_basic.result new file mode 100644 index 0000000000000..201bc5009ce53 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_skip_bloom_filter_on_read_basic.result @@ -0,0 +1,100 @@ +CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO valid_values VALUES(1); +INSERT INTO valid_values VALUES(0); +INSERT INTO valid_values VALUES('on'); +CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO invalid_values VALUES('\'aaa\''); +INSERT INTO invalid_values VALUES('\'bbb\''); +SET @start_global_value = @@global.ROCKSDB_SKIP_BLOOM_FILTER_ON_READ; +SELECT @start_global_value; +@start_global_value +0 +SET @start_session_value = @@session.ROCKSDB_SKIP_BLOOM_FILTER_ON_READ; +SELECT @start_session_value; +@start_session_value +0 +'# Setting to valid values in global scope#' +"Trying to set 
variable @@global.ROCKSDB_SKIP_BLOOM_FILTER_ON_READ to 1" +SET @@global.ROCKSDB_SKIP_BLOOM_FILTER_ON_READ = 1; +SELECT @@global.ROCKSDB_SKIP_BLOOM_FILTER_ON_READ; +@@global.ROCKSDB_SKIP_BLOOM_FILTER_ON_READ +1 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_SKIP_BLOOM_FILTER_ON_READ = DEFAULT; +SELECT @@global.ROCKSDB_SKIP_BLOOM_FILTER_ON_READ; +@@global.ROCKSDB_SKIP_BLOOM_FILTER_ON_READ +0 +"Trying to set variable @@global.ROCKSDB_SKIP_BLOOM_FILTER_ON_READ to 0" +SET @@global.ROCKSDB_SKIP_BLOOM_FILTER_ON_READ = 0; +SELECT @@global.ROCKSDB_SKIP_BLOOM_FILTER_ON_READ; +@@global.ROCKSDB_SKIP_BLOOM_FILTER_ON_READ +0 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_SKIP_BLOOM_FILTER_ON_READ = DEFAULT; +SELECT @@global.ROCKSDB_SKIP_BLOOM_FILTER_ON_READ; +@@global.ROCKSDB_SKIP_BLOOM_FILTER_ON_READ +0 +"Trying to set variable @@global.ROCKSDB_SKIP_BLOOM_FILTER_ON_READ to on" +SET @@global.ROCKSDB_SKIP_BLOOM_FILTER_ON_READ = on; +SELECT @@global.ROCKSDB_SKIP_BLOOM_FILTER_ON_READ; +@@global.ROCKSDB_SKIP_BLOOM_FILTER_ON_READ +1 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_SKIP_BLOOM_FILTER_ON_READ = DEFAULT; +SELECT @@global.ROCKSDB_SKIP_BLOOM_FILTER_ON_READ; +@@global.ROCKSDB_SKIP_BLOOM_FILTER_ON_READ +0 +'# Setting to valid values in session scope#' +"Trying to set variable @@session.ROCKSDB_SKIP_BLOOM_FILTER_ON_READ to 1" +SET @@session.ROCKSDB_SKIP_BLOOM_FILTER_ON_READ = 1; +SELECT @@session.ROCKSDB_SKIP_BLOOM_FILTER_ON_READ; +@@session.ROCKSDB_SKIP_BLOOM_FILTER_ON_READ +1 +"Setting the session scope variable back to default" +SET @@session.ROCKSDB_SKIP_BLOOM_FILTER_ON_READ = DEFAULT; +SELECT @@session.ROCKSDB_SKIP_BLOOM_FILTER_ON_READ; +@@session.ROCKSDB_SKIP_BLOOM_FILTER_ON_READ +0 +"Trying to set variable @@session.ROCKSDB_SKIP_BLOOM_FILTER_ON_READ to 0" +SET @@session.ROCKSDB_SKIP_BLOOM_FILTER_ON_READ = 0; +SELECT @@session.ROCKSDB_SKIP_BLOOM_FILTER_ON_READ; 
+@@session.ROCKSDB_SKIP_BLOOM_FILTER_ON_READ +0 +"Setting the session scope variable back to default" +SET @@session.ROCKSDB_SKIP_BLOOM_FILTER_ON_READ = DEFAULT; +SELECT @@session.ROCKSDB_SKIP_BLOOM_FILTER_ON_READ; +@@session.ROCKSDB_SKIP_BLOOM_FILTER_ON_READ +0 +"Trying to set variable @@session.ROCKSDB_SKIP_BLOOM_FILTER_ON_READ to on" +SET @@session.ROCKSDB_SKIP_BLOOM_FILTER_ON_READ = on; +SELECT @@session.ROCKSDB_SKIP_BLOOM_FILTER_ON_READ; +@@session.ROCKSDB_SKIP_BLOOM_FILTER_ON_READ +1 +"Setting the session scope variable back to default" +SET @@session.ROCKSDB_SKIP_BLOOM_FILTER_ON_READ = DEFAULT; +SELECT @@session.ROCKSDB_SKIP_BLOOM_FILTER_ON_READ; +@@session.ROCKSDB_SKIP_BLOOM_FILTER_ON_READ +0 +'# Testing with invalid values in global scope #' +"Trying to set variable @@global.ROCKSDB_SKIP_BLOOM_FILTER_ON_READ to 'aaa'" +SET @@global.ROCKSDB_SKIP_BLOOM_FILTER_ON_READ = 'aaa'; +Got one of the listed errors +SELECT @@global.ROCKSDB_SKIP_BLOOM_FILTER_ON_READ; +@@global.ROCKSDB_SKIP_BLOOM_FILTER_ON_READ +0 +"Trying to set variable @@global.ROCKSDB_SKIP_BLOOM_FILTER_ON_READ to 'bbb'" +SET @@global.ROCKSDB_SKIP_BLOOM_FILTER_ON_READ = 'bbb'; +Got one of the listed errors +SELECT @@global.ROCKSDB_SKIP_BLOOM_FILTER_ON_READ; +@@global.ROCKSDB_SKIP_BLOOM_FILTER_ON_READ +0 +SET @@global.ROCKSDB_SKIP_BLOOM_FILTER_ON_READ = @start_global_value; +SELECT @@global.ROCKSDB_SKIP_BLOOM_FILTER_ON_READ; +@@global.ROCKSDB_SKIP_BLOOM_FILTER_ON_READ +0 +SET @@session.ROCKSDB_SKIP_BLOOM_FILTER_ON_READ = @start_session_value; +SELECT @@session.ROCKSDB_SKIP_BLOOM_FILTER_ON_READ; +@@session.ROCKSDB_SKIP_BLOOM_FILTER_ON_READ +0 +DROP TABLE valid_values; +DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_skip_fill_cache_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_skip_fill_cache_basic.result new file mode 100644 index 0000000000000..a843851cf26e2 --- /dev/null +++ 
b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_skip_fill_cache_basic.result @@ -0,0 +1,100 @@ +CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO valid_values VALUES(1); +INSERT INTO valid_values VALUES(0); +INSERT INTO valid_values VALUES('on'); +CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO invalid_values VALUES('\'aaa\''); +INSERT INTO invalid_values VALUES('\'bbb\''); +SET @start_global_value = @@global.ROCKSDB_SKIP_FILL_CACHE; +SELECT @start_global_value; +@start_global_value +0 +SET @start_session_value = @@session.ROCKSDB_SKIP_FILL_CACHE; +SELECT @start_session_value; +@start_session_value +0 +'# Setting to valid values in global scope#' +"Trying to set variable @@global.ROCKSDB_SKIP_FILL_CACHE to 1" +SET @@global.ROCKSDB_SKIP_FILL_CACHE = 1; +SELECT @@global.ROCKSDB_SKIP_FILL_CACHE; +@@global.ROCKSDB_SKIP_FILL_CACHE +1 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_SKIP_FILL_CACHE = DEFAULT; +SELECT @@global.ROCKSDB_SKIP_FILL_CACHE; +@@global.ROCKSDB_SKIP_FILL_CACHE +0 +"Trying to set variable @@global.ROCKSDB_SKIP_FILL_CACHE to 0" +SET @@global.ROCKSDB_SKIP_FILL_CACHE = 0; +SELECT @@global.ROCKSDB_SKIP_FILL_CACHE; +@@global.ROCKSDB_SKIP_FILL_CACHE +0 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_SKIP_FILL_CACHE = DEFAULT; +SELECT @@global.ROCKSDB_SKIP_FILL_CACHE; +@@global.ROCKSDB_SKIP_FILL_CACHE +0 +"Trying to set variable @@global.ROCKSDB_SKIP_FILL_CACHE to on" +SET @@global.ROCKSDB_SKIP_FILL_CACHE = on; +SELECT @@global.ROCKSDB_SKIP_FILL_CACHE; +@@global.ROCKSDB_SKIP_FILL_CACHE +1 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_SKIP_FILL_CACHE = DEFAULT; +SELECT @@global.ROCKSDB_SKIP_FILL_CACHE; +@@global.ROCKSDB_SKIP_FILL_CACHE +0 +'# Setting to valid values in session scope#' +"Trying to set variable @@session.ROCKSDB_SKIP_FILL_CACHE to 1" +SET @@session.ROCKSDB_SKIP_FILL_CACHE = 1; +SELECT 
@@session.ROCKSDB_SKIP_FILL_CACHE; +@@session.ROCKSDB_SKIP_FILL_CACHE +1 +"Setting the session scope variable back to default" +SET @@session.ROCKSDB_SKIP_FILL_CACHE = DEFAULT; +SELECT @@session.ROCKSDB_SKIP_FILL_CACHE; +@@session.ROCKSDB_SKIP_FILL_CACHE +0 +"Trying to set variable @@session.ROCKSDB_SKIP_FILL_CACHE to 0" +SET @@session.ROCKSDB_SKIP_FILL_CACHE = 0; +SELECT @@session.ROCKSDB_SKIP_FILL_CACHE; +@@session.ROCKSDB_SKIP_FILL_CACHE +0 +"Setting the session scope variable back to default" +SET @@session.ROCKSDB_SKIP_FILL_CACHE = DEFAULT; +SELECT @@session.ROCKSDB_SKIP_FILL_CACHE; +@@session.ROCKSDB_SKIP_FILL_CACHE +0 +"Trying to set variable @@session.ROCKSDB_SKIP_FILL_CACHE to on" +SET @@session.ROCKSDB_SKIP_FILL_CACHE = on; +SELECT @@session.ROCKSDB_SKIP_FILL_CACHE; +@@session.ROCKSDB_SKIP_FILL_CACHE +1 +"Setting the session scope variable back to default" +SET @@session.ROCKSDB_SKIP_FILL_CACHE = DEFAULT; +SELECT @@session.ROCKSDB_SKIP_FILL_CACHE; +@@session.ROCKSDB_SKIP_FILL_CACHE +0 +'# Testing with invalid values in global scope #' +"Trying to set variable @@global.ROCKSDB_SKIP_FILL_CACHE to 'aaa'" +SET @@global.ROCKSDB_SKIP_FILL_CACHE = 'aaa'; +Got one of the listed errors +SELECT @@global.ROCKSDB_SKIP_FILL_CACHE; +@@global.ROCKSDB_SKIP_FILL_CACHE +0 +"Trying to set variable @@global.ROCKSDB_SKIP_FILL_CACHE to 'bbb'" +SET @@global.ROCKSDB_SKIP_FILL_CACHE = 'bbb'; +Got one of the listed errors +SELECT @@global.ROCKSDB_SKIP_FILL_CACHE; +@@global.ROCKSDB_SKIP_FILL_CACHE +0 +SET @@global.ROCKSDB_SKIP_FILL_CACHE = @start_global_value; +SELECT @@global.ROCKSDB_SKIP_FILL_CACHE; +@@global.ROCKSDB_SKIP_FILL_CACHE +0 +SET @@session.ROCKSDB_SKIP_FILL_CACHE = @start_session_value; +SELECT @@session.ROCKSDB_SKIP_FILL_CACHE; +@@session.ROCKSDB_SKIP_FILL_CACHE +0 +DROP TABLE valid_values; +DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_skip_unique_check_tables_basic.result 
b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_skip_unique_check_tables_basic.result new file mode 100644 index 0000000000000..3e169671cc066 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_skip_unique_check_tables_basic.result @@ -0,0 +1,65 @@ +CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO valid_values VALUES("aaa"); +INSERT INTO valid_values VALUES("bbb"); +CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; +SET @start_global_value = @@global.ROCKSDB_SKIP_UNIQUE_CHECK_TABLES; +SELECT @start_global_value; +@start_global_value +.* +SET @start_session_value = @@session.ROCKSDB_SKIP_UNIQUE_CHECK_TABLES; +SELECT @start_session_value; +@start_session_value +.* +'# Setting to valid values in global scope#' +"Trying to set variable @@global.ROCKSDB_SKIP_UNIQUE_CHECK_TABLES to aaa" +SET @@global.ROCKSDB_SKIP_UNIQUE_CHECK_TABLES = aaa; +SELECT @@global.ROCKSDB_SKIP_UNIQUE_CHECK_TABLES; +@@global.ROCKSDB_SKIP_UNIQUE_CHECK_TABLES +aaa +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_SKIP_UNIQUE_CHECK_TABLES = DEFAULT; +SELECT @@global.ROCKSDB_SKIP_UNIQUE_CHECK_TABLES; +@@global.ROCKSDB_SKIP_UNIQUE_CHECK_TABLES +.* +"Trying to set variable @@global.ROCKSDB_SKIP_UNIQUE_CHECK_TABLES to bbb" +SET @@global.ROCKSDB_SKIP_UNIQUE_CHECK_TABLES = bbb; +SELECT @@global.ROCKSDB_SKIP_UNIQUE_CHECK_TABLES; +@@global.ROCKSDB_SKIP_UNIQUE_CHECK_TABLES +bbb +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_SKIP_UNIQUE_CHECK_TABLES = DEFAULT; +SELECT @@global.ROCKSDB_SKIP_UNIQUE_CHECK_TABLES; +@@global.ROCKSDB_SKIP_UNIQUE_CHECK_TABLES +.* +'# Setting to valid values in session scope#' +"Trying to set variable @@session.ROCKSDB_SKIP_UNIQUE_CHECK_TABLES to aaa" +SET @@session.ROCKSDB_SKIP_UNIQUE_CHECK_TABLES = aaa; +SELECT @@session.ROCKSDB_SKIP_UNIQUE_CHECK_TABLES; +@@session.ROCKSDB_SKIP_UNIQUE_CHECK_TABLES +aaa +"Setting the session scope variable back to default" +SET 
@@session.ROCKSDB_SKIP_UNIQUE_CHECK_TABLES = DEFAULT; +SELECT @@session.ROCKSDB_SKIP_UNIQUE_CHECK_TABLES; +@@session.ROCKSDB_SKIP_UNIQUE_CHECK_TABLES +.* +"Trying to set variable @@session.ROCKSDB_SKIP_UNIQUE_CHECK_TABLES to bbb" +SET @@session.ROCKSDB_SKIP_UNIQUE_CHECK_TABLES = bbb; +SELECT @@session.ROCKSDB_SKIP_UNIQUE_CHECK_TABLES; +@@session.ROCKSDB_SKIP_UNIQUE_CHECK_TABLES +bbb +"Setting the session scope variable back to default" +SET @@session.ROCKSDB_SKIP_UNIQUE_CHECK_TABLES = DEFAULT; +SELECT @@session.ROCKSDB_SKIP_UNIQUE_CHECK_TABLES; +@@session.ROCKSDB_SKIP_UNIQUE_CHECK_TABLES +.* +'# Testing with invalid values in global scope #' +SET @@global.ROCKSDB_SKIP_UNIQUE_CHECK_TABLES = @start_global_value; +SELECT @@global.ROCKSDB_SKIP_UNIQUE_CHECK_TABLES; +@@global.ROCKSDB_SKIP_UNIQUE_CHECK_TABLES +.* +SET @@session.ROCKSDB_SKIP_UNIQUE_CHECK_TABLES = @start_session_value; +SELECT @@session.ROCKSDB_SKIP_UNIQUE_CHECK_TABLES; +@@session.ROCKSDB_SKIP_UNIQUE_CHECK_TABLES +.* +DROP TABLE valid_values; +DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_stats_dump_period_sec_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_stats_dump_period_sec_basic.result new file mode 100644 index 0000000000000..2dbf5a55b87a9 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_stats_dump_period_sec_basic.result @@ -0,0 +1,7 @@ +SET @start_global_value = @@global.ROCKSDB_STATS_DUMP_PERIOD_SEC; +SELECT @start_global_value; +@start_global_value +600 +"Trying to set variable @@global.ROCKSDB_STATS_DUMP_PERIOD_SEC to 444. It should fail because it is readonly." 
+SET @@global.ROCKSDB_STATS_DUMP_PERIOD_SEC = 444; +ERROR HY000: Variable 'rocksdb_stats_dump_period_sec' is a read only variable diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_store_row_debug_checksums_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_store_row_debug_checksums_basic.result new file mode 100644 index 0000000000000..a838d660a91d9 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_store_row_debug_checksums_basic.result @@ -0,0 +1,100 @@ +CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO valid_values VALUES(1); +INSERT INTO valid_values VALUES(0); +INSERT INTO valid_values VALUES('on'); +CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO invalid_values VALUES('\'aaa\''); +INSERT INTO invalid_values VALUES('\'bbb\''); +SET @start_global_value = @@global.ROCKSDB_STORE_ROW_DEBUG_CHECKSUMS; +SELECT @start_global_value; +@start_global_value +0 +SET @start_session_value = @@session.ROCKSDB_STORE_ROW_DEBUG_CHECKSUMS; +SELECT @start_session_value; +@start_session_value +0 +'# Setting to valid values in global scope#' +"Trying to set variable @@global.ROCKSDB_STORE_ROW_DEBUG_CHECKSUMS to 1" +SET @@global.ROCKSDB_STORE_ROW_DEBUG_CHECKSUMS = 1; +SELECT @@global.ROCKSDB_STORE_ROW_DEBUG_CHECKSUMS; +@@global.ROCKSDB_STORE_ROW_DEBUG_CHECKSUMS +1 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_STORE_ROW_DEBUG_CHECKSUMS = DEFAULT; +SELECT @@global.ROCKSDB_STORE_ROW_DEBUG_CHECKSUMS; +@@global.ROCKSDB_STORE_ROW_DEBUG_CHECKSUMS +0 +"Trying to set variable @@global.ROCKSDB_STORE_ROW_DEBUG_CHECKSUMS to 0" +SET @@global.ROCKSDB_STORE_ROW_DEBUG_CHECKSUMS = 0; +SELECT @@global.ROCKSDB_STORE_ROW_DEBUG_CHECKSUMS; +@@global.ROCKSDB_STORE_ROW_DEBUG_CHECKSUMS +0 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_STORE_ROW_DEBUG_CHECKSUMS = DEFAULT; +SELECT @@global.ROCKSDB_STORE_ROW_DEBUG_CHECKSUMS; 
+@@global.ROCKSDB_STORE_ROW_DEBUG_CHECKSUMS +0 +"Trying to set variable @@global.ROCKSDB_STORE_ROW_DEBUG_CHECKSUMS to on" +SET @@global.ROCKSDB_STORE_ROW_DEBUG_CHECKSUMS = on; +SELECT @@global.ROCKSDB_STORE_ROW_DEBUG_CHECKSUMS; +@@global.ROCKSDB_STORE_ROW_DEBUG_CHECKSUMS +1 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_STORE_ROW_DEBUG_CHECKSUMS = DEFAULT; +SELECT @@global.ROCKSDB_STORE_ROW_DEBUG_CHECKSUMS; +@@global.ROCKSDB_STORE_ROW_DEBUG_CHECKSUMS +0 +'# Setting to valid values in session scope#' +"Trying to set variable @@session.ROCKSDB_STORE_ROW_DEBUG_CHECKSUMS to 1" +SET @@session.ROCKSDB_STORE_ROW_DEBUG_CHECKSUMS = 1; +SELECT @@session.ROCKSDB_STORE_ROW_DEBUG_CHECKSUMS; +@@session.ROCKSDB_STORE_ROW_DEBUG_CHECKSUMS +1 +"Setting the session scope variable back to default" +SET @@session.ROCKSDB_STORE_ROW_DEBUG_CHECKSUMS = DEFAULT; +SELECT @@session.ROCKSDB_STORE_ROW_DEBUG_CHECKSUMS; +@@session.ROCKSDB_STORE_ROW_DEBUG_CHECKSUMS +0 +"Trying to set variable @@session.ROCKSDB_STORE_ROW_DEBUG_CHECKSUMS to 0" +SET @@session.ROCKSDB_STORE_ROW_DEBUG_CHECKSUMS = 0; +SELECT @@session.ROCKSDB_STORE_ROW_DEBUG_CHECKSUMS; +@@session.ROCKSDB_STORE_ROW_DEBUG_CHECKSUMS +0 +"Setting the session scope variable back to default" +SET @@session.ROCKSDB_STORE_ROW_DEBUG_CHECKSUMS = DEFAULT; +SELECT @@session.ROCKSDB_STORE_ROW_DEBUG_CHECKSUMS; +@@session.ROCKSDB_STORE_ROW_DEBUG_CHECKSUMS +0 +"Trying to set variable @@session.ROCKSDB_STORE_ROW_DEBUG_CHECKSUMS to on" +SET @@session.ROCKSDB_STORE_ROW_DEBUG_CHECKSUMS = on; +SELECT @@session.ROCKSDB_STORE_ROW_DEBUG_CHECKSUMS; +@@session.ROCKSDB_STORE_ROW_DEBUG_CHECKSUMS +1 +"Setting the session scope variable back to default" +SET @@session.ROCKSDB_STORE_ROW_DEBUG_CHECKSUMS = DEFAULT; +SELECT @@session.ROCKSDB_STORE_ROW_DEBUG_CHECKSUMS; +@@session.ROCKSDB_STORE_ROW_DEBUG_CHECKSUMS +0 +'# Testing with invalid values in global scope #' +"Trying to set variable @@global.ROCKSDB_STORE_ROW_DEBUG_CHECKSUMS to 'aaa'" 
+SET @@global.ROCKSDB_STORE_ROW_DEBUG_CHECKSUMS = 'aaa'; +Got one of the listed errors +SELECT @@global.ROCKSDB_STORE_ROW_DEBUG_CHECKSUMS; +@@global.ROCKSDB_STORE_ROW_DEBUG_CHECKSUMS +0 +"Trying to set variable @@global.ROCKSDB_STORE_ROW_DEBUG_CHECKSUMS to 'bbb'" +SET @@global.ROCKSDB_STORE_ROW_DEBUG_CHECKSUMS = 'bbb'; +Got one of the listed errors +SELECT @@global.ROCKSDB_STORE_ROW_DEBUG_CHECKSUMS; +@@global.ROCKSDB_STORE_ROW_DEBUG_CHECKSUMS +0 +SET @@global.ROCKSDB_STORE_ROW_DEBUG_CHECKSUMS = @start_global_value; +SELECT @@global.ROCKSDB_STORE_ROW_DEBUG_CHECKSUMS; +@@global.ROCKSDB_STORE_ROW_DEBUG_CHECKSUMS +0 +SET @@session.ROCKSDB_STORE_ROW_DEBUG_CHECKSUMS = @start_session_value; +SELECT @@session.ROCKSDB_STORE_ROW_DEBUG_CHECKSUMS; +@@session.ROCKSDB_STORE_ROW_DEBUG_CHECKSUMS +0 +DROP TABLE valid_values; +DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_strict_collation_check_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_strict_collation_check_basic.result new file mode 100644 index 0000000000000..46d238d1fa384 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_strict_collation_check_basic.result @@ -0,0 +1,75 @@ +CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO valid_values VALUES(1); +INSERT INTO valid_values VALUES(0); +INSERT INTO valid_values VALUES('on'); +INSERT INTO valid_values VALUES('off'); +CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO invalid_values VALUES('\'aaa\''); +INSERT INTO invalid_values VALUES('\'bbb\''); +SET @start_global_value = @@global.ROCKSDB_STRICT_COLLATION_CHECK; +SELECT @start_global_value; +@start_global_value +1 +'# Setting to valid values in global scope#' +"Trying to set variable @@global.ROCKSDB_STRICT_COLLATION_CHECK to 1" +SET @@global.ROCKSDB_STRICT_COLLATION_CHECK = 1; +SELECT @@global.ROCKSDB_STRICT_COLLATION_CHECK; +@@global.ROCKSDB_STRICT_COLLATION_CHECK +1 +"Setting the 
global scope variable back to default" +SET @@global.ROCKSDB_STRICT_COLLATION_CHECK = DEFAULT; +SELECT @@global.ROCKSDB_STRICT_COLLATION_CHECK; +@@global.ROCKSDB_STRICT_COLLATION_CHECK +1 +"Trying to set variable @@global.ROCKSDB_STRICT_COLLATION_CHECK to 0" +SET @@global.ROCKSDB_STRICT_COLLATION_CHECK = 0; +SELECT @@global.ROCKSDB_STRICT_COLLATION_CHECK; +@@global.ROCKSDB_STRICT_COLLATION_CHECK +0 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_STRICT_COLLATION_CHECK = DEFAULT; +SELECT @@global.ROCKSDB_STRICT_COLLATION_CHECK; +@@global.ROCKSDB_STRICT_COLLATION_CHECK +1 +"Trying to set variable @@global.ROCKSDB_STRICT_COLLATION_CHECK to on" +SET @@global.ROCKSDB_STRICT_COLLATION_CHECK = on; +SELECT @@global.ROCKSDB_STRICT_COLLATION_CHECK; +@@global.ROCKSDB_STRICT_COLLATION_CHECK +1 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_STRICT_COLLATION_CHECK = DEFAULT; +SELECT @@global.ROCKSDB_STRICT_COLLATION_CHECK; +@@global.ROCKSDB_STRICT_COLLATION_CHECK +1 +"Trying to set variable @@global.ROCKSDB_STRICT_COLLATION_CHECK to off" +SET @@global.ROCKSDB_STRICT_COLLATION_CHECK = off; +SELECT @@global.ROCKSDB_STRICT_COLLATION_CHECK; +@@global.ROCKSDB_STRICT_COLLATION_CHECK +0 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_STRICT_COLLATION_CHECK = DEFAULT; +SELECT @@global.ROCKSDB_STRICT_COLLATION_CHECK; +@@global.ROCKSDB_STRICT_COLLATION_CHECK +1 +"Trying to set variable @@session.ROCKSDB_STRICT_COLLATION_CHECK to 444. It should fail because it is not session." 
+SET @@session.ROCKSDB_STRICT_COLLATION_CHECK = 444; +ERROR HY000: Variable 'rocksdb_strict_collation_check' is a GLOBAL variable and should be set with SET GLOBAL +'# Testing with invalid values in global scope #' +"Trying to set variable @@global.ROCKSDB_STRICT_COLLATION_CHECK to 'aaa'" +SET @@global.ROCKSDB_STRICT_COLLATION_CHECK = 'aaa'; +Got one of the listed errors +SELECT @@global.ROCKSDB_STRICT_COLLATION_CHECK; +@@global.ROCKSDB_STRICT_COLLATION_CHECK +1 +"Trying to set variable @@global.ROCKSDB_STRICT_COLLATION_CHECK to 'bbb'" +SET @@global.ROCKSDB_STRICT_COLLATION_CHECK = 'bbb'; +Got one of the listed errors +SELECT @@global.ROCKSDB_STRICT_COLLATION_CHECK; +@@global.ROCKSDB_STRICT_COLLATION_CHECK +1 +SET @@global.ROCKSDB_STRICT_COLLATION_CHECK = @start_global_value; +SELECT @@global.ROCKSDB_STRICT_COLLATION_CHECK; +@@global.ROCKSDB_STRICT_COLLATION_CHECK +1 +DROP TABLE valid_values; +DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_strict_collation_exceptions_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_strict_collation_exceptions_basic.result new file mode 100644 index 0000000000000..5f748621d2538 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_strict_collation_exceptions_basic.result @@ -0,0 +1,36 @@ +SET @start_global_value = @@global.ROCKSDB_STRICT_COLLATION_EXCEPTIONS; +SELECT @start_global_value; +@start_global_value + +"Trying to set @session.ROCKSDB_STRICT_COLLATION_EXCEPTIONS to simple table name." +SET @@global.ROCKSDB_STRICT_COLLATION_EXCEPTIONS = mytable; +SELECT @@global.ROCKSDB_STRICT_COLLATION_EXCEPTIONS; +@@global.ROCKSDB_STRICT_COLLATION_EXCEPTIONS +mytable +"Trying to set @session.ROCKSDB_STRICT_COLLATION_EXCEPTIONS to regex table name(s)." 
+SET @@global.ROCKSDB_STRICT_COLLATION_EXCEPTIONS = "t.*"; +SELECT @@global.ROCKSDB_STRICT_COLLATION_EXCEPTIONS; +@@global.ROCKSDB_STRICT_COLLATION_EXCEPTIONS +t.* +"Trying to set @session.ROCKSDB_STRICT_COLLATION_EXCEPTIONS to multiple regex table names." +SET @@global.ROCKSDB_STRICT_COLLATION_EXCEPTIONS = "s.*,t.*"; +SELECT @@global.ROCKSDB_STRICT_COLLATION_EXCEPTIONS; +@@global.ROCKSDB_STRICT_COLLATION_EXCEPTIONS +s.*,t.* +"Trying to set @session.ROCKSDB_STRICT_COLLATION_EXCEPTIONS to empty." +SET @@global.ROCKSDB_STRICT_COLLATION_EXCEPTIONS = ""; +SELECT @@global.ROCKSDB_STRICT_COLLATION_EXCEPTIONS; +@@global.ROCKSDB_STRICT_COLLATION_EXCEPTIONS + +"Trying to set @session.ROCKSDB_STRICT_COLLATION_EXCEPTIONS to default." +SET @@global.ROCKSDB_STRICT_COLLATION_EXCEPTIONS = DEFAULT; +SELECT @@global.ROCKSDB_STRICT_COLLATION_EXCEPTIONS; +@@global.ROCKSDB_STRICT_COLLATION_EXCEPTIONS + +"Trying to set @session.ROCKSDB_STRICT_COLLATION_EXCEPTIONS to 444. It should fail because it is not session." +SET @@session.ROCKSDB_STRICT_COLLATION_EXCEPTIONS = 444; +ERROR HY000: Variable 'rocksdb_strict_collation_exceptions' is a GLOBAL variable and should be set with SET GLOBAL +SET @@global.ROCKSDB_STRICT_COLLATION_EXCEPTIONS = @start_global_value; +SELECT @@global.ROCKSDB_STRICT_COLLATION_EXCEPTIONS; +@@global.ROCKSDB_STRICT_COLLATION_EXCEPTIONS + diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_supported_compression_types_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_supported_compression_types_basic.result new file mode 100644 index 0000000000000..aa77d73912007 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_supported_compression_types_basic.result @@ -0,0 +1,4 @@ +SET @start_global_value = @@global.ROCKSDB_SUPPORTED_COMPRESSION_TYPES; +"Trying to set variable @@global.ROCKSDB_SUPPORTED_COMPRESSION_TYPES to 444. It should fail because it is readonly." 
+SET @@global.ROCKSDB_SUPPORTED_COMPRESSION_TYPES = 444; +ERROR HY000: Variable 'rocksdb_supported_compression_types' is a read only variable diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_table_cache_numshardbits_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_table_cache_numshardbits_basic.result new file mode 100644 index 0000000000000..0161a3390828c --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_table_cache_numshardbits_basic.result @@ -0,0 +1,7 @@ +SET @start_global_value = @@global.ROCKSDB_TABLE_CACHE_NUMSHARDBITS; +SELECT @start_global_value; +@start_global_value +6 +"Trying to set variable @@global.ROCKSDB_TABLE_CACHE_NUMSHARDBITS to 444. It should fail because it is readonly." +SET @@global.ROCKSDB_TABLE_CACHE_NUMSHARDBITS = 444; +ERROR HY000: Variable 'rocksdb_table_cache_numshardbits' is a read only variable diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_table_stats_sampling_pct_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_table_stats_sampling_pct_basic.result new file mode 100644 index 0000000000000..6ff47ab9569d1 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_table_stats_sampling_pct_basic.result @@ -0,0 +1,85 @@ +CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO valid_values VALUES(100); +INSERT INTO valid_values VALUES(1); +INSERT INTO valid_values VALUES(0); +CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO invalid_values VALUES('\'aaa\''); +INSERT INTO invalid_values VALUES('\'bbb\''); +INSERT INTO invalid_values VALUES('\'-1\''); +INSERT INTO invalid_values VALUES('\'101\''); +INSERT INTO invalid_values VALUES('\'484436\''); +SET @start_global_value = @@global.ROCKSDB_TABLE_STATS_SAMPLING_PCT; +SELECT @start_global_value; +@start_global_value +10 +'# Setting to valid values in global scope#' +"Trying to set variable @@global.ROCKSDB_TABLE_STATS_SAMPLING_PCT 
to 100" +SET @@global.ROCKSDB_TABLE_STATS_SAMPLING_PCT = 100; +SELECT @@global.ROCKSDB_TABLE_STATS_SAMPLING_PCT; +@@global.ROCKSDB_TABLE_STATS_SAMPLING_PCT +100 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_TABLE_STATS_SAMPLING_PCT = DEFAULT; +SELECT @@global.ROCKSDB_TABLE_STATS_SAMPLING_PCT; +@@global.ROCKSDB_TABLE_STATS_SAMPLING_PCT +10 +"Trying to set variable @@global.ROCKSDB_TABLE_STATS_SAMPLING_PCT to 1" +SET @@global.ROCKSDB_TABLE_STATS_SAMPLING_PCT = 1; +SELECT @@global.ROCKSDB_TABLE_STATS_SAMPLING_PCT; +@@global.ROCKSDB_TABLE_STATS_SAMPLING_PCT +1 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_TABLE_STATS_SAMPLING_PCT = DEFAULT; +SELECT @@global.ROCKSDB_TABLE_STATS_SAMPLING_PCT; +@@global.ROCKSDB_TABLE_STATS_SAMPLING_PCT +10 +"Trying to set variable @@global.ROCKSDB_TABLE_STATS_SAMPLING_PCT to 0" +SET @@global.ROCKSDB_TABLE_STATS_SAMPLING_PCT = 0; +SELECT @@global.ROCKSDB_TABLE_STATS_SAMPLING_PCT; +@@global.ROCKSDB_TABLE_STATS_SAMPLING_PCT +0 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_TABLE_STATS_SAMPLING_PCT = DEFAULT; +SELECT @@global.ROCKSDB_TABLE_STATS_SAMPLING_PCT; +@@global.ROCKSDB_TABLE_STATS_SAMPLING_PCT +10 +"Trying to set variable @@session.ROCKSDB_TABLE_STATS_SAMPLING_PCT to 444. It should fail because it is not session." 
+SET @@session.ROCKSDB_TABLE_STATS_SAMPLING_PCT = 444; +ERROR HY000: Variable 'rocksdb_table_stats_sampling_pct' is a GLOBAL variable and should be set with SET GLOBAL +'# Testing with invalid values in global scope #' +"Trying to set variable @@global.ROCKSDB_TABLE_STATS_SAMPLING_PCT to 'aaa'" +SET @@global.ROCKSDB_TABLE_STATS_SAMPLING_PCT = 'aaa'; +Got one of the listed errors +SELECT @@global.ROCKSDB_TABLE_STATS_SAMPLING_PCT; +@@global.ROCKSDB_TABLE_STATS_SAMPLING_PCT +10 +"Trying to set variable @@global.ROCKSDB_TABLE_STATS_SAMPLING_PCT to 'bbb'" +SET @@global.ROCKSDB_TABLE_STATS_SAMPLING_PCT = 'bbb'; +Got one of the listed errors +SELECT @@global.ROCKSDB_TABLE_STATS_SAMPLING_PCT; +@@global.ROCKSDB_TABLE_STATS_SAMPLING_PCT +10 +"Trying to set variable @@global.ROCKSDB_TABLE_STATS_SAMPLING_PCT to '-1'" +SET @@global.ROCKSDB_TABLE_STATS_SAMPLING_PCT = '-1'; +Got one of the listed errors +SELECT @@global.ROCKSDB_TABLE_STATS_SAMPLING_PCT; +@@global.ROCKSDB_TABLE_STATS_SAMPLING_PCT +10 +"Trying to set variable @@global.ROCKSDB_TABLE_STATS_SAMPLING_PCT to '101'" +SET @@global.ROCKSDB_TABLE_STATS_SAMPLING_PCT = '101'; +Got one of the listed errors +SELECT @@global.ROCKSDB_TABLE_STATS_SAMPLING_PCT; +@@global.ROCKSDB_TABLE_STATS_SAMPLING_PCT +10 +"Trying to set variable @@global.ROCKSDB_TABLE_STATS_SAMPLING_PCT to '484436'" +SET @@global.ROCKSDB_TABLE_STATS_SAMPLING_PCT = '484436'; +Got one of the listed errors +SELECT @@global.ROCKSDB_TABLE_STATS_SAMPLING_PCT; +@@global.ROCKSDB_TABLE_STATS_SAMPLING_PCT +10 +SET @@global.ROCKSDB_TABLE_STATS_SAMPLING_PCT = @start_global_value; +SELECT @@global.ROCKSDB_TABLE_STATS_SAMPLING_PCT; +@@global.ROCKSDB_TABLE_STATS_SAMPLING_PCT +10 +DROP TABLE valid_values; +DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_tmpdir_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_tmpdir_basic.result new file mode 100644 index 0000000000000..25b19ee56a44e --- /dev/null +++ 
b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_tmpdir_basic.result @@ -0,0 +1,29 @@ +SET @start_global_value = @@global.rocksdb_tmpdir; +SELECT @start_global_value; +@start_global_value + +select @@session.rocksdb_tmpdir; +@@session.rocksdb_tmpdir + +show global variables like 'rocksdb_tmpdir'; +Variable_name Value +rocksdb_tmpdir +show session variables like 'rocksdb_tmpdir'; +Variable_name Value +rocksdb_tmpdir +select * from information_schema.global_variables where variable_name='rocksdb_tmpdir'; +VARIABLE_NAME VARIABLE_VALUE +ROCKSDB_TMPDIR +select * from information_schema.session_variables where variable_name='rocksdb_tmpdir'; +VARIABLE_NAME VARIABLE_VALUE +ROCKSDB_TMPDIR +set global rocksdb_tmpdir='value'; +set session rocksdb_tmpdir='value'; +set global rocksdb_tmpdir=1.1; +ERROR 42000: Incorrect argument type to variable 'rocksdb_tmpdir' +set global rocksdb_tmpdir=1e1; +ERROR 42000: Incorrect argument type to variable 'rocksdb_tmpdir' +SET @@global.rocksdb_tmpdir = @start_global_value; +SELECT @@global.rocksdb_tmpdir; +@@global.rocksdb_tmpdir + diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_trace_sst_api_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_trace_sst_api_basic.result new file mode 100644 index 0000000000000..d4ffde8000149 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_trace_sst_api_basic.result @@ -0,0 +1,100 @@ +CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO valid_values VALUES(1); +INSERT INTO valid_values VALUES(0); +INSERT INTO valid_values VALUES('on'); +CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO invalid_values VALUES('\'aaa\''); +INSERT INTO invalid_values VALUES('\'bbb\''); +SET @start_global_value = @@global.ROCKSDB_TRACE_SST_API; +SELECT @start_global_value; +@start_global_value +0 +SET @start_session_value = @@session.ROCKSDB_TRACE_SST_API; +SELECT @start_session_value; +@start_session_value +0 +'# 
Setting to valid values in global scope#' +"Trying to set variable @@global.ROCKSDB_TRACE_SST_API to 1" +SET @@global.ROCKSDB_TRACE_SST_API = 1; +SELECT @@global.ROCKSDB_TRACE_SST_API; +@@global.ROCKSDB_TRACE_SST_API +1 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_TRACE_SST_API = DEFAULT; +SELECT @@global.ROCKSDB_TRACE_SST_API; +@@global.ROCKSDB_TRACE_SST_API +0 +"Trying to set variable @@global.ROCKSDB_TRACE_SST_API to 0" +SET @@global.ROCKSDB_TRACE_SST_API = 0; +SELECT @@global.ROCKSDB_TRACE_SST_API; +@@global.ROCKSDB_TRACE_SST_API +0 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_TRACE_SST_API = DEFAULT; +SELECT @@global.ROCKSDB_TRACE_SST_API; +@@global.ROCKSDB_TRACE_SST_API +0 +"Trying to set variable @@global.ROCKSDB_TRACE_SST_API to on" +SET @@global.ROCKSDB_TRACE_SST_API = on; +SELECT @@global.ROCKSDB_TRACE_SST_API; +@@global.ROCKSDB_TRACE_SST_API +1 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_TRACE_SST_API = DEFAULT; +SELECT @@global.ROCKSDB_TRACE_SST_API; +@@global.ROCKSDB_TRACE_SST_API +0 +'# Setting to valid values in session scope#' +"Trying to set variable @@session.ROCKSDB_TRACE_SST_API to 1" +SET @@session.ROCKSDB_TRACE_SST_API = 1; +SELECT @@session.ROCKSDB_TRACE_SST_API; +@@session.ROCKSDB_TRACE_SST_API +1 +"Setting the session scope variable back to default" +SET @@session.ROCKSDB_TRACE_SST_API = DEFAULT; +SELECT @@session.ROCKSDB_TRACE_SST_API; +@@session.ROCKSDB_TRACE_SST_API +0 +"Trying to set variable @@session.ROCKSDB_TRACE_SST_API to 0" +SET @@session.ROCKSDB_TRACE_SST_API = 0; +SELECT @@session.ROCKSDB_TRACE_SST_API; +@@session.ROCKSDB_TRACE_SST_API +0 +"Setting the session scope variable back to default" +SET @@session.ROCKSDB_TRACE_SST_API = DEFAULT; +SELECT @@session.ROCKSDB_TRACE_SST_API; +@@session.ROCKSDB_TRACE_SST_API +0 +"Trying to set variable @@session.ROCKSDB_TRACE_SST_API to on" +SET @@session.ROCKSDB_TRACE_SST_API = on; +SELECT 
@@session.ROCKSDB_TRACE_SST_API; +@@session.ROCKSDB_TRACE_SST_API +1 +"Setting the session scope variable back to default" +SET @@session.ROCKSDB_TRACE_SST_API = DEFAULT; +SELECT @@session.ROCKSDB_TRACE_SST_API; +@@session.ROCKSDB_TRACE_SST_API +0 +'# Testing with invalid values in global scope #' +"Trying to set variable @@global.ROCKSDB_TRACE_SST_API to 'aaa'" +SET @@global.ROCKSDB_TRACE_SST_API = 'aaa'; +Got one of the listed errors +SELECT @@global.ROCKSDB_TRACE_SST_API; +@@global.ROCKSDB_TRACE_SST_API +0 +"Trying to set variable @@global.ROCKSDB_TRACE_SST_API to 'bbb'" +SET @@global.ROCKSDB_TRACE_SST_API = 'bbb'; +Got one of the listed errors +SELECT @@global.ROCKSDB_TRACE_SST_API; +@@global.ROCKSDB_TRACE_SST_API +0 +SET @@global.ROCKSDB_TRACE_SST_API = @start_global_value; +SELECT @@global.ROCKSDB_TRACE_SST_API; +@@global.ROCKSDB_TRACE_SST_API +0 +SET @@session.ROCKSDB_TRACE_SST_API = @start_session_value; +SELECT @@session.ROCKSDB_TRACE_SST_API; +@@session.ROCKSDB_TRACE_SST_API +0 +DROP TABLE valid_values; +DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_unsafe_for_binlog_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_unsafe_for_binlog_basic.result new file mode 100644 index 0000000000000..c9748cc63069a --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_unsafe_for_binlog_basic.result @@ -0,0 +1,100 @@ +CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO valid_values VALUES(1); +INSERT INTO valid_values VALUES(0); +INSERT INTO valid_values VALUES('on'); +CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO invalid_values VALUES('\'aaa\''); +INSERT INTO invalid_values VALUES('\'bbb\''); +SET @start_global_value = @@global.ROCKSDB_UNSAFE_FOR_BINLOG; +SELECT @start_global_value; +@start_global_value +0 +SET @start_session_value = @@session.ROCKSDB_UNSAFE_FOR_BINLOG; +SELECT @start_session_value; +@start_session_value +0 +'# 
Setting to valid values in global scope#' +"Trying to set variable @@global.ROCKSDB_UNSAFE_FOR_BINLOG to 1" +SET @@global.ROCKSDB_UNSAFE_FOR_BINLOG = 1; +SELECT @@global.ROCKSDB_UNSAFE_FOR_BINLOG; +@@global.ROCKSDB_UNSAFE_FOR_BINLOG +1 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_UNSAFE_FOR_BINLOG = DEFAULT; +SELECT @@global.ROCKSDB_UNSAFE_FOR_BINLOG; +@@global.ROCKSDB_UNSAFE_FOR_BINLOG +0 +"Trying to set variable @@global.ROCKSDB_UNSAFE_FOR_BINLOG to 0" +SET @@global.ROCKSDB_UNSAFE_FOR_BINLOG = 0; +SELECT @@global.ROCKSDB_UNSAFE_FOR_BINLOG; +@@global.ROCKSDB_UNSAFE_FOR_BINLOG +0 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_UNSAFE_FOR_BINLOG = DEFAULT; +SELECT @@global.ROCKSDB_UNSAFE_FOR_BINLOG; +@@global.ROCKSDB_UNSAFE_FOR_BINLOG +0 +"Trying to set variable @@global.ROCKSDB_UNSAFE_FOR_BINLOG to on" +SET @@global.ROCKSDB_UNSAFE_FOR_BINLOG = on; +SELECT @@global.ROCKSDB_UNSAFE_FOR_BINLOG; +@@global.ROCKSDB_UNSAFE_FOR_BINLOG +1 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_UNSAFE_FOR_BINLOG = DEFAULT; +SELECT @@global.ROCKSDB_UNSAFE_FOR_BINLOG; +@@global.ROCKSDB_UNSAFE_FOR_BINLOG +0 +'# Setting to valid values in session scope#' +"Trying to set variable @@session.ROCKSDB_UNSAFE_FOR_BINLOG to 1" +SET @@session.ROCKSDB_UNSAFE_FOR_BINLOG = 1; +SELECT @@session.ROCKSDB_UNSAFE_FOR_BINLOG; +@@session.ROCKSDB_UNSAFE_FOR_BINLOG +1 +"Setting the session scope variable back to default" +SET @@session.ROCKSDB_UNSAFE_FOR_BINLOG = DEFAULT; +SELECT @@session.ROCKSDB_UNSAFE_FOR_BINLOG; +@@session.ROCKSDB_UNSAFE_FOR_BINLOG +0 +"Trying to set variable @@session.ROCKSDB_UNSAFE_FOR_BINLOG to 0" +SET @@session.ROCKSDB_UNSAFE_FOR_BINLOG = 0; +SELECT @@session.ROCKSDB_UNSAFE_FOR_BINLOG; +@@session.ROCKSDB_UNSAFE_FOR_BINLOG +0 +"Setting the session scope variable back to default" +SET @@session.ROCKSDB_UNSAFE_FOR_BINLOG = DEFAULT; +SELECT @@session.ROCKSDB_UNSAFE_FOR_BINLOG; 
+@@session.ROCKSDB_UNSAFE_FOR_BINLOG +0 +"Trying to set variable @@session.ROCKSDB_UNSAFE_FOR_BINLOG to on" +SET @@session.ROCKSDB_UNSAFE_FOR_BINLOG = on; +SELECT @@session.ROCKSDB_UNSAFE_FOR_BINLOG; +@@session.ROCKSDB_UNSAFE_FOR_BINLOG +1 +"Setting the session scope variable back to default" +SET @@session.ROCKSDB_UNSAFE_FOR_BINLOG = DEFAULT; +SELECT @@session.ROCKSDB_UNSAFE_FOR_BINLOG; +@@session.ROCKSDB_UNSAFE_FOR_BINLOG +0 +'# Testing with invalid values in global scope #' +"Trying to set variable @@global.ROCKSDB_UNSAFE_FOR_BINLOG to 'aaa'" +SET @@global.ROCKSDB_UNSAFE_FOR_BINLOG = 'aaa'; +Got one of the listed errors +SELECT @@global.ROCKSDB_UNSAFE_FOR_BINLOG; +@@global.ROCKSDB_UNSAFE_FOR_BINLOG +0 +"Trying to set variable @@global.ROCKSDB_UNSAFE_FOR_BINLOG to 'bbb'" +SET @@global.ROCKSDB_UNSAFE_FOR_BINLOG = 'bbb'; +Got one of the listed errors +SELECT @@global.ROCKSDB_UNSAFE_FOR_BINLOG; +@@global.ROCKSDB_UNSAFE_FOR_BINLOG +0 +SET @@global.ROCKSDB_UNSAFE_FOR_BINLOG = @start_global_value; +SELECT @@global.ROCKSDB_UNSAFE_FOR_BINLOG; +@@global.ROCKSDB_UNSAFE_FOR_BINLOG +0 +SET @@session.ROCKSDB_UNSAFE_FOR_BINLOG = @start_session_value; +SELECT @@session.ROCKSDB_UNSAFE_FOR_BINLOG; +@@session.ROCKSDB_UNSAFE_FOR_BINLOG +0 +DROP TABLE valid_values; +DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_use_adaptive_mutex_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_use_adaptive_mutex_basic.result new file mode 100644 index 0000000000000..ef4007c754902 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_use_adaptive_mutex_basic.result @@ -0,0 +1,7 @@ +SET @start_global_value = @@global.ROCKSDB_USE_ADAPTIVE_MUTEX; +SELECT @start_global_value; +@start_global_value +0 +"Trying to set variable @@global.ROCKSDB_USE_ADAPTIVE_MUTEX to 444. It should fail because it is readonly." 
+SET @@global.ROCKSDB_USE_ADAPTIVE_MUTEX = 444; +ERROR HY000: Variable 'rocksdb_use_adaptive_mutex' is a read only variable diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_use_direct_reads_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_use_direct_reads_basic.result new file mode 100644 index 0000000000000..ec36c309dca40 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_use_direct_reads_basic.result @@ -0,0 +1,7 @@ +SET @start_global_value = @@global.ROCKSDB_USE_DIRECT_READS; +SELECT @start_global_value; +@start_global_value +0 +"Trying to set variable @@global.ROCKSDB_USE_DIRECT_READS to 444. It should fail because it is readonly." +SET @@global.ROCKSDB_USE_DIRECT_READS = 444; +ERROR HY000: Variable 'rocksdb_use_direct_reads' is a read only variable diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_use_direct_writes_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_use_direct_writes_basic.result new file mode 100644 index 0000000000000..4cc787e4586c8 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_use_direct_writes_basic.result @@ -0,0 +1,7 @@ +SET @start_global_value = @@global.ROCKSDB_USE_DIRECT_WRITES; +SELECT @start_global_value; +@start_global_value +0 +"Trying to set variable @@global.ROCKSDB_USE_DIRECT_WRITES to 444. It should fail because it is readonly." 
+SET @@global.ROCKSDB_USE_DIRECT_WRITES = 444; +ERROR HY000: Variable 'rocksdb_use_direct_writes' is a read only variable diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_use_fsync_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_use_fsync_basic.result new file mode 100644 index 0000000000000..254cc2ceb5d2f --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_use_fsync_basic.result @@ -0,0 +1,7 @@ +SET @start_global_value = @@global.ROCKSDB_USE_FSYNC; +SELECT @start_global_value; +@start_global_value +0 +"Trying to set variable @@global.ROCKSDB_USE_FSYNC to 444. It should fail because it is readonly." +SET @@global.ROCKSDB_USE_FSYNC = 444; +ERROR HY000: Variable 'rocksdb_use_fsync' is a read only variable diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_validate_tables_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_validate_tables_basic.result new file mode 100644 index 0000000000000..c7b874877f82d --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_validate_tables_basic.result @@ -0,0 +1,7 @@ +SET @start_global_value = @@global.ROCKSDB_VALIDATE_TABLES; +SELECT @start_global_value; +@start_global_value +1 +"Trying to set variable @@global.ROCKSDB_VALIDATE_TABLES to 444. It should fail because it is readonly." 
+SET @@global.ROCKSDB_VALIDATE_TABLES = 444; +ERROR HY000: Variable 'rocksdb_validate_tables' is a read only variable diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_verify_row_debug_checksums_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_verify_row_debug_checksums_basic.result new file mode 100644 index 0000000000000..ad71c8909a684 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_verify_row_debug_checksums_basic.result @@ -0,0 +1,100 @@ +CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO valid_values VALUES(1); +INSERT INTO valid_values VALUES(0); +INSERT INTO valid_values VALUES('on'); +CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO invalid_values VALUES('\'aaa\''); +INSERT INTO invalid_values VALUES('\'bbb\''); +SET @start_global_value = @@global.ROCKSDB_VERIFY_ROW_DEBUG_CHECKSUMS; +SELECT @start_global_value; +@start_global_value +0 +SET @start_session_value = @@session.ROCKSDB_VERIFY_ROW_DEBUG_CHECKSUMS; +SELECT @start_session_value; +@start_session_value +0 +'# Setting to valid values in global scope#' +"Trying to set variable @@global.ROCKSDB_VERIFY_ROW_DEBUG_CHECKSUMS to 1" +SET @@global.ROCKSDB_VERIFY_ROW_DEBUG_CHECKSUMS = 1; +SELECT @@global.ROCKSDB_VERIFY_ROW_DEBUG_CHECKSUMS; +@@global.ROCKSDB_VERIFY_ROW_DEBUG_CHECKSUMS +1 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_VERIFY_ROW_DEBUG_CHECKSUMS = DEFAULT; +SELECT @@global.ROCKSDB_VERIFY_ROW_DEBUG_CHECKSUMS; +@@global.ROCKSDB_VERIFY_ROW_DEBUG_CHECKSUMS +0 +"Trying to set variable @@global.ROCKSDB_VERIFY_ROW_DEBUG_CHECKSUMS to 0" +SET @@global.ROCKSDB_VERIFY_ROW_DEBUG_CHECKSUMS = 0; +SELECT @@global.ROCKSDB_VERIFY_ROW_DEBUG_CHECKSUMS; +@@global.ROCKSDB_VERIFY_ROW_DEBUG_CHECKSUMS +0 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_VERIFY_ROW_DEBUG_CHECKSUMS = DEFAULT; +SELECT @@global.ROCKSDB_VERIFY_ROW_DEBUG_CHECKSUMS; 
+@@global.ROCKSDB_VERIFY_ROW_DEBUG_CHECKSUMS +0 +"Trying to set variable @@global.ROCKSDB_VERIFY_ROW_DEBUG_CHECKSUMS to on" +SET @@global.ROCKSDB_VERIFY_ROW_DEBUG_CHECKSUMS = on; +SELECT @@global.ROCKSDB_VERIFY_ROW_DEBUG_CHECKSUMS; +@@global.ROCKSDB_VERIFY_ROW_DEBUG_CHECKSUMS +1 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_VERIFY_ROW_DEBUG_CHECKSUMS = DEFAULT; +SELECT @@global.ROCKSDB_VERIFY_ROW_DEBUG_CHECKSUMS; +@@global.ROCKSDB_VERIFY_ROW_DEBUG_CHECKSUMS +0 +'# Setting to valid values in session scope#' +"Trying to set variable @@session.ROCKSDB_VERIFY_ROW_DEBUG_CHECKSUMS to 1" +SET @@session.ROCKSDB_VERIFY_ROW_DEBUG_CHECKSUMS = 1; +SELECT @@session.ROCKSDB_VERIFY_ROW_DEBUG_CHECKSUMS; +@@session.ROCKSDB_VERIFY_ROW_DEBUG_CHECKSUMS +1 +"Setting the session scope variable back to default" +SET @@session.ROCKSDB_VERIFY_ROW_DEBUG_CHECKSUMS = DEFAULT; +SELECT @@session.ROCKSDB_VERIFY_ROW_DEBUG_CHECKSUMS; +@@session.ROCKSDB_VERIFY_ROW_DEBUG_CHECKSUMS +0 +"Trying to set variable @@session.ROCKSDB_VERIFY_ROW_DEBUG_CHECKSUMS to 0" +SET @@session.ROCKSDB_VERIFY_ROW_DEBUG_CHECKSUMS = 0; +SELECT @@session.ROCKSDB_VERIFY_ROW_DEBUG_CHECKSUMS; +@@session.ROCKSDB_VERIFY_ROW_DEBUG_CHECKSUMS +0 +"Setting the session scope variable back to default" +SET @@session.ROCKSDB_VERIFY_ROW_DEBUG_CHECKSUMS = DEFAULT; +SELECT @@session.ROCKSDB_VERIFY_ROW_DEBUG_CHECKSUMS; +@@session.ROCKSDB_VERIFY_ROW_DEBUG_CHECKSUMS +0 +"Trying to set variable @@session.ROCKSDB_VERIFY_ROW_DEBUG_CHECKSUMS to on" +SET @@session.ROCKSDB_VERIFY_ROW_DEBUG_CHECKSUMS = on; +SELECT @@session.ROCKSDB_VERIFY_ROW_DEBUG_CHECKSUMS; +@@session.ROCKSDB_VERIFY_ROW_DEBUG_CHECKSUMS +1 +"Setting the session scope variable back to default" +SET @@session.ROCKSDB_VERIFY_ROW_DEBUG_CHECKSUMS = DEFAULT; +SELECT @@session.ROCKSDB_VERIFY_ROW_DEBUG_CHECKSUMS; +@@session.ROCKSDB_VERIFY_ROW_DEBUG_CHECKSUMS +0 +'# Testing with invalid values in global scope #' +"Trying to set variable 
@@global.ROCKSDB_VERIFY_ROW_DEBUG_CHECKSUMS to 'aaa'" +SET @@global.ROCKSDB_VERIFY_ROW_DEBUG_CHECKSUMS = 'aaa'; +Got one of the listed errors +SELECT @@global.ROCKSDB_VERIFY_ROW_DEBUG_CHECKSUMS; +@@global.ROCKSDB_VERIFY_ROW_DEBUG_CHECKSUMS +0 +"Trying to set variable @@global.ROCKSDB_VERIFY_ROW_DEBUG_CHECKSUMS to 'bbb'" +SET @@global.ROCKSDB_VERIFY_ROW_DEBUG_CHECKSUMS = 'bbb'; +Got one of the listed errors +SELECT @@global.ROCKSDB_VERIFY_ROW_DEBUG_CHECKSUMS; +@@global.ROCKSDB_VERIFY_ROW_DEBUG_CHECKSUMS +0 +SET @@global.ROCKSDB_VERIFY_ROW_DEBUG_CHECKSUMS = @start_global_value; +SELECT @@global.ROCKSDB_VERIFY_ROW_DEBUG_CHECKSUMS; +@@global.ROCKSDB_VERIFY_ROW_DEBUG_CHECKSUMS +0 +SET @@session.ROCKSDB_VERIFY_ROW_DEBUG_CHECKSUMS = @start_session_value; +SELECT @@session.ROCKSDB_VERIFY_ROW_DEBUG_CHECKSUMS; +@@session.ROCKSDB_VERIFY_ROW_DEBUG_CHECKSUMS +0 +DROP TABLE valid_values; +DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_wal_bytes_per_sync_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_wal_bytes_per_sync_basic.result new file mode 100644 index 0000000000000..7da628b73fd70 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_wal_bytes_per_sync_basic.result @@ -0,0 +1,7 @@ +SET @start_global_value = @@global.ROCKSDB_WAL_BYTES_PER_SYNC; +SELECT @start_global_value; +@start_global_value +0 +"Trying to set variable @@global.ROCKSDB_WAL_BYTES_PER_SYNC to 444. It should fail because it is readonly." 
+SET @@global.ROCKSDB_WAL_BYTES_PER_SYNC = 444; +ERROR HY000: Variable 'rocksdb_wal_bytes_per_sync' is a read only variable diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_wal_dir_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_wal_dir_basic.result new file mode 100644 index 0000000000000..fd76a5ec00f93 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_wal_dir_basic.result @@ -0,0 +1,7 @@ +SET @start_global_value = @@global.ROCKSDB_WAL_DIR; +SELECT @start_global_value; +@start_global_value + +"Trying to set variable @@global.ROCKSDB_WAL_DIR to 444. It should fail because it is readonly." +SET @@global.ROCKSDB_WAL_DIR = 444; +ERROR HY000: Variable 'rocksdb_wal_dir' is a read only variable diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_wal_recovery_mode_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_wal_recovery_mode_basic.result new file mode 100644 index 0000000000000..9fec4a24bd83e --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_wal_recovery_mode_basic.result @@ -0,0 +1,46 @@ +CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO valid_values VALUES(1); +INSERT INTO valid_values VALUES(0); +CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO invalid_values VALUES('\'aaa\''); +SET @start_global_value = @@global.ROCKSDB_WAL_RECOVERY_MODE; +SELECT @start_global_value; +@start_global_value +1 +'# Setting to valid values in global scope#' +"Trying to set variable @@global.ROCKSDB_WAL_RECOVERY_MODE to 1" +SET @@global.ROCKSDB_WAL_RECOVERY_MODE = 1; +SELECT @@global.ROCKSDB_WAL_RECOVERY_MODE; +@@global.ROCKSDB_WAL_RECOVERY_MODE +1 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_WAL_RECOVERY_MODE = DEFAULT; +SELECT @@global.ROCKSDB_WAL_RECOVERY_MODE; +@@global.ROCKSDB_WAL_RECOVERY_MODE +1 +"Trying to set variable @@global.ROCKSDB_WAL_RECOVERY_MODE to 0" +SET 
@@global.ROCKSDB_WAL_RECOVERY_MODE = 0; +SELECT @@global.ROCKSDB_WAL_RECOVERY_MODE; +@@global.ROCKSDB_WAL_RECOVERY_MODE +0 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_WAL_RECOVERY_MODE = DEFAULT; +SELECT @@global.ROCKSDB_WAL_RECOVERY_MODE; +@@global.ROCKSDB_WAL_RECOVERY_MODE +1 +"Trying to set variable @@session.ROCKSDB_WAL_RECOVERY_MODE to 444. It should fail because it is not session." +SET @@session.ROCKSDB_WAL_RECOVERY_MODE = 444; +ERROR HY000: Variable 'rocksdb_wal_recovery_mode' is a GLOBAL variable and should be set with SET GLOBAL +'# Testing with invalid values in global scope #' +"Trying to set variable @@global.ROCKSDB_WAL_RECOVERY_MODE to 'aaa'" +SET @@global.ROCKSDB_WAL_RECOVERY_MODE = 'aaa'; +Got one of the listed errors +SELECT @@global.ROCKSDB_WAL_RECOVERY_MODE; +@@global.ROCKSDB_WAL_RECOVERY_MODE +1 +SET @@global.ROCKSDB_WAL_RECOVERY_MODE = @start_global_value; +SELECT @@global.ROCKSDB_WAL_RECOVERY_MODE; +@@global.ROCKSDB_WAL_RECOVERY_MODE +1 +DROP TABLE valid_values; +DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_wal_size_limit_mb_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_wal_size_limit_mb_basic.result new file mode 100644 index 0000000000000..5f03597df3adf --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_wal_size_limit_mb_basic.result @@ -0,0 +1,7 @@ +SET @start_global_value = @@global.ROCKSDB_WAL_SIZE_LIMIT_MB; +SELECT @start_global_value; +@start_global_value +0 +"Trying to set variable @@global.ROCKSDB_WAL_SIZE_LIMIT_MB to 444. It should fail because it is readonly." 
+SET @@global.ROCKSDB_WAL_SIZE_LIMIT_MB = 444; +ERROR HY000: Variable 'rocksdb_wal_size_limit_mb' is a read only variable diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_wal_ttl_seconds_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_wal_ttl_seconds_basic.result new file mode 100644 index 0000000000000..23f7fc81e7f14 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_wal_ttl_seconds_basic.result @@ -0,0 +1,7 @@ +SET @start_global_value = @@global.ROCKSDB_WAL_TTL_SECONDS; +SELECT @start_global_value; +@start_global_value +0 +"Trying to set variable @@global.ROCKSDB_WAL_TTL_SECONDS to 444. It should fail because it is readonly." +SET @@global.ROCKSDB_WAL_TTL_SECONDS = 444; +ERROR HY000: Variable 'rocksdb_wal_ttl_seconds' is a read only variable diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_whole_key_filtering_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_whole_key_filtering_basic.result new file mode 100644 index 0000000000000..0d6f7216e9afe --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_whole_key_filtering_basic.result @@ -0,0 +1,7 @@ +SET @start_global_value = @@global.ROCKSDB_WHOLE_KEY_FILTERING; +SELECT @start_global_value; +@start_global_value +1 +"Trying to set variable @@global.ROCKSDB_WHOLE_KEY_FILTERING to 444. It should fail because it is readonly." 
+SET @@global.ROCKSDB_WHOLE_KEY_FILTERING = 444; +ERROR HY000: Variable 'rocksdb_whole_key_filtering' is a read only variable diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_write_disable_wal_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_write_disable_wal_basic.result new file mode 100644 index 0000000000000..b71ee7f91cc4b --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_write_disable_wal_basic.result @@ -0,0 +1,114 @@ +CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO valid_values VALUES(1); +INSERT INTO valid_values VALUES(0); +INSERT INTO valid_values VALUES('on'); +INSERT INTO valid_values VALUES('off'); +CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO invalid_values VALUES('\'aaa\''); +SET @start_global_value = @@global.ROCKSDB_WRITE_DISABLE_WAL; +SELECT @start_global_value; +@start_global_value +0 +SET @start_session_value = @@session.ROCKSDB_WRITE_DISABLE_WAL; +SELECT @start_session_value; +@start_session_value +0 +'# Setting to valid values in global scope#' +"Trying to set variable @@global.ROCKSDB_WRITE_DISABLE_WAL to 1" +SET @@global.ROCKSDB_WRITE_DISABLE_WAL = 1; +SELECT @@global.ROCKSDB_WRITE_DISABLE_WAL; +@@global.ROCKSDB_WRITE_DISABLE_WAL +1 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_WRITE_DISABLE_WAL = DEFAULT; +SELECT @@global.ROCKSDB_WRITE_DISABLE_WAL; +@@global.ROCKSDB_WRITE_DISABLE_WAL +0 +"Trying to set variable @@global.ROCKSDB_WRITE_DISABLE_WAL to 0" +SET @@global.ROCKSDB_WRITE_DISABLE_WAL = 0; +SELECT @@global.ROCKSDB_WRITE_DISABLE_WAL; +@@global.ROCKSDB_WRITE_DISABLE_WAL +0 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_WRITE_DISABLE_WAL = DEFAULT; +SELECT @@global.ROCKSDB_WRITE_DISABLE_WAL; +@@global.ROCKSDB_WRITE_DISABLE_WAL +0 +"Trying to set variable @@global.ROCKSDB_WRITE_DISABLE_WAL to on" +SET @@global.ROCKSDB_WRITE_DISABLE_WAL = on; +SELECT 
@@global.ROCKSDB_WRITE_DISABLE_WAL; +@@global.ROCKSDB_WRITE_DISABLE_WAL +1 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_WRITE_DISABLE_WAL = DEFAULT; +SELECT @@global.ROCKSDB_WRITE_DISABLE_WAL; +@@global.ROCKSDB_WRITE_DISABLE_WAL +0 +"Trying to set variable @@global.ROCKSDB_WRITE_DISABLE_WAL to off" +SET @@global.ROCKSDB_WRITE_DISABLE_WAL = off; +SELECT @@global.ROCKSDB_WRITE_DISABLE_WAL; +@@global.ROCKSDB_WRITE_DISABLE_WAL +0 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_WRITE_DISABLE_WAL = DEFAULT; +SELECT @@global.ROCKSDB_WRITE_DISABLE_WAL; +@@global.ROCKSDB_WRITE_DISABLE_WAL +0 +'# Setting to valid values in session scope#' +"Trying to set variable @@session.ROCKSDB_WRITE_DISABLE_WAL to 1" +SET @@session.ROCKSDB_WRITE_DISABLE_WAL = 1; +SELECT @@session.ROCKSDB_WRITE_DISABLE_WAL; +@@session.ROCKSDB_WRITE_DISABLE_WAL +1 +"Setting the session scope variable back to default" +SET @@session.ROCKSDB_WRITE_DISABLE_WAL = DEFAULT; +SELECT @@session.ROCKSDB_WRITE_DISABLE_WAL; +@@session.ROCKSDB_WRITE_DISABLE_WAL +0 +"Trying to set variable @@session.ROCKSDB_WRITE_DISABLE_WAL to 0" +SET @@session.ROCKSDB_WRITE_DISABLE_WAL = 0; +SELECT @@session.ROCKSDB_WRITE_DISABLE_WAL; +@@session.ROCKSDB_WRITE_DISABLE_WAL +0 +"Setting the session scope variable back to default" +SET @@session.ROCKSDB_WRITE_DISABLE_WAL = DEFAULT; +SELECT @@session.ROCKSDB_WRITE_DISABLE_WAL; +@@session.ROCKSDB_WRITE_DISABLE_WAL +0 +"Trying to set variable @@session.ROCKSDB_WRITE_DISABLE_WAL to on" +SET @@session.ROCKSDB_WRITE_DISABLE_WAL = on; +SELECT @@session.ROCKSDB_WRITE_DISABLE_WAL; +@@session.ROCKSDB_WRITE_DISABLE_WAL +1 +"Setting the session scope variable back to default" +SET @@session.ROCKSDB_WRITE_DISABLE_WAL = DEFAULT; +SELECT @@session.ROCKSDB_WRITE_DISABLE_WAL; +@@session.ROCKSDB_WRITE_DISABLE_WAL +0 +"Trying to set variable @@session.ROCKSDB_WRITE_DISABLE_WAL to off" +SET @@session.ROCKSDB_WRITE_DISABLE_WAL = off; +SELECT 
@@session.ROCKSDB_WRITE_DISABLE_WAL; +@@session.ROCKSDB_WRITE_DISABLE_WAL +0 +"Setting the session scope variable back to default" +SET @@session.ROCKSDB_WRITE_DISABLE_WAL = DEFAULT; +SELECT @@session.ROCKSDB_WRITE_DISABLE_WAL; +@@session.ROCKSDB_WRITE_DISABLE_WAL +0 +'# Testing with invalid values in global scope #' +"Trying to set variable @@global.ROCKSDB_WRITE_DISABLE_WAL to 'aaa'" +SET @@global.ROCKSDB_WRITE_DISABLE_WAL = 'aaa'; +Got one of the listed errors +SELECT @@global.ROCKSDB_WRITE_DISABLE_WAL; +@@global.ROCKSDB_WRITE_DISABLE_WAL +0 +SET @@global.ROCKSDB_WRITE_DISABLE_WAL = @start_global_value; +SELECT @@global.ROCKSDB_WRITE_DISABLE_WAL; +@@global.ROCKSDB_WRITE_DISABLE_WAL +0 +SET @@session.ROCKSDB_WRITE_DISABLE_WAL = @start_session_value; +SELECT @@session.ROCKSDB_WRITE_DISABLE_WAL; +@@session.ROCKSDB_WRITE_DISABLE_WAL +0 +DROP TABLE valid_values; +DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_write_ignore_missing_column_families_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_write_ignore_missing_column_families_basic.result new file mode 100644 index 0000000000000..dbe46858c9418 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_write_ignore_missing_column_families_basic.result @@ -0,0 +1,100 @@ +CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO valid_values VALUES(1); +INSERT INTO valid_values VALUES(0); +INSERT INTO valid_values VALUES('on'); +CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO invalid_values VALUES('\'aaa\''); +INSERT INTO invalid_values VALUES('\'bbb\''); +SET @start_global_value = @@global.ROCKSDB_WRITE_IGNORE_MISSING_COLUMN_FAMILIES; +SELECT @start_global_value; +@start_global_value +0 +SET @start_session_value = @@session.ROCKSDB_WRITE_IGNORE_MISSING_COLUMN_FAMILIES; +SELECT @start_session_value; +@start_session_value +0 +'# Setting to valid values in global scope#' +"Trying to set 
variable @@global.ROCKSDB_WRITE_IGNORE_MISSING_COLUMN_FAMILIES to 1" +SET @@global.ROCKSDB_WRITE_IGNORE_MISSING_COLUMN_FAMILIES = 1; +SELECT @@global.ROCKSDB_WRITE_IGNORE_MISSING_COLUMN_FAMILIES; +@@global.ROCKSDB_WRITE_IGNORE_MISSING_COLUMN_FAMILIES +1 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_WRITE_IGNORE_MISSING_COLUMN_FAMILIES = DEFAULT; +SELECT @@global.ROCKSDB_WRITE_IGNORE_MISSING_COLUMN_FAMILIES; +@@global.ROCKSDB_WRITE_IGNORE_MISSING_COLUMN_FAMILIES +0 +"Trying to set variable @@global.ROCKSDB_WRITE_IGNORE_MISSING_COLUMN_FAMILIES to 0" +SET @@global.ROCKSDB_WRITE_IGNORE_MISSING_COLUMN_FAMILIES = 0; +SELECT @@global.ROCKSDB_WRITE_IGNORE_MISSING_COLUMN_FAMILIES; +@@global.ROCKSDB_WRITE_IGNORE_MISSING_COLUMN_FAMILIES +0 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_WRITE_IGNORE_MISSING_COLUMN_FAMILIES = DEFAULT; +SELECT @@global.ROCKSDB_WRITE_IGNORE_MISSING_COLUMN_FAMILIES; +@@global.ROCKSDB_WRITE_IGNORE_MISSING_COLUMN_FAMILIES +0 +"Trying to set variable @@global.ROCKSDB_WRITE_IGNORE_MISSING_COLUMN_FAMILIES to on" +SET @@global.ROCKSDB_WRITE_IGNORE_MISSING_COLUMN_FAMILIES = on; +SELECT @@global.ROCKSDB_WRITE_IGNORE_MISSING_COLUMN_FAMILIES; +@@global.ROCKSDB_WRITE_IGNORE_MISSING_COLUMN_FAMILIES +1 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_WRITE_IGNORE_MISSING_COLUMN_FAMILIES = DEFAULT; +SELECT @@global.ROCKSDB_WRITE_IGNORE_MISSING_COLUMN_FAMILIES; +@@global.ROCKSDB_WRITE_IGNORE_MISSING_COLUMN_FAMILIES +0 +'# Setting to valid values in session scope#' +"Trying to set variable @@session.ROCKSDB_WRITE_IGNORE_MISSING_COLUMN_FAMILIES to 1" +SET @@session.ROCKSDB_WRITE_IGNORE_MISSING_COLUMN_FAMILIES = 1; +SELECT @@session.ROCKSDB_WRITE_IGNORE_MISSING_COLUMN_FAMILIES; +@@session.ROCKSDB_WRITE_IGNORE_MISSING_COLUMN_FAMILIES +1 +"Setting the session scope variable back to default" +SET @@session.ROCKSDB_WRITE_IGNORE_MISSING_COLUMN_FAMILIES = DEFAULT; +SELECT 
@@session.ROCKSDB_WRITE_IGNORE_MISSING_COLUMN_FAMILIES; +@@session.ROCKSDB_WRITE_IGNORE_MISSING_COLUMN_FAMILIES +0 +"Trying to set variable @@session.ROCKSDB_WRITE_IGNORE_MISSING_COLUMN_FAMILIES to 0" +SET @@session.ROCKSDB_WRITE_IGNORE_MISSING_COLUMN_FAMILIES = 0; +SELECT @@session.ROCKSDB_WRITE_IGNORE_MISSING_COLUMN_FAMILIES; +@@session.ROCKSDB_WRITE_IGNORE_MISSING_COLUMN_FAMILIES +0 +"Setting the session scope variable back to default" +SET @@session.ROCKSDB_WRITE_IGNORE_MISSING_COLUMN_FAMILIES = DEFAULT; +SELECT @@session.ROCKSDB_WRITE_IGNORE_MISSING_COLUMN_FAMILIES; +@@session.ROCKSDB_WRITE_IGNORE_MISSING_COLUMN_FAMILIES +0 +"Trying to set variable @@session.ROCKSDB_WRITE_IGNORE_MISSING_COLUMN_FAMILIES to on" +SET @@session.ROCKSDB_WRITE_IGNORE_MISSING_COLUMN_FAMILIES = on; +SELECT @@session.ROCKSDB_WRITE_IGNORE_MISSING_COLUMN_FAMILIES; +@@session.ROCKSDB_WRITE_IGNORE_MISSING_COLUMN_FAMILIES +1 +"Setting the session scope variable back to default" +SET @@session.ROCKSDB_WRITE_IGNORE_MISSING_COLUMN_FAMILIES = DEFAULT; +SELECT @@session.ROCKSDB_WRITE_IGNORE_MISSING_COLUMN_FAMILIES; +@@session.ROCKSDB_WRITE_IGNORE_MISSING_COLUMN_FAMILIES +0 +'# Testing with invalid values in global scope #' +"Trying to set variable @@global.ROCKSDB_WRITE_IGNORE_MISSING_COLUMN_FAMILIES to 'aaa'" +SET @@global.ROCKSDB_WRITE_IGNORE_MISSING_COLUMN_FAMILIES = 'aaa'; +Got one of the listed errors +SELECT @@global.ROCKSDB_WRITE_IGNORE_MISSING_COLUMN_FAMILIES; +@@global.ROCKSDB_WRITE_IGNORE_MISSING_COLUMN_FAMILIES +0 +"Trying to set variable @@global.ROCKSDB_WRITE_IGNORE_MISSING_COLUMN_FAMILIES to 'bbb'" +SET @@global.ROCKSDB_WRITE_IGNORE_MISSING_COLUMN_FAMILIES = 'bbb'; +Got one of the listed errors +SELECT @@global.ROCKSDB_WRITE_IGNORE_MISSING_COLUMN_FAMILIES; +@@global.ROCKSDB_WRITE_IGNORE_MISSING_COLUMN_FAMILIES +0 +SET @@global.ROCKSDB_WRITE_IGNORE_MISSING_COLUMN_FAMILIES = @start_global_value; +SELECT @@global.ROCKSDB_WRITE_IGNORE_MISSING_COLUMN_FAMILIES; 
+@@global.ROCKSDB_WRITE_IGNORE_MISSING_COLUMN_FAMILIES +0 +SET @@session.ROCKSDB_WRITE_IGNORE_MISSING_COLUMN_FAMILIES = @start_session_value; +SELECT @@session.ROCKSDB_WRITE_IGNORE_MISSING_COLUMN_FAMILIES; +@@session.ROCKSDB_WRITE_IGNORE_MISSING_COLUMN_FAMILIES +0 +DROP TABLE valid_values; +DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/suite.opt b/storage/rocksdb/mysql-test/rocksdb_sys_vars/suite.opt new file mode 100644 index 0000000000000..431fc33145814 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/suite.opt @@ -0,0 +1,2 @@ +--ignore-db-dirs=.rocksdb --plugin-load=$HA_ROCKSDB_SO + diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/suite.pm b/storage/rocksdb/mysql-test/rocksdb_sys_vars/suite.pm new file mode 100644 index 0000000000000..c8452b55227a1 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/suite.pm @@ -0,0 +1,21 @@ +package My::Suite::Rocksdb_sys_vars; + +# +# Note: The below is copied from ../rocksdb/suite.pm +# +@ISA = qw(My::Suite); +use My::Find; +use File::Basename; +use strict; + +sub is_default { not $::opt_embedded_server } + +my $sst_dump= +::mtr_exe_maybe_exists( + "$::bindir/storage/rocksdb$::opt_vs_config/sst_dump", + "$::path_client_bindir/sst_dump"); +return "RocksDB is not compiled, no sst_dump" unless $sst_dump; +$ENV{MARIAROCKS_SST_DUMP}="$sst_dump"; + +bless { }; + diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/all_vars.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/all_vars.test new file mode 100644 index 0000000000000..fefd9e39af2d8 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/all_vars.test @@ -0,0 +1,39 @@ +--source include/not_embedded.inc +--source include/not_threadpool.inc + +# This test verifies that *all* MyRocks system variables are tested by the +# rocksdb_sys_vars suite. For every MyRocks system variable there must be a +# _basic.test file. 
+# + +# +# we can diff in perl or in sql, as it's my_SQL_test suite, do it in sql +# + +perl; + use File::Basename; + my $dirname=dirname($ENV{MYSQLTEST_FILE}); + my @all_tests=<$dirname/*_basic{,_32,_64}.test>; + open(F, '>', "$ENV{MYSQLTEST_VARDIR}/tmp/rocksdb_sys_vars.all_vars.txt") or die; + binmode F; + print F join "\n", sort map { s/_basic(_32|_64)?\.test$//; basename $_ } @all_tests; +EOF + +create table t1 (test_name text) engine=MyISAM; +create table t2 (variable_name text) engine=MyISAM; +--replace_result $MYSQLTEST_VARDIR MYSQLTEST_VARDIR +eval load data infile "$MYSQLTEST_VARDIR/tmp/rocksdb_sys_vars.all_vars.txt" into table t1; + +insert into t2 select variable_name from information_schema.global_variables where variable_name like "rocksdb_%"; +insert into t2 select variable_name from information_schema.session_variables where variable_name like "rocksdb_%"; + +--sorted_result +select variable_name as `There should be *no* long test name listed below:` from t2 + where length(variable_name) > 50; + +--sorted_result +select variable_name as `There should be *no* variables listed below:` from t2 + left join t1 on variable_name=test_name where test_name is null ORDER BY variable_name; + +drop table t1; +drop table t2; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_access_hint_on_compaction_start_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_access_hint_on_compaction_start_basic.test new file mode 100644 index 0000000000000..51765574a914a --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_access_hint_on_compaction_start_basic.test @@ -0,0 +1,7 @@ +--source include/have_rocksdb.inc + +--let $sys_var=ROCKSDB_ACCESS_HINT_ON_COMPACTION_START +--let $read_only=1 +--let $session=0 +--source include/rocksdb_sys_var.inc + diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_advise_random_on_open_basic.test 
b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_advise_random_on_open_basic.test new file mode 100644 index 0000000000000..06593363df20b --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_advise_random_on_open_basic.test @@ -0,0 +1,6 @@ +--source include/have_rocksdb.inc + +--let $sys_var=ROCKSDB_ADVISE_RANDOM_ON_OPEN +--let $read_only=1 +--let $session=0 +--source include/rocksdb_sys_var.inc diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_allow_concurrent_memtable_write_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_allow_concurrent_memtable_write_basic.test new file mode 100644 index 0000000000000..4f618609223c3 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_allow_concurrent_memtable_write_basic.test @@ -0,0 +1,18 @@ +--source include/have_rocksdb.inc + +CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO valid_values VALUES(1); +INSERT INTO valid_values VALUES(0); +INSERT INTO valid_values VALUES('on'); + +CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO invalid_values VALUES('\'aaa\''); +INSERT INTO invalid_values VALUES('\'bbb\''); + +--let $sys_var=ROCKSDB_ALLOW_CONCURRENT_MEMTABLE_WRITE +--let $read_only=0 +--let $session=0 +--source include/rocksdb_sys_var.inc + +DROP TABLE valid_values; +DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_allow_mmap_reads_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_allow_mmap_reads_basic.test new file mode 100644 index 0000000000000..72fed6b22754a --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_allow_mmap_reads_basic.test @@ -0,0 +1,6 @@ +--source include/have_rocksdb.inc + +--let $sys_var=ROCKSDB_ALLOW_MMAP_READS +--let $read_only=1 +--let $session=0 +--source include/rocksdb_sys_var.inc diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_allow_mmap_writes_basic.test 
b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_allow_mmap_writes_basic.test new file mode 100644 index 0000000000000..8b8da14f56c2f --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_allow_mmap_writes_basic.test @@ -0,0 +1,6 @@ +--source include/have_rocksdb.inc + +--let $sys_var=ROCKSDB_ALLOW_MMAP_WRITES +--let $read_only=1 +--let $session=0 +--source include/rocksdb_sys_var.inc diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_background_sync_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_background_sync_basic.test new file mode 100644 index 0000000000000..d8efc08226624 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_background_sync_basic.test @@ -0,0 +1,18 @@ +--source include/have_rocksdb.inc + +CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO valid_values VALUES(1); +INSERT INTO valid_values VALUES(0); +INSERT INTO valid_values VALUES('on'); +INSERT INTO valid_values VALUES('off'); + +CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO invalid_values VALUES('\'aaa\''); + +--let $sys_var=ROCKSDB_BACKGROUND_SYNC +--let $read_only=0 +--let $session=0 +--source include/rocksdb_sys_var.inc + +DROP TABLE valid_values; +DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_base_background_compactions_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_base_background_compactions_basic.test new file mode 100644 index 0000000000000..9f001ce103ec3 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_base_background_compactions_basic.test @@ -0,0 +1,7 @@ +--source include/have_rocksdb.inc + +--let $sys_var=ROCKSDB_BASE_BACKGROUND_COMPACTIONS +--let $read_only=1 +--let $session=0 +--source include/rocksdb_sys_var.inc + diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_blind_delete_primary_key_basic.test 
b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_blind_delete_primary_key_basic.test new file mode 100644 index 0000000000000..da972cccf9a44 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_blind_delete_primary_key_basic.test @@ -0,0 +1,18 @@ +--source include/have_rocksdb.inc + +CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO valid_values VALUES(1); +INSERT INTO valid_values VALUES(0); +INSERT INTO valid_values VALUES('on'); + +CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO invalid_values VALUES('\'aaa\''); +INSERT INTO invalid_values VALUES('\'bbb\''); + +--let $sys_var=ROCKSDB_BLIND_DELETE_PRIMARY_KEY +--let $read_only=0 +--let $session=1 +--source include/rocksdb_sys_var.inc + +DROP TABLE valid_values; +DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_block_cache_size_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_block_cache_size_basic.test new file mode 100644 index 0000000000000..39688e635567d --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_block_cache_size_basic.test @@ -0,0 +1,7 @@ +--source include/have_rocksdb.inc + +--let $sys_var=ROCKSDB_BLOCK_CACHE_SIZE +--let $read_only=1 +--let $session=0 +--source include/rocksdb_sys_var.inc + diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_block_restart_interval_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_block_restart_interval_basic.test new file mode 100644 index 0000000000000..0688ef732810e --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_block_restart_interval_basic.test @@ -0,0 +1,6 @@ +--source include/have_rocksdb.inc + +--let $sys_var=ROCKSDB_BLOCK_RESTART_INTERVAL +--let $read_only=1 +--let $session=0 +--source include/rocksdb_sys_var.inc diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_block_size_basic.test 
b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_block_size_basic.test new file mode 100644 index 0000000000000..150c1e533a73f --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_block_size_basic.test @@ -0,0 +1,7 @@ +--source include/have_rocksdb.inc + +--let $sys_var=ROCKSDB_BLOCK_SIZE +--let $read_only=1 +--let $session=0 +--source include/rocksdb_sys_var.inc + diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_block_size_deviation_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_block_size_deviation_basic.test new file mode 100644 index 0000000000000..98d179c028c8b --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_block_size_deviation_basic.test @@ -0,0 +1,7 @@ +--source include/have_rocksdb.inc + +--let $sys_var=ROCKSDB_BLOCK_SIZE_DEVIATION +--let $read_only=1 +--let $session=0 +--source include/rocksdb_sys_var.inc + diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_bulk_load_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_bulk_load_basic.test new file mode 100644 index 0000000000000..dd55c849adba7 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_bulk_load_basic.test @@ -0,0 +1,18 @@ +--source include/have_rocksdb.inc + +CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO valid_values VALUES(1); +INSERT INTO valid_values VALUES(0); +INSERT INTO valid_values VALUES('on'); + +CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO invalid_values VALUES('\'aaa\''); +INSERT INTO invalid_values VALUES('\'bbb\''); + +--let $sys_var=ROCKSDB_BULK_LOAD +--let $read_only=0 +--let $session=1 +--source include/rocksdb_sys_var.inc + +DROP TABLE valid_values; +DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_bulk_load_size_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_bulk_load_size_basic.test new file mode 100644 index 
0000000000000..70d1c44806acb --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_bulk_load_size_basic.test @@ -0,0 +1,16 @@ +--source include/have_rocksdb.inc + +CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO valid_values VALUES(1); +INSERT INTO valid_values VALUES(1024); + +CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO invalid_values VALUES('\'aaa\''); + +--let $sys_var=ROCKSDB_BULK_LOAD_SIZE +--let $read_only=0 +--let $session=1 +--source include/rocksdb_sys_var.inc + +DROP TABLE valid_values; +DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_bytes_per_sync_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_bytes_per_sync_basic.test new file mode 100644 index 0000000000000..d1d6b2b5695b5 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_bytes_per_sync_basic.test @@ -0,0 +1,7 @@ +--source include/have_rocksdb.inc + +--let $sys_var=ROCKSDB_BYTES_PER_SYNC +--let $read_only=1 +--let $session=0 +--source include/rocksdb_sys_var.inc + diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_cache_index_and_filter_blocks_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_cache_index_and_filter_blocks_basic.test new file mode 100644 index 0000000000000..27d0aa99d019d --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_cache_index_and_filter_blocks_basic.test @@ -0,0 +1,6 @@ +--source include/have_rocksdb.inc + +--let $sys_var=ROCKSDB_CACHE_INDEX_AND_FILTER_BLOCKS +--let $read_only=1 +--let $session=0 +--source include/rocksdb_sys_var.inc diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_checksums_pct_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_checksums_pct_basic.test new file mode 100644 index 0000000000000..b595cb62a56ce --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_checksums_pct_basic.test 
@@ -0,0 +1,17 @@ +--source include/have_rocksdb.inc + +CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO valid_values VALUES(0); +INSERT INTO valid_values VALUES(1); +INSERT INTO valid_values VALUES(99); + +CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO invalid_values VALUES('\'aaa\''); + +--let $sys_var=ROCKSDB_CHECKSUMS_PCT +--let $read_only=0 +--let $session=1 +--source include/rocksdb_sys_var.inc + +DROP TABLE valid_values; +DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_collect_sst_properties_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_collect_sst_properties_basic.test new file mode 100644 index 0000000000000..9c0e111d7b944 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_collect_sst_properties_basic.test @@ -0,0 +1,8 @@ +--source include/have_rocksdb.inc + +--let $sys_var=ROCKSDB_COLLECT_SST_PROPERTIES +--let $read_only=1 +--let $session=0 +--source include/rocksdb_sys_var.inc + + diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_commit_in_the_middle_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_commit_in_the_middle_basic.test new file mode 100644 index 0000000000000..ec860cfcfc203 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_commit_in_the_middle_basic.test @@ -0,0 +1,18 @@ +--source include/have_rocksdb.inc + +CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO valid_values VALUES(1); +INSERT INTO valid_values VALUES(0); +INSERT INTO valid_values VALUES('on'); + +CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO invalid_values VALUES('\'aaa\''); +INSERT INTO invalid_values VALUES('\'bbb\''); + +--let $sys_var=ROCKSDB_COMMIT_IN_THE_MIDDLE +--let $read_only=0 +--let $session=1 +--source include/rocksdb_sys_var.inc + +DROP TABLE valid_values; +DROP TABLE invalid_values; diff --git 
a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_compact_cf_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_compact_cf_basic.test new file mode 100644 index 0000000000000..736f6754b6f0b --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_compact_cf_basic.test @@ -0,0 +1,19 @@ + +call mtr.add_suppression(" Column family '[a-z]*' not found."); + +--source include/have_rocksdb.inc + +CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO valid_values VALUES('abc'); +INSERT INTO valid_values VALUES('def'); + +CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; + +--let $sys_var=ROCKSDB_COMPACT_CF +--let $read_only=0 +--let $session=0 +--let $sticky=1 +--source include/rocksdb_sys_var.inc + +DROP TABLE valid_values; +DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_compaction_readahead_size_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_compaction_readahead_size_basic.test new file mode 100644 index 0000000000000..c0651a3a14dd3 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_compaction_readahead_size_basic.test @@ -0,0 +1,23 @@ +--source include/have_rocksdb.inc + +CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO valid_values VALUES(1); +INSERT INTO valid_values VALUES(0); +INSERT INTO valid_values VALUES(222333); + +CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO invalid_values VALUES('\'aaa\''); +INSERT INTO invalid_values VALUES('\'bbb\''); + +# Attempt to set the value to -1 - this should first truncate to 0 and then generate a warning as +# we can't set it to or from 0 +SET @@global.rocksdb_compaction_readahead_size = -1; +SELECT @@global.rocksdb_compaction_readahead_size; + +--let $sys_var=ROCKSDB_COMPACTION_READAHEAD_SIZE +--let $read_only=0 +--let $session=0 +--source include/rocksdb_sys_var.inc + +DROP TABLE valid_values; +DROP TABLE 
invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_compaction_sequential_deletes_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_compaction_sequential_deletes_basic.test new file mode 100644 index 0000000000000..24399c85d88b6 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_compaction_sequential_deletes_basic.test @@ -0,0 +1,18 @@ +--source include/have_rocksdb.inc + +CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO valid_values VALUES(1); +INSERT INTO valid_values VALUES(1024); +INSERT INTO valid_values VALUES(2000000); + +CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO invalid_values VALUES('\'aaa\''); +INSERT INTO invalid_values VALUES('\'2000001\''); + +--let $sys_var=ROCKSDB_COMPACTION_SEQUENTIAL_DELETES +--let $read_only=0 +--let $session=0 +--source include/rocksdb_sys_var.inc + +DROP TABLE valid_values; +DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_compaction_sequential_deletes_count_sd_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_compaction_sequential_deletes_count_sd_basic.test new file mode 100644 index 0000000000000..b3a437d6cd4a0 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_compaction_sequential_deletes_count_sd_basic.test @@ -0,0 +1,18 @@ +--source include/have_rocksdb.inc + +CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO valid_values VALUES(1); +INSERT INTO valid_values VALUES(0); +INSERT INTO valid_values VALUES('on'); + +CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO invalid_values VALUES('\'aaa\''); +INSERT INTO invalid_values VALUES('\'bbb\''); + +--let $sys_var=ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_COUNT_SD +--let $read_only=0 +--let $session=0 +--source include/rocksdb_sys_var.inc + +DROP TABLE valid_values; +DROP TABLE invalid_values; diff --git 
a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_compaction_sequential_deletes_file_size_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_compaction_sequential_deletes_file_size_basic.test new file mode 100644 index 0000000000000..aaf711792216b --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_compaction_sequential_deletes_file_size_basic.test @@ -0,0 +1,16 @@ +--source include/have_rocksdb.inc + +CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO valid_values VALUES(1); +INSERT INTO valid_values VALUES(1024); + +CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO invalid_values VALUES('\'aaa\''); + +--let $sys_var=ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_FILE_SIZE +--let $read_only=0 +--let $session=0 +--source include/rocksdb_sys_var.inc + +DROP TABLE valid_values; +DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_compaction_sequential_deletes_window_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_compaction_sequential_deletes_window_basic.test new file mode 100644 index 0000000000000..d5be34695c0d8 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_compaction_sequential_deletes_window_basic.test @@ -0,0 +1,18 @@ +--source include/have_rocksdb.inc + +CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO valid_values VALUES(1); +INSERT INTO valid_values VALUES(1024); +INSERT INTO valid_values VALUES(2000000); + +CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO invalid_values VALUES('\'aaa\''); +INSERT INTO invalid_values VALUES('\'2000001\''); + +--let $sys_var=ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_WINDOW +--let $read_only=0 +--let $session=0 +--source include/rocksdb_sys_var.inc + +DROP TABLE valid_values; +DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_create_checkpoint_basic.test 
b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_create_checkpoint_basic.test new file mode 100644 index 0000000000000..2850c7a1a3863 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_create_checkpoint_basic.test @@ -0,0 +1,29 @@ +--source include/have_rocksdb.inc + +--eval SET @start_value = @@global.ROCKSDB_CREATE_CHECKPOINT + +# Test using tmp/abc +--replace_result $MYSQL_TMP_DIR TMP +--eval SET @@global.ROCKSDB_CREATE_CHECKPOINT = '$MYSQL_TMP_DIR/abc' +--eval SELECT @@global.ROCKSDB_CREATE_CHECKPOINT +--eval SET @@global.ROCKSDB_CREATE_CHECKPOINT = DEFAULT + +# Test using tmp/def +--replace_result $MYSQL_TMP_DIR TMP +--eval SET @@global.ROCKSDB_CREATE_CHECKPOINT = '$MYSQL_TMP_DIR/def' +--eval SELECT @@global.ROCKSDB_CREATE_CHECKPOINT +--eval SET @@global.ROCKSDB_CREATE_CHECKPOINT = DEFAULT + +# Should fail because it is not a session +--Error ER_GLOBAL_VARIABLE +--eval SET @@session.ROCKSDB_CREATE_CHECKPOINT = 444 + +# Set back to original value +# validate that DEFAULT causes failure in creating checkpoint since +# DEFAULT == '' +--error ER_UNKNOWN_ERROR +--eval SET @@global.ROCKSDB_CREATE_CHECKPOINT = @start_value + +# clean up +--exec rm -r $MYSQL_TMP_DIR/abc +--exec rm -r $MYSQL_TMP_DIR/def diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_create_if_missing_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_create_if_missing_basic.test new file mode 100644 index 0000000000000..ab92a0a08679e --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_create_if_missing_basic.test @@ -0,0 +1,16 @@ +--source include/have_rocksdb.inc + +CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO valid_values VALUES(1); +INSERT INTO valid_values VALUES(1024); + +CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO invalid_values VALUES('\'aaa\''); + +--let $sys_var=ROCKSDB_CREATE_IF_MISSING +--let $read_only=1 +--let $session=0 +--source 
include/rocksdb_sys_var.inc + +DROP TABLE valid_values; +DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_create_missing_column_families_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_create_missing_column_families_basic.test new file mode 100644 index 0000000000000..21c0f0ead2cab --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_create_missing_column_families_basic.test @@ -0,0 +1,16 @@ +--source include/have_rocksdb.inc + +CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO valid_values VALUES(1); +INSERT INTO valid_values VALUES(1024); + +CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO invalid_values VALUES('\'aaa\''); + +--let $sys_var=ROCKSDB_CREATE_MISSING_COLUMN_FAMILIES +--let $read_only=1 +--let $session=0 +--source include/rocksdb_sys_var.inc + +DROP TABLE valid_values; +DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_datadir_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_datadir_basic.test new file mode 100644 index 0000000000000..fd3569c8f0a27 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_datadir_basic.test @@ -0,0 +1,6 @@ +--source include/have_rocksdb.inc + +--let $sys_var=ROCKSDB_DATADIR +--let $read_only=1 +--let $session=0 +--source include/rocksdb_sys_var.inc diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_db_write_buffer_size_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_db_write_buffer_size_basic.test new file mode 100644 index 0000000000000..df6a24902af08 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_db_write_buffer_size_basic.test @@ -0,0 +1,6 @@ +--source include/have_rocksdb.inc + +--let $sys_var=ROCKSDB_DB_WRITE_BUFFER_SIZE +--let $read_only=1 +--let $session=0 +--source include/rocksdb_sys_var.inc diff --git 
a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_deadlock_detect_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_deadlock_detect_basic.test new file mode 100644 index 0000000000000..980be0f3924e9 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_deadlock_detect_basic.test @@ -0,0 +1,20 @@ +--source include/have_rocksdb.inc + +CREATE TABLE valid_values (value varchar(255)); +INSERT INTO valid_values VALUES(1); +INSERT INTO valid_values VALUES(0); +INSERT INTO valid_values VALUES('on'); +INSERT INTO valid_values VALUES('off'); + +CREATE TABLE invalid_values (value varchar(255)); +INSERT INTO invalid_values VALUES('\'aaa\''); +INSERT INTO invalid_values VALUES('\'bbb\''); + +--let $sys_var=ROCKSDB_DEADLOCK_DETECT +--let $read_only=0 +--let $session=1 +--let $sticky=1 +--source include/rocksdb_sys_var.inc + +DROP TABLE valid_values; +DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_debug_optimizer_no_zero_cardinality_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_debug_optimizer_no_zero_cardinality_basic.test new file mode 100644 index 0000000000000..41c4ae6322d26 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_debug_optimizer_no_zero_cardinality_basic.test @@ -0,0 +1,18 @@ +--source include/have_rocksdb.inc + +CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO valid_values VALUES(1); +INSERT INTO valid_values VALUES(0); +INSERT INTO valid_values VALUES('on'); + +CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO invalid_values VALUES('\'aaa\''); +INSERT INTO invalid_values VALUES('\'bbb\''); + +--let $sys_var=ROCKSDB_DEBUG_OPTIMIZER_NO_ZERO_CARDINALITY +--let $read_only=0 +--let $session=0 +--source include/rocksdb_sys_var.inc + +DROP TABLE valid_values; +DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_default_cf_options_basic.test 
b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_default_cf_options_basic.test new file mode 100644 index 0000000000000..1febc6db093bf --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_default_cf_options_basic.test @@ -0,0 +1,6 @@ +--source include/have_rocksdb.inc + +--let $sys_var=ROCKSDB_DEFAULT_CF_OPTIONS +--let $read_only=1 +--let $session=0 +--source include/rocksdb_sys_var.inc diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_delayed_write_rate_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_delayed_write_rate_basic.test new file mode 100644 index 0000000000000..8068b6b87e887 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_delayed_write_rate_basic.test @@ -0,0 +1,22 @@ +--source include/have_rocksdb.inc + +CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO valid_values VALUES(100); +INSERT INTO valid_values VALUES(1); +INSERT INTO valid_values VALUES(0); + +CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO invalid_values VALUES('\'aaa\''); +INSERT INTO invalid_values VALUES('\'bbb\''); +INSERT INTO invalid_values VALUES('\'-1\''); +INSERT INTO invalid_values VALUES('\'101\''); +INSERT INTO invalid_values VALUES('\'484436\''); + +--let $sys_var=ROCKSDB_DELAYED_WRITE_RATE +--let $read_only=0 +--let $session=0 +--source include/rocksdb_sys_var.inc + +DROP TABLE valid_values; +DROP TABLE invalid_values; + diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_delete_obsolete_files_period_micros_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_delete_obsolete_files_period_micros_basic.test new file mode 100644 index 0000000000000..3c2cd2db87ff2 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_delete_obsolete_files_period_micros_basic.test @@ -0,0 +1,6 @@ +--source include/have_rocksdb.inc + +--let $sys_var=ROCKSDB_DELETE_OBSOLETE_FILES_PERIOD_MICROS +--let 
$read_only=1 +--let $session=0 +--source include/rocksdb_sys_var.inc diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_disable_2pc_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_disable_2pc_basic.test new file mode 100644 index 0000000000000..0a38895c35a48 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_disable_2pc_basic.test @@ -0,0 +1,20 @@ +--source include/have_rocksdb.inc + +CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO valid_values VALUES(1); +INSERT INTO valid_values VALUES(0); +INSERT INTO valid_values VALUES('on'); +INSERT INTO valid_values VALUES('off'); + +CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO invalid_values VALUES('\'aaa\''); +INSERT INTO invalid_values VALUES('\'bbb\''); + +--let $sys_var=ROCKSDB_ENABLE_2PC +--let $read_only=0 +--let $session=0 +--let $sticky=1 +--source include/rocksdb_sys_var.inc + +DROP TABLE valid_values; +DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_enable_bulk_load_api_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_enable_bulk_load_api_basic.test new file mode 100644 index 0000000000000..52313ffbe700e --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_enable_bulk_load_api_basic.test @@ -0,0 +1,16 @@ +--source include/have_rocksdb.inc + +CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO valid_values VALUES(1); +INSERT INTO valid_values VALUES(1024); + +CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO invalid_values VALUES('\'aaa\''); + +--let $sys_var=ROCKSDB_ENABLE_BULK_LOAD_API +--let $read_only=1 +--let $session=0 +--source include/rocksdb_sys_var.inc + +DROP TABLE valid_values; +DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_enable_thread_tracking_basic.test 
b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_enable_thread_tracking_basic.test new file mode 100644 index 0000000000000..566d56563fbf3 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_enable_thread_tracking_basic.test @@ -0,0 +1,6 @@ +--source include/have_rocksdb.inc + +--let $sys_var=ROCKSDB_ENABLE_THREAD_TRACKING +--let $read_only=1 +--let $session=0 +--source include/rocksdb_sys_var.inc diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_enable_write_thread_adaptive_yield_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_enable_write_thread_adaptive_yield_basic.test new file mode 100644 index 0000000000000..1904dd2cd6959 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_enable_write_thread_adaptive_yield_basic.test @@ -0,0 +1,18 @@ +--source include/have_rocksdb.inc + +CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO valid_values VALUES(1); +INSERT INTO valid_values VALUES(0); +INSERT INTO valid_values VALUES('on'); + +CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO invalid_values VALUES('\'aaa\''); +INSERT INTO invalid_values VALUES('\'bbb\''); + +--let $sys_var=ROCKSDB_ENABLE_WRITE_THREAD_ADAPTIVE_YIELD +--let $read_only=0 +--let $session=0 +--source include/rocksdb_sys_var.inc + +DROP TABLE valid_values; +DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_error_if_exists_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_error_if_exists_basic.test new file mode 100644 index 0000000000000..933642a73a6f6 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_error_if_exists_basic.test @@ -0,0 +1,16 @@ +--source include/have_rocksdb.inc + +CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO valid_values VALUES(1); +INSERT INTO valid_values VALUES(1024); + +CREATE TABLE invalid_values (value varchar(255)) 
ENGINE=myisam; +INSERT INTO invalid_values VALUES('\'aaa\''); + +--let $sys_var=ROCKSDB_ERROR_IF_EXISTS +--let $read_only=1 +--let $session=0 +--source include/rocksdb_sys_var.inc + +DROP TABLE valid_values; +DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_flush_log_at_trx_commit_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_flush_log_at_trx_commit_basic.test new file mode 100644 index 0000000000000..3a8ac014c7dd1 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_flush_log_at_trx_commit_basic.test @@ -0,0 +1,18 @@ +--source include/have_rocksdb.inc + +CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO valid_values VALUES(2); +INSERT INTO valid_values VALUES(1); +INSERT INTO valid_values VALUES(0); + +CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO invalid_values VALUES('\'aaa\''); + +--let $sys_var=ROCKSDB_FLUSH_LOG_AT_TRX_COMMIT +--let $read_only=0 +--let $session=1 +--source include/rocksdb_sys_var.inc + +DROP TABLE valid_values; +DROP TABLE invalid_values; + diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_flush_memtable_on_analyze_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_flush_memtable_on_analyze_basic.test new file mode 100644 index 0000000000000..c7e04f894982e --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_flush_memtable_on_analyze_basic.test @@ -0,0 +1,46 @@ +--source include/have_rocksdb.inc + +--disable_warnings +drop table if exists t1; +--enable_warnings + +## +## test cardinality for analyze statements after flushing table +## + +CREATE TABLE t1 (a INT AUTO_INCREMENT, b INT, PRIMARY KEY(a)) ENGINE=rocksdb; +SHOW CREATE TABLE t1; +INSERT INTO t1 (b) VALUES (1); +INSERT INTO t1 (b) VALUES (2); +INSERT INTO t1 (b) VALUES (3); +--sorted_result +SELECT * FROM t1; + +set session rocksdb_flush_memtable_on_analyze=off; +ANALYZE TABLE t1; +SHOW 
INDEXES FROM t1; + +set session rocksdb_flush_memtable_on_analyze=on; +ANALYZE TABLE t1; +SHOW INDEXES FROM t1; +DROP TABLE t1; + +## +## test data length for show table status statements for tables with few rows +## + +CREATE TABLE t1 (a INT AUTO_INCREMENT, b INT, PRIMARY KEY(a)) ENGINE=rocksdb; +SHOW CREATE TABLE t1; +INSERT INTO t1 (b) VALUES (1); +INSERT INTO t1 (b) VALUES (2); +INSERT INTO t1 (b) VALUES (3); +--sorted_result +SELECT * FROM t1; + +--replace_column 5 # 6 # +SHOW TABLE STATUS LIKE 't1'; +ANALYZE TABLE t1; +--replace_column 5 # 6 # +SHOW TABLE STATUS LIKE 't1'; + +DROP TABLE t1; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_force_compute_memtable_stats_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_force_compute_memtable_stats_basic.test new file mode 100644 index 0000000000000..3a0d7f63938c3 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_force_compute_memtable_stats_basic.test @@ -0,0 +1,23 @@ +--source include/have_rocksdb.inc +--disable_warnings +DROP TABLE IF EXISTS t; +--enable_warnings + +CREATE TABLE t (a INT PRIMARY KEY, b CHAR(8)) ENGINE=rocksdb; +INSERT INTO t (a,b) VALUES (1,'bar'),(2,'foo'); + +SET @ORIG_PAUSE_BACKGROUND_WORK = @@rocksdb_force_compute_memtable_stats; +set global rocksdb_force_flush_memtable_now = true; + +INSERT INTO t (a,b) VALUES (3,'dead'),(4,'beef'),(5,'a'),(6,'bbb'),(7,'c'),(8,'d'); + +set global rocksdb_force_compute_memtable_stats=0; +SELECT TABLE_ROWS INTO @ROWS_EXCLUDE_MEMTABLE FROM information_schema.TABLES WHERE table_name = 't'; + +set global rocksdb_force_compute_memtable_stats=1; +SELECT TABLE_ROWS INTO @ROWS_INCLUDE_MEMTABLE FROM information_schema.TABLES WHERE table_name = 't'; + +select case when @ROWS_INCLUDE_MEMTABLE-@ROWS_EXCLUDE_MEMTABLE > 0 then 'true' else 'false' end; + +DROP TABLE t; +set global rocksdb_force_compute_memtable_stats = @ORIG_PAUSE_BACKGROUND_WORK; diff --git 
a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_force_flush_memtable_now_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_force_flush_memtable_now_basic.test new file mode 100644 index 0000000000000..4386af1ee19e9 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_force_flush_memtable_now_basic.test @@ -0,0 +1,17 @@ +--source include/have_rocksdb.inc + +CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO valid_values VALUES(1); +INSERT INTO valid_values VALUES(0); +INSERT INTO valid_values VALUES('on'); + +CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; + +--let $sys_var=ROCKSDB_FORCE_FLUSH_MEMTABLE_NOW +--let $read_only=0 +--let $session=0 +--let $sticky=1 +--source include/rocksdb_sys_var.inc + +DROP TABLE valid_values; +DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_force_index_records_in_range_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_force_index_records_in_range_basic.test new file mode 100644 index 0000000000000..30263ea4aa1ab --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_force_index_records_in_range_basic.test @@ -0,0 +1,23 @@ +--source include/have_rocksdb.inc + +CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO valid_values VALUES(1); +INSERT INTO valid_values VALUES(0); +INSERT INTO valid_values VALUES(222333); + +CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO invalid_values VALUES('\'aaa\''); +INSERT INTO invalid_values VALUES('\'bbb\''); + +# Attempt to set the value to -1 - this should first truncate to 0 and then generate a warning as +# we can't set it to or from 0 +SET @@session.rocksdb_force_index_records_in_range = -1; +SELECT @@session.rocksdb_force_index_records_in_range; + +--let $sys_var=ROCKSDB_FORCE_INDEX_RECORDS_IN_RANGE +--let $read_only=0 +--let $session=1 +--source include/rocksdb_sys_var.inc + 
+DROP TABLE valid_values; +DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_hash_index_allow_collision_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_hash_index_allow_collision_basic.test new file mode 100644 index 0000000000000..e787dd33a3417 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_hash_index_allow_collision_basic.test @@ -0,0 +1,7 @@ +--source include/have_rocksdb.inc + +--let $sys_var=ROCKSDB_HASH_INDEX_ALLOW_COLLISION +--let $read_only=1 +--let $session=0 +--source include/rocksdb_sys_var.inc + diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_index_type_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_index_type_basic.test new file mode 100644 index 0000000000000..49369ffd76577 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_index_type_basic.test @@ -0,0 +1,7 @@ +--source include/have_rocksdb.inc + +--let $sys_var=ROCKSDB_INDEX_TYPE +--let $read_only=1 +--let $session=0 +--source include/rocksdb_sys_var.inc + diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_info_log_level_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_info_log_level_basic.test new file mode 100644 index 0000000000000..fb2ce5e713bf4 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_info_log_level_basic.test @@ -0,0 +1,21 @@ +--source include/have_rocksdb.inc + +CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO valid_values VALUES('fatal_level'); +INSERT INTO valid_values VALUES('error_level'); +INSERT INTO valid_values VALUES('warn_level'); +INSERT INTO valid_values VALUES('info_level'); +INSERT INTO valid_values VALUES('debug_level'); + +CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO invalid_values VALUES(5); +INSERT INTO invalid_values VALUES(6); +INSERT INTO invalid_values VALUES('foo'); + +--let 
$sys_var=ROCKSDB_INFO_LOG_LEVEL +--let $read_only=0 +--let $session=0 +--source include/rocksdb_sys_var.inc + +DROP TABLE valid_values; +DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_is_fd_close_on_exec_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_is_fd_close_on_exec_basic.test new file mode 100644 index 0000000000000..4d39c2a3656f4 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_is_fd_close_on_exec_basic.test @@ -0,0 +1,6 @@ +--source include/have_rocksdb.inc + +--let $sys_var=ROCKSDB_IS_FD_CLOSE_ON_EXEC +--let $read_only=1 +--let $session=0 +--source include/rocksdb_sys_var.inc diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_keep_log_file_num_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_keep_log_file_num_basic.test new file mode 100644 index 0000000000000..0eff718c14c2d --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_keep_log_file_num_basic.test @@ -0,0 +1,7 @@ +--source include/have_rocksdb.inc + +--let $sys_var=ROCKSDB_KEEP_LOG_FILE_NUM +--let $read_only=1 +--let $session=0 +--source include/rocksdb_sys_var.inc + diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_lock_scanned_rows_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_lock_scanned_rows_basic.test new file mode 100644 index 0000000000000..35b4128c3e56f --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_lock_scanned_rows_basic.test @@ -0,0 +1,22 @@ +--source include/have_rocksdb.inc + +CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO valid_values VALUES(1); +INSERT INTO valid_values VALUES(0); +INSERT INTO valid_values VALUES('on'); +INSERT INTO valid_values VALUES('off'); +INSERT INTO valid_values VALUES('true'); +INSERT INTO valid_values VALUES('false'); + +CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO invalid_values 
VALUES('\'aaa\''); +INSERT INTO invalid_values VALUES(2); +INSERT INTO invalid_values VALUES(1000); + +--let $sys_var=ROCKSDB_LOCK_SCANNED_ROWS +--let $read_only=0 +--let $session=1 +--source include/rocksdb_sys_var.inc + +DROP TABLE valid_values; +DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_lock_wait_timeout_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_lock_wait_timeout_basic.test new file mode 100644 index 0000000000000..24096677e1b67 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_lock_wait_timeout_basic.test @@ -0,0 +1,16 @@ +--source include/have_rocksdb.inc + +CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO valid_values VALUES(1); +INSERT INTO valid_values VALUES(1024); + +CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO invalid_values VALUES('\'aaa\''); + +--let $sys_var=ROCKSDB_LOCK_WAIT_TIMEOUT +--let $read_only=0 +--let $session=1 +--source include/rocksdb_sys_var.inc + +DROP TABLE valid_values; +DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_log_file_time_to_roll_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_log_file_time_to_roll_basic.test new file mode 100644 index 0000000000000..63a7c5fedfb6e --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_log_file_time_to_roll_basic.test @@ -0,0 +1,6 @@ +--source include/have_rocksdb.inc + +--let $sys_var=ROCKSDB_LOG_FILE_TIME_TO_ROLL +--let $read_only=1 +--let $session=0 +--source include/rocksdb_sys_var.inc diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_manifest_preallocation_size_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_manifest_preallocation_size_basic.test new file mode 100644 index 0000000000000..6f248ece9e95d --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_manifest_preallocation_size_basic.test @@ 
-0,0 +1,6 @@ +--source include/have_rocksdb.inc + +--let $sys_var=ROCKSDB_MANIFEST_PREALLOCATION_SIZE +--let $read_only=1 +--let $session=0 +--source include/rocksdb_sys_var.inc diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_master_skip_tx_api_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_master_skip_tx_api_basic.test new file mode 100644 index 0000000000000..e0d5925cad660 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_master_skip_tx_api_basic.test @@ -0,0 +1,18 @@ +--source include/have_rocksdb.inc + +CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO valid_values VALUES(1); +INSERT INTO valid_values VALUES(0); +INSERT INTO valid_values VALUES('on'); + +CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO invalid_values VALUES('\'aaa\''); +INSERT INTO invalid_values VALUES('\'bbb\''); + +--let $sys_var=ROCKSDB_MASTER_SKIP_TX_API +--let $read_only=0 +--let $session=1 +--source include/rocksdb_sys_var.inc + +DROP TABLE valid_values; +DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_max_background_compactions_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_max_background_compactions_basic.test new file mode 100644 index 0000000000000..6f0909a24c166 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_max_background_compactions_basic.test @@ -0,0 +1,16 @@ +--source include/have_rocksdb.inc + +CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO valid_values VALUES(1); +INSERT INTO valid_values VALUES(64); + +CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO invalid_values VALUES('\'abc\''); + +--let $sys_var=ROCKSDB_MAX_BACKGROUND_COMPACTIONS +--let $read_only=0 +--let $session=0 +--source include/rocksdb_sys_var.inc + +DROP TABLE valid_values; +DROP TABLE invalid_values; diff --git 
a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_max_background_flushes_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_max_background_flushes_basic.test new file mode 100644 index 0000000000000..db5b7112e9c71 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_max_background_flushes_basic.test @@ -0,0 +1,6 @@ +--source include/have_rocksdb.inc + +--let $sys_var=ROCKSDB_MAX_BACKGROUND_FLUSHES +--let $read_only=1 +--let $session=0 +--source include/rocksdb_sys_var.inc diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_max_log_file_size_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_max_log_file_size_basic.test new file mode 100644 index 0000000000000..cbe5d925fdaa5 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_max_log_file_size_basic.test @@ -0,0 +1,6 @@ +--source include/have_rocksdb.inc + +--let $sys_var=ROCKSDB_MAX_LOG_FILE_SIZE +--let $read_only=1 +--let $session=0 +--source include/rocksdb_sys_var.inc diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_max_manifest_file_size_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_max_manifest_file_size_basic.test new file mode 100644 index 0000000000000..f399b2967324c --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_max_manifest_file_size_basic.test @@ -0,0 +1,7 @@ +--source include/have_rocksdb.inc + +--let $sys_var=ROCKSDB_MAX_MANIFEST_FILE_SIZE +--let $read_only=1 +--let $session=0 +--source include/rocksdb_sys_var.inc + diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_max_open_files_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_max_open_files_basic.test new file mode 100644 index 0000000000000..ba3293264aba5 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_max_open_files_basic.test @@ -0,0 +1,6 @@ +--source include/have_rocksdb.inc + +--let $sys_var=ROCKSDB_MAX_OPEN_FILES +--let 
$read_only=1 +--let $session=0 +--source include/rocksdb_sys_var.inc diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_max_row_locks_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_max_row_locks_basic.test new file mode 100644 index 0000000000000..4eb00329cf2bf --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_max_row_locks_basic.test @@ -0,0 +1,16 @@ +--source include/have_rocksdb.inc + +CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO valid_values VALUES(1); +INSERT INTO valid_values VALUES(1024); + +CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO invalid_values VALUES('\'aaa\''); + +--let $sys_var=ROCKSDB_MAX_ROW_LOCKS +--let $read_only=0 +--let $session=1 +--source include/rocksdb_sys_var.inc + +DROP TABLE valid_values; +DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_max_subcompactions_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_max_subcompactions_basic.test new file mode 100644 index 0000000000000..a4494dd82625d --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_max_subcompactions_basic.test @@ -0,0 +1,7 @@ +--source include/have_rocksdb.inc + +--let $sys_var=ROCKSDB_MAX_SUBCOMPACTIONS +--let $read_only=1 +--let $session=0 +--source include/rocksdb_sys_var.inc + diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_max_total_wal_size_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_max_total_wal_size_basic.test new file mode 100644 index 0000000000000..35ba859c6490b --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_max_total_wal_size_basic.test @@ -0,0 +1,6 @@ +--source include/have_rocksdb.inc + +--let $sys_var=ROCKSDB_MAX_TOTAL_WAL_SIZE +--let $read_only=1 +--let $session=0 +--source include/rocksdb_sys_var.inc diff --git 
a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_merge_buf_size_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_merge_buf_size_basic.test new file mode 100644 index 0000000000000..8e2dda64d4ae2 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_merge_buf_size_basic.test @@ -0,0 +1,50 @@ +--source include/have_rocksdb.inc + +--disable_warnings +drop table if exists t1; +--enable_warnings + +set session rocksdb_merge_buf_size=250; +set session rocksdb_merge_combine_read_size=1000; + +CREATE TABLE t1 (i INT, j INT, PRIMARY KEY (i)) ENGINE = ROCKSDB; + +--disable_query_log +let $max = 100; +let $i = 1; +while ($i <= $max) { + let $insert = INSERT INTO t1 VALUES ($i, FLOOR(RAND() * 100)); + inc $i; + eval $insert; +} +--enable_query_log + +ALTER TABLE t1 ADD INDEX kj(j), ALGORITHM=INPLACE; +ALTER TABLE t1 ADD INDEX kij(i,j), ALGORITHM=INPLACE; +SHOW CREATE TABLE t1; + +DROP INDEX kj on t1; +DROP INDEX kij ON t1; + +ALTER TABLE t1 ADD INDEX kj(j), ADD INDEX kij(i,j), ADD INDEX kji(j,i), ALGORITHM=INPLACE; +SHOW CREATE TABLE t1; + +DROP TABLE t1; + +# Reverse CF testing, needs to be added to SSTFileWriter in reverse order +CREATE TABLE t1 (a INT PRIMARY KEY, b INT) ENGINE=RocksDB; +--disable_query_log +let $max = 100; +let $i = 1; +while ($i <= $max) { + let $insert = INSERT INTO t1 VALUES ($i, FLOOR(RAND() * 100)); + inc $i; + eval $insert; +} +--enable_query_log + +ALTER TABLE t1 ADD INDEX kb(b) comment 'rev:cf1', ALGORITHM=INPLACE; +SHOW CREATE TABLE t1; +SELECT COUNT(*) FROM t1 FORCE INDEX(kb); +DROP TABLE t1; + diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_merge_combine_read_size_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_merge_combine_read_size_basic.test new file mode 100644 index 0000000000000..48e891373448c --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_merge_combine_read_size_basic.test @@ -0,0 +1,32 @@ +--source include/have_rocksdb.inc + 
+--disable_warnings +drop table if exists t1; +--enable_warnings + +set session rocksdb_merge_buf_size=250; +set session rocksdb_merge_combine_read_size=1000; + +CREATE TABLE t1 (i INT, j INT, PRIMARY KEY (i)) ENGINE = ROCKSDB; + +--disable_query_log +let $max = 100; +let $i = 1; +while ($i <= $max) { + let $insert = INSERT INTO t1 VALUES ($i, $i); + inc $i; + eval $insert; +} +--enable_query_log + +ALTER TABLE t1 ADD INDEX kj(j), ALGORITHM=INPLACE; +ALTER TABLE t1 ADD INDEX kij(i,j), ALGORITHM=INPLACE; +SHOW CREATE TABLE t1; + +DROP INDEX kj on t1; +DROP INDEX kij ON t1; + +ALTER TABLE t1 ADD INDEX kj(j), ADD INDEX kij(i,j), ADD INDEX kji(j,i), ALGORITHM=INPLACE; +SHOW CREATE TABLE t1; + +DROP TABLE t1; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_new_table_reader_for_compaction_inputs_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_new_table_reader_for_compaction_inputs_basic.test new file mode 100644 index 0000000000000..1d2ea6e666385 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_new_table_reader_for_compaction_inputs_basic.test @@ -0,0 +1,7 @@ +--source include/have_rocksdb.inc + +--let $sys_var=ROCKSDB_NEW_TABLE_READER_FOR_COMPACTION_INPUTS +--let $read_only=1 +--let $session=0 +--source include/rocksdb_sys_var.inc + diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_no_block_cache_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_no_block_cache_basic.test new file mode 100644 index 0000000000000..be1e3e883927e --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_no_block_cache_basic.test @@ -0,0 +1,6 @@ +--source include/have_rocksdb.inc + +--let $sys_var=ROCKSDB_NO_BLOCK_CACHE +--let $read_only=1 +--let $session=0 +--source include/rocksdb_sys_var.inc diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_override_cf_options_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_override_cf_options_basic.test new 
file mode 100644 index 0000000000000..1f4325b89d6ee --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_override_cf_options_basic.test @@ -0,0 +1,6 @@ +--source include/have_rocksdb.inc + +--let $sys_var=ROCKSDB_OVERRIDE_CF_OPTIONS +--let $read_only=1 +--let $session=0 +--source include/rocksdb_sys_var.inc diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_paranoid_checks_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_paranoid_checks_basic.test new file mode 100644 index 0000000000000..5bdd9d3d50bf1 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_paranoid_checks_basic.test @@ -0,0 +1,7 @@ +--source include/have_rocksdb.inc + +--let $sys_var=ROCKSDB_PARANOID_CHECKS +--let $read_only=1 +--let $session=0 +--source include/rocksdb_sys_var.inc + diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_pause_background_work_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_pause_background_work_basic.test new file mode 100644 index 0000000000000..3f2f6bc703e53 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_pause_background_work_basic.test @@ -0,0 +1,20 @@ +--source include/have_rocksdb.inc + +CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO valid_values VALUES(1); +INSERT INTO valid_values VALUES(0); +INSERT INTO valid_values VALUES('on'); +INSERT INTO valid_values VALUES('off'); + +CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO invalid_values VALUES('\'aaa\''); +INSERT INTO invalid_values VALUES('\'bbb\''); + +--let $sys_var=ROCKSDB_PAUSE_BACKGROUND_WORK +--let $read_only=0 +--let $session=0 +--let $sticky=1 +--source include/rocksdb_sys_var.inc + +DROP TABLE valid_values; +DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_perf_context_level_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_perf_context_level_basic.test 
new file mode 100644 index 0000000000000..46f7457847116 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_perf_context_level_basic.test @@ -0,0 +1,18 @@ +--source include/have_rocksdb.inc + +CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO valid_values VALUES(1); +INSERT INTO valid_values VALUES(2); +INSERT INTO valid_values VALUES(3); +INSERT INTO valid_values VALUES(4); + +CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO invalid_values VALUES('\'aaa\''); + +--let $sys_var=ROCKSDB_PERF_CONTEXT_LEVEL +--let $read_only=0 +--let $session=1 +--source include/rocksdb_sys_var.inc + +DROP TABLE valid_values; +DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_persistent_cache_path_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_persistent_cache_path_basic.test new file mode 100644 index 0000000000000..1a1146a17ccda --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_persistent_cache_path_basic.test @@ -0,0 +1,16 @@ +--source include/have_rocksdb.inc + +CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO valid_values VALUES('abc'); +INSERT INTO valid_values VALUES('def'); + +CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; + +--let $sys_var=ROCKSDB_PERSISTENT_CACHE_PATH +--let $read_only=1 +--let $session=0 +--let $sticky=1 +--source include/rocksdb_sys_var.inc + +DROP TABLE valid_values; +DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_persistent_cache_size_mb_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_persistent_cache_size_mb_basic.test new file mode 100644 index 0000000000000..7f21d96f62c20 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_persistent_cache_size_mb_basic.test @@ -0,0 +1,16 @@ +--source include/have_rocksdb.inc + +CREATE TABLE valid_values (value varchar(255)) 
ENGINE=myisam; +INSERT INTO valid_values VALUES(1); +INSERT INTO valid_values VALUES(1024); + +CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO invalid_values VALUES('\'aaa\''); + +--let $sys_var=ROCKSDB_PERSISTENT_CACHE_SIZE_MB +--let $read_only=1 +--let $session=0 +--source include/rocksdb_sys_var.inc + +DROP TABLE valid_values; +DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_pin_l0_filter_and_index_blocks_in_cache_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_pin_l0_filter_and_index_blocks_in_cache_basic.test new file mode 100644 index 0000000000000..d25131062d4a1 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_pin_l0_filter_and_index_blocks_in_cache_basic.test @@ -0,0 +1,6 @@ +--source include/have_rocksdb.inc + +--let $sys_var=ROCKSDB_PIN_L0_FILTER_AND_INDEX_BLOCKS_IN_CACHE +--let $read_only=1 +--let $session=0 +--source include/rocksdb_sys_var.inc diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_print_snapshot_conflict_queries_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_print_snapshot_conflict_queries_basic.test new file mode 100644 index 0000000000000..24d2f182fe801 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_print_snapshot_conflict_queries_basic.test @@ -0,0 +1,18 @@ +--source include/have_rocksdb.inc + +CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO valid_values VALUES(1); +INSERT INTO valid_values VALUES(0); +INSERT INTO valid_values VALUES('on'); + +CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO invalid_values VALUES('\'aaa\''); +INSERT INTO invalid_values VALUES('\'bbb\''); + +--let $sys_var=ROCKSDB_PRINT_SNAPSHOT_CONFLICT_QUERIES +--let $read_only=0 +--let $session=0 +--source include/rocksdb_sys_var.inc + +DROP TABLE valid_values; +DROP TABLE invalid_values; diff --git 
a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_rate_limiter_bytes_per_sec_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_rate_limiter_bytes_per_sec_basic.test new file mode 100644 index 0000000000000..8277011831a70 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_rate_limiter_bytes_per_sec_basic.test @@ -0,0 +1,63 @@ +--source include/have_rocksdb.inc + +# Attempt to set the value - this should generate a warning as we can't set it to or from 0 +SET @@global.rocksdb_rate_limiter_bytes_per_sec = 10000; + +# Now shut down and come back up with the rate limiter enabled and retest setting the variable + +# Write file to make mysql-test-run.pl expect the "crash", but don't restart the +# server until it is told to +--let $_server_id= `SELECT @@server_id` +--let $_expect_file_name= $MYSQLTEST_VARDIR/tmp/mysqld.$_server_id.expect +--exec echo "wait" >$_expect_file_name + +# Send shutdown to the connected server and give it 10 seconds to die before +# zapping it +shutdown_server 10; + +# Attempt to restart the server with the rate limiter on +--exec echo "restart:--rocksdb_rate_limiter_bytes_per_sec=10000" >$_expect_file_name +--sleep 5 + +# Wait for reconnect +--enable_reconnect +--source include/wait_until_connected_again.inc +--disable_reconnect + +# The valid_values table lists the values that we want to make sure that the system will allow +# us to set for rocksdb_rate_limiter_bytes_per_sec +CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO valid_values VALUES(1), (1000), (1000000), (1000000000), (1000000000000); + +# The invalid_values table lists the values that we don't want to allow for the variable +CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO invalid_values VALUES('\'aaa\''), (3.14); + +# Test all the valid and invalid values +--let $sys_var=ROCKSDB_RATE_LIMITER_BYTES_PER_SEC +--let $session=0 +--source include/rocksdb_sys_var.inc + +DROP TABLE 
valid_values; +DROP TABLE invalid_values; + +# Zero is an invalid value if the rate limiter is turned on, but it won't be rejected by the +# SET command but will generate a warning. + +# Attempt to set the value to 0 - this should generate a warning as we can't set it to or from 0 +SET @@global.rocksdb_rate_limiter_bytes_per_sec = 0; + +# Attempt to set the value to -1 - this should first truncate to 0 and then generate a warning as +# we can't set it to or from 0 +SET @@global.rocksdb_rate_limiter_bytes_per_sec = -1; + +# Restart the server without the rate limiter +--exec echo "wait" >$_expect_file_name +shutdown_server 10; +--exec echo "restart" >$_expect_file_name +--sleep 5 + +# Wait for reconnect +--enable_reconnect +--source include/wait_until_connected_again.inc +--disable_reconnect diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_read_free_rpl_tables_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_read_free_rpl_tables_basic.test new file mode 100644 index 0000000000000..71f42a47f4bb2 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_read_free_rpl_tables_basic.test @@ -0,0 +1,15 @@ +--source include/have_rocksdb.inc + +CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO valid_values VALUES('a'); +INSERT INTO valid_values VALUES('b'); + +CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; + +--let $sys_var=ROCKSDB_READ_FREE_RPL_TABLES +--let $read_only=0 +--let $session=1 +--source include/rocksdb_sys_var.inc + +DROP TABLE valid_values; +DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_records_in_range_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_records_in_range_basic.test new file mode 100644 index 0000000000000..21503475e3e94 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_records_in_range_basic.test @@ -0,0 +1,18 @@ +--source include/have_rocksdb.inc + +CREATE TABLE 
valid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO valid_values VALUES(1); +INSERT INTO valid_values VALUES(0); +INSERT INTO valid_values VALUES(222333); + +CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO invalid_values VALUES('\'aaa\''); +INSERT INTO invalid_values VALUES('\'bbb\''); + +--let $sys_var=ROCKSDB_RECORDS_IN_RANGE +--let $read_only=0 +--let $session=1 +--source include/rocksdb_sys_var.inc + +DROP TABLE valid_values; +DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_seconds_between_stat_computes_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_seconds_between_stat_computes_basic.test new file mode 100644 index 0000000000000..53c2e6e62bf0d --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_seconds_between_stat_computes_basic.test @@ -0,0 +1,18 @@ +--source include/have_rocksdb.inc + +CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO valid_values VALUES(1); +INSERT INTO valid_values VALUES(0); +INSERT INTO valid_values VALUES(1024); + +CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO invalid_values VALUES('\'aaa\''); +INSERT INTO invalid_values VALUES('\'bbb\''); + +--let $sys_var=ROCKSDB_SECONDS_BETWEEN_STAT_COMPUTES +--let $read_only=0 +--let $session=0 +--source include/rocksdb_sys_var.inc + +DROP TABLE valid_values; +DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_signal_drop_index_thread_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_signal_drop_index_thread_basic.test new file mode 100644 index 0000000000000..ea90c7b7c58dd --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_signal_drop_index_thread_basic.test @@ -0,0 +1,19 @@ +--source include/have_rocksdb.inc + +CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO valid_values VALUES(1); +INSERT INTO valid_values 
VALUES(0); +INSERT INTO valid_values VALUES('on'); + +CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO invalid_values VALUES('\'aaa\''); +INSERT INTO invalid_values VALUES('\'bbb\''); + +--let $sys_var=ROCKSDB_SIGNAL_DROP_INDEX_THREAD +--let $read_only=0 +--let $session=0 +--let $sticky=1 +--source include/rocksdb_sys_var.inc + +DROP TABLE valid_values; +DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_skip_bloom_filter_on_read_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_skip_bloom_filter_on_read_basic.test new file mode 100644 index 0000000000000..82b56e0bbcb1d --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_skip_bloom_filter_on_read_basic.test @@ -0,0 +1,18 @@ +--source include/have_rocksdb.inc + +CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO valid_values VALUES(1); +INSERT INTO valid_values VALUES(0); +INSERT INTO valid_values VALUES('on'); + +CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO invalid_values VALUES('\'aaa\''); +INSERT INTO invalid_values VALUES('\'bbb\''); + +--let $sys_var=ROCKSDB_SKIP_BLOOM_FILTER_ON_READ +--let $read_only=0 +--let $session=1 +--source include/rocksdb_sys_var.inc + +DROP TABLE valid_values; +DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_skip_fill_cache_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_skip_fill_cache_basic.test new file mode 100644 index 0000000000000..cc1b608b7b360 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_skip_fill_cache_basic.test @@ -0,0 +1,18 @@ +--source include/have_rocksdb.inc + +CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO valid_values VALUES(1); +INSERT INTO valid_values VALUES(0); +INSERT INTO valid_values VALUES('on'); + +CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO 
invalid_values VALUES('\'aaa\''); +INSERT INTO invalid_values VALUES('\'bbb\''); + +--let $sys_var=ROCKSDB_SKIP_FILL_CACHE +--let $read_only=0 +--let $session=1 +--source include/rocksdb_sys_var.inc + +DROP TABLE valid_values; +DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_skip_unique_check_tables_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_skip_unique_check_tables_basic.test new file mode 100644 index 0000000000000..3fe265ae9300b --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_skip_unique_check_tables_basic.test @@ -0,0 +1,15 @@ +--source include/have_rocksdb.inc + +CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO valid_values VALUES("aaa"); +INSERT INTO valid_values VALUES("bbb"); + +CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; + +--let $sys_var=ROCKSDB_SKIP_UNIQUE_CHECK_TABLES +--let $read_only=0 +--let $session=1 +--source include/rocksdb_sys_var.inc + +DROP TABLE valid_values; +DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_stats_dump_period_sec_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_stats_dump_period_sec_basic.test new file mode 100644 index 0000000000000..2fbb0c6ea6d14 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_stats_dump_period_sec_basic.test @@ -0,0 +1,6 @@ +--source include/have_rocksdb.inc + +--let $sys_var=ROCKSDB_STATS_DUMP_PERIOD_SEC +--let $read_only=1 +--let $session=0 +--source include/rocksdb_sys_var.inc diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_store_row_debug_checksums_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_store_row_debug_checksums_basic.test new file mode 100644 index 0000000000000..e3faca86717b3 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_store_row_debug_checksums_basic.test @@ -0,0 +1,18 @@ +--source 
include/have_rocksdb.inc + +CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO valid_values VALUES(1); +INSERT INTO valid_values VALUES(0); +INSERT INTO valid_values VALUES('on'); + +CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO invalid_values VALUES('\'aaa\''); +INSERT INTO invalid_values VALUES('\'bbb\''); + +--let $sys_var=ROCKSDB_STORE_ROW_DEBUG_CHECKSUMS +--let $read_only=0 +--let $session=1 +--source include/rocksdb_sys_var.inc + +DROP TABLE valid_values; +DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_strict_collation_check_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_strict_collation_check_basic.test new file mode 100644 index 0000000000000..17aa63b8bb303 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_strict_collation_check_basic.test @@ -0,0 +1,19 @@ +--source include/have_rocksdb.inc + +CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO valid_values VALUES(1); +INSERT INTO valid_values VALUES(0); +INSERT INTO valid_values VALUES('on'); +INSERT INTO valid_values VALUES('off'); + +CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO invalid_values VALUES('\'aaa\''); +INSERT INTO invalid_values VALUES('\'bbb\''); + +--let $sys_var=ROCKSDB_STRICT_COLLATION_CHECK +--let $read_only=0 +--let $session=0 +--source include/rocksdb_sys_var.inc + +DROP TABLE valid_values; +DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_strict_collation_exceptions_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_strict_collation_exceptions_basic.test new file mode 100644 index 0000000000000..4eb9648884075 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_strict_collation_exceptions_basic.test @@ -0,0 +1,35 @@ +--source include/have_rocksdb.inc + +# We cannot use the rocksdb_sys_var.inc script as some
of the strings we set +# need to be quoted and that doesn't work with this script. Run through +# valid options by hand. + +SET @start_global_value = @@global.ROCKSDB_STRICT_COLLATION_EXCEPTIONS; +SELECT @start_global_value; + +--echo "Trying to set @session.ROCKSDB_STRICT_COLLATION_EXCEPTIONS to simple table name." +SET @@global.ROCKSDB_STRICT_COLLATION_EXCEPTIONS = mytable; +SELECT @@global.ROCKSDB_STRICT_COLLATION_EXCEPTIONS; + +--echo "Trying to set @session.ROCKSDB_STRICT_COLLATION_EXCEPTIONS to regex table name(s)." +SET @@global.ROCKSDB_STRICT_COLLATION_EXCEPTIONS = "t.*"; +SELECT @@global.ROCKSDB_STRICT_COLLATION_EXCEPTIONS; + +--echo "Trying to set @session.ROCKSDB_STRICT_COLLATION_EXCEPTIONS to multiple regex table names." +SET @@global.ROCKSDB_STRICT_COLLATION_EXCEPTIONS = "s.*,t.*"; +SELECT @@global.ROCKSDB_STRICT_COLLATION_EXCEPTIONS; + +--echo "Trying to set @session.ROCKSDB_STRICT_COLLATION_EXCEPTIONS to empty." +SET @@global.ROCKSDB_STRICT_COLLATION_EXCEPTIONS = ""; +SELECT @@global.ROCKSDB_STRICT_COLLATION_EXCEPTIONS; + +--echo "Trying to set @session.ROCKSDB_STRICT_COLLATION_EXCEPTIONS to default." +SET @@global.ROCKSDB_STRICT_COLLATION_EXCEPTIONS = DEFAULT; +SELECT @@global.ROCKSDB_STRICT_COLLATION_EXCEPTIONS; + +--echo "Trying to set @session.ROCKSDB_STRICT_COLLATION_EXCEPTIONS to 444. It should fail because it is not session." 
+--Error ER_GLOBAL_VARIABLE +SET @@session.ROCKSDB_STRICT_COLLATION_EXCEPTIONS = 444; + +SET @@global.ROCKSDB_STRICT_COLLATION_EXCEPTIONS = @start_global_value; +SELECT @@global.ROCKSDB_STRICT_COLLATION_EXCEPTIONS; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_supported_compression_types_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_supported_compression_types_basic.test new file mode 100644 index 0000000000000..52bf63c21cced --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_supported_compression_types_basic.test @@ -0,0 +1,7 @@ +--source include/have_rocksdb.inc + +--let $sys_var=ROCKSDB_SUPPORTED_COMPRESSION_TYPES +--let $read_only=1 +--let $session=0 +--let $suppress_default_value=1 +--source include/rocksdb_sys_var.inc diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_table_cache_numshardbits_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_table_cache_numshardbits_basic.test new file mode 100644 index 0000000000000..11bdd6abce83d --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_table_cache_numshardbits_basic.test @@ -0,0 +1,6 @@ +--source include/have_rocksdb.inc + +--let $sys_var=ROCKSDB_TABLE_CACHE_NUMSHARDBITS +--let $read_only=1 +--let $session=0 +--source include/rocksdb_sys_var.inc diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_table_stats_sampling_pct_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_table_stats_sampling_pct_basic.test new file mode 100644 index 0000000000000..3bed5e6ec739c --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_table_stats_sampling_pct_basic.test @@ -0,0 +1,22 @@ +--source include/have_rocksdb.inc + +CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO valid_values VALUES(100); +INSERT INTO valid_values VALUES(1); +INSERT INTO valid_values VALUES(0); + +CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; 
+INSERT INTO invalid_values VALUES('\'aaa\''); +INSERT INTO invalid_values VALUES('\'bbb\''); +INSERT INTO invalid_values VALUES('\'-1\''); +INSERT INTO invalid_values VALUES('\'101\''); +INSERT INTO invalid_values VALUES('\'484436\''); + +--let $sys_var=ROCKSDB_TABLE_STATS_SAMPLING_PCT +--let $read_only=0 +--let $session=0 +--source include/rocksdb_sys_var.inc + +DROP TABLE valid_values; +DROP TABLE invalid_values; + diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_tmpdir_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_tmpdir_basic.test new file mode 100644 index 0000000000000..8865914dd18ab --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_tmpdir_basic.test @@ -0,0 +1,38 @@ +--source include/have_rocksdb.inc + +SET @start_global_value = @@global.rocksdb_tmpdir; +SELECT @start_global_value; + +# +# exists as global and session +# +select @@session.rocksdb_tmpdir; + +show global variables like 'rocksdb_tmpdir'; +show session variables like 'rocksdb_tmpdir'; + +select * from information_schema.global_variables where variable_name='rocksdb_tmpdir'; +select * from information_schema.session_variables where variable_name='rocksdb_tmpdir'; + +# +# Show that it is writable +# + +set global rocksdb_tmpdir='value'; +set session rocksdb_tmpdir='value'; + +# +# incorrect types +# +--error ER_WRONG_TYPE_FOR_VAR +set global rocksdb_tmpdir=1.1; +--error ER_WRONG_TYPE_FOR_VAR +set global rocksdb_tmpdir=1e1; + +# +# Cleanup +# + +SET @@global.rocksdb_tmpdir = @start_global_value; +SELECT @@global.rocksdb_tmpdir; + diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_trace_sst_api_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_trace_sst_api_basic.test new file mode 100644 index 0000000000000..259021d31d3de --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_trace_sst_api_basic.test @@ -0,0 +1,18 @@ +--source include/have_rocksdb.inc + +CREATE TABLE valid_values 
(value varchar(255)) ENGINE=myisam; +INSERT INTO valid_values VALUES(1); +INSERT INTO valid_values VALUES(0); +INSERT INTO valid_values VALUES('on'); + +CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO invalid_values VALUES('\'aaa\''); +INSERT INTO invalid_values VALUES('\'bbb\''); + +--let $sys_var=ROCKSDB_TRACE_SST_API +--let $read_only=0 +--let $session=1 +--source include/rocksdb_sys_var.inc + +DROP TABLE valid_values; +DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_unsafe_for_binlog_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_unsafe_for_binlog_basic.test new file mode 100644 index 0000000000000..f5f4536d769f5 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_unsafe_for_binlog_basic.test @@ -0,0 +1,18 @@ +--source include/have_rocksdb.inc + +CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO valid_values VALUES(1); +INSERT INTO valid_values VALUES(0); +INSERT INTO valid_values VALUES('on'); + +CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO invalid_values VALUES('\'aaa\''); +INSERT INTO invalid_values VALUES('\'bbb\''); + +--let $sys_var=ROCKSDB_UNSAFE_FOR_BINLOG +--let $read_only=0 +--let $session=1 +--source include/rocksdb_sys_var.inc + +DROP TABLE valid_values; +DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_use_adaptive_mutex_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_use_adaptive_mutex_basic.test new file mode 100644 index 0000000000000..7ce7bec1f6ef2 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_use_adaptive_mutex_basic.test @@ -0,0 +1,6 @@ +--source include/have_rocksdb.inc + +--let $sys_var=ROCKSDB_USE_ADAPTIVE_MUTEX +--let $read_only=1 +--let $session=0 +--source include/rocksdb_sys_var.inc diff --git 
a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_use_direct_reads_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_use_direct_reads_basic.test new file mode 100644 index 0000000000000..323b517f1788e --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_use_direct_reads_basic.test @@ -0,0 +1,6 @@ +--source include/have_rocksdb.inc + +--let $sys_var=ROCKSDB_USE_DIRECT_READS +--let $read_only=1 +--let $session=0 +--source include/rocksdb_sys_var.inc diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_use_direct_writes_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_use_direct_writes_basic.test new file mode 100644 index 0000000000000..14e6de24652f0 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_use_direct_writes_basic.test @@ -0,0 +1,6 @@ +--source include/have_rocksdb.inc + +--let $sys_var=ROCKSDB_USE_DIRECT_WRITES +--let $read_only=1 +--let $session=0 +--source include/rocksdb_sys_var.inc diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_use_fsync_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_use_fsync_basic.test new file mode 100644 index 0000000000000..90b41c4aa5723 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_use_fsync_basic.test @@ -0,0 +1,6 @@ +--source include/have_rocksdb.inc + +--let $sys_var=ROCKSDB_USE_FSYNC +--let $read_only=1 +--let $session=0 +--source include/rocksdb_sys_var.inc diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_validate_tables_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_validate_tables_basic.test new file mode 100644 index 0000000000000..ed12b319cfc5f --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_validate_tables_basic.test @@ -0,0 +1,6 @@ +--source include/have_rocksdb.inc + +--let $sys_var=ROCKSDB_VALIDATE_TABLES +--let $read_only=1 +--let $session=0 +--source include/rocksdb_sys_var.inc diff --git 
a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_verify_row_debug_checksums_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_verify_row_debug_checksums_basic.test new file mode 100644 index 0000000000000..352bc9d9cf03a --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_verify_row_debug_checksums_basic.test @@ -0,0 +1,18 @@ +--source include/have_rocksdb.inc + +CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO valid_values VALUES(1); +INSERT INTO valid_values VALUES(0); +INSERT INTO valid_values VALUES('on'); + +CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO invalid_values VALUES('\'aaa\''); +INSERT INTO invalid_values VALUES('\'bbb\''); + +--let $sys_var=ROCKSDB_VERIFY_ROW_DEBUG_CHECKSUMS +--let $read_only=0 +--let $session=1 +--source include/rocksdb_sys_var.inc + +DROP TABLE valid_values; +DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_wal_bytes_per_sync_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_wal_bytes_per_sync_basic.test new file mode 100644 index 0000000000000..afab0f20d40a7 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_wal_bytes_per_sync_basic.test @@ -0,0 +1,6 @@ +--source include/have_rocksdb.inc + +--let $sys_var=ROCKSDB_WAL_BYTES_PER_SYNC +--let $read_only=1 +--let $session=0 +--source include/rocksdb_sys_var.inc diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_wal_dir_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_wal_dir_basic.test new file mode 100644 index 0000000000000..a40c77669f2c8 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_wal_dir_basic.test @@ -0,0 +1,6 @@ +--source include/have_rocksdb.inc + +--let $sys_var=ROCKSDB_WAL_DIR +--let $read_only=1 +--let $session=0 +--source include/rocksdb_sys_var.inc diff --git 
a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_wal_recovery_mode_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_wal_recovery_mode_basic.test new file mode 100644 index 0000000000000..ce202f2e2b580 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_wal_recovery_mode_basic.test @@ -0,0 +1,17 @@ +--source include/have_rocksdb.inc + +CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO valid_values VALUES(1); +INSERT INTO valid_values VALUES(0); + +CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO invalid_values VALUES('\'aaa\''); + +--let $sys_var=ROCKSDB_WAL_RECOVERY_MODE +--let $read_only=0 +--let $session=0 +--source include/rocksdb_sys_var.inc + +DROP TABLE valid_values; +DROP TABLE invalid_values; + diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_wal_size_limit_mb_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_wal_size_limit_mb_basic.test new file mode 100644 index 0000000000000..95880ea3e6370 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_wal_size_limit_mb_basic.test @@ -0,0 +1,6 @@ +--source include/have_rocksdb.inc + +--let $sys_var=ROCKSDB_WAL_SIZE_LIMIT_MB +--let $read_only=1 +--let $session=0 +--source include/rocksdb_sys_var.inc diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_wal_ttl_seconds_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_wal_ttl_seconds_basic.test new file mode 100644 index 0000000000000..e65d3851392b0 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_wal_ttl_seconds_basic.test @@ -0,0 +1,6 @@ +--source include/have_rocksdb.inc + +--let $sys_var=ROCKSDB_WAL_TTL_SECONDS +--let $read_only=1 +--let $session=0 +--source include/rocksdb_sys_var.inc diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_whole_key_filtering_basic.test 
b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_whole_key_filtering_basic.test new file mode 100644 index 0000000000000..83e8e2382a229 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_whole_key_filtering_basic.test @@ -0,0 +1,6 @@ +--source include/have_rocksdb.inc + +--let $sys_var=ROCKSDB_WHOLE_KEY_FILTERING +--let $read_only=1 +--let $session=0 +--source include/rocksdb_sys_var.inc diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_write_disable_wal_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_write_disable_wal_basic.test new file mode 100644 index 0000000000000..d732bebac7ffc --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_write_disable_wal_basic.test @@ -0,0 +1,18 @@ +--source include/have_rocksdb.inc + +CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO valid_values VALUES(1); +INSERT INTO valid_values VALUES(0); +INSERT INTO valid_values VALUES('on'); +INSERT INTO valid_values VALUES('off'); + +CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO invalid_values VALUES('\'aaa\''); + +--let $sys_var=ROCKSDB_WRITE_DISABLE_WAL +--let $read_only=0 +--let $session=1 +--source include/rocksdb_sys_var.inc + +DROP TABLE valid_values; +DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_write_ignore_missing_column_families_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_write_ignore_missing_column_families_basic.test new file mode 100644 index 0000000000000..f38b7c9601dda --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_write_ignore_missing_column_families_basic.test @@ -0,0 +1,18 @@ +--source include/have_rocksdb.inc + +CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO valid_values VALUES(1); +INSERT INTO valid_values VALUES(0); +INSERT INTO valid_values VALUES('on'); + +CREATE TABLE invalid_values (value 
varchar(255)) ENGINE=myisam; +INSERT INTO invalid_values VALUES('\'aaa\''); +INSERT INTO invalid_values VALUES('\'bbb\''); + +--let $sys_var=ROCKSDB_WRITE_IGNORE_MISSING_COLUMN_FAMILIES +--let $read_only=0 +--let $session=1 +--source include/rocksdb_sys_var.inc + +DROP TABLE valid_values; +DROP TABLE invalid_values; diff --git a/storage/rocksdb/properties_collector.cc b/storage/rocksdb/properties_collector.cc new file mode 100644 index 0000000000000..368e6d88d1ccf --- /dev/null +++ b/storage/rocksdb/properties_collector.cc @@ -0,0 +1,524 @@ +/* + Copyright (c) 2015, Facebook, Inc. + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; version 2 of the License. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. 
+ + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ + +#include +#ifdef _WIN32 +#define _CRT_RAND_S +#include +#define rand_r rand_s +#endif +/* This C++ file's header file */ +#include "./properties_collector.h" + +/* Standard C++ header files */ +#include +#include +#include +#include + +/* MySQL header files */ +#include "./log.h" +#include "./my_stacktrace.h" +#include "./sql_array.h" + +/* MyRocks header files */ +#include "./rdb_datadic.h" +#include "./rdb_utils.h" + +namespace myrocks { + +std::atomic rocksdb_num_sst_entry_put(0); +std::atomic rocksdb_num_sst_entry_delete(0); +std::atomic rocksdb_num_sst_entry_singledelete(0); +std::atomic rocksdb_num_sst_entry_merge(0); +std::atomic rocksdb_num_sst_entry_other(0); +my_bool rocksdb_compaction_sequential_deletes_count_sd = false; + +Rdb_tbl_prop_coll::Rdb_tbl_prop_coll(Rdb_ddl_manager *const ddl_manager, + const Rdb_compact_params ¶ms, + const uint32_t &cf_id, + const uint8_t &table_stats_sampling_pct) + : m_cf_id(cf_id), m_ddl_manager(ddl_manager), m_last_stats(nullptr), + m_rows(0l), m_window_pos(0l), m_deleted_rows(0l), m_max_deleted_rows(0l), + m_file_size(0), m_params(params), + m_table_stats_sampling_pct(table_stats_sampling_pct), + m_seed(time(nullptr)), m_card_adj_extra(1.) { + DBUG_ASSERT(ddl_manager != nullptr); + + // We need to adjust the index cardinality numbers based on the sampling + // rate so that the output of "SHOW INDEX" command will reflect reality + // more closely. It will still be an approximation, just a better one. + if (m_table_stats_sampling_pct > 0) { + m_card_adj_extra = 100. 
/ m_table_stats_sampling_pct; + } + + m_deleted_rows_window.resize(m_params.m_window, false); +} + +/* + This function is called by RocksDB for every key in the SST file +*/ +rocksdb::Status Rdb_tbl_prop_coll::AddUserKey(const rocksdb::Slice &key, + const rocksdb::Slice &value, + rocksdb::EntryType type, + rocksdb::SequenceNumber seq, + uint64_t file_size) { + if (key.size() >= 4) { + AdjustDeletedRows(type); + + m_rows++; + + CollectStatsForRow(key, value, type, file_size); + } + + return rocksdb::Status::OK(); +} + +void Rdb_tbl_prop_coll::AdjustDeletedRows(rocksdb::EntryType type) { + if (m_params.m_window > 0) { + // record the "is deleted" flag into the sliding window + // the sliding window is implemented as a circular buffer + // in m_deleted_rows_window vector + // the current position in the circular buffer is pointed at by + // m_rows % m_deleted_rows_window.size() + // m_deleted_rows is the current number of 1's in the vector + // --update the counter for the element which will be overridden + const bool is_delete = (type == rocksdb::kEntryDelete || + (type == rocksdb::kEntrySingleDelete && + rocksdb_compaction_sequential_deletes_count_sd)); + + // Only make changes if the value at the current position needs to change + if (is_delete != m_deleted_rows_window[m_window_pos]) { + // Set or clear the flag at the current position as appropriate + m_deleted_rows_window[m_window_pos] = is_delete; + if (!is_delete) { + m_deleted_rows--; + } else if (++m_deleted_rows > m_max_deleted_rows) { + m_max_deleted_rows = m_deleted_rows; + } + } + + if (++m_window_pos == m_params.m_window) { + m_window_pos = 0; + } + } +} + +Rdb_index_stats *Rdb_tbl_prop_coll::AccessStats(const rocksdb::Slice &key) { + GL_INDEX_ID gl_index_id; + gl_index_id.cf_id = m_cf_id; + gl_index_id.index_id = rdb_netbuf_to_uint32(reinterpret_cast(key.data())); + + if (m_last_stats == nullptr || m_last_stats->m_gl_index_id != gl_index_id) { + m_keydef = nullptr; + + // starting a new table + // add 
the new element into m_stats + m_stats.emplace_back(gl_index_id); + m_last_stats = &m_stats.back(); + + if (m_ddl_manager) { + // safe_find() returns a std::shared_ptr with the count + // incremented (so it can't be deleted out from under us) and with + // the mutex locked (if setup has not occurred yet). We must make + // sure to free the mutex (via unblock_setup()) when we are done + // with this object. Currently this happens earlier in this function + // when we are switching to a new Rdb_key_def and when this object + // is destructed. + m_keydef = m_ddl_manager->safe_find(gl_index_id); + if (m_keydef != nullptr) { + // resize the array to the number of columns. + // It will be initialized with zeroes + m_last_stats->m_distinct_keys_per_prefix.resize( + m_keydef->get_key_parts()); + m_last_stats->m_name = m_keydef->get_name(); + } + } + m_last_key.clear(); + } + + return m_last_stats; +} + +void Rdb_tbl_prop_coll::CollectStatsForRow(const rocksdb::Slice &key, + const rocksdb::Slice &value, + const rocksdb::EntryType &type, + const uint64_t &file_size) { + const auto stats = AccessStats(key); + + stats->m_data_size += key.size() + value.size(); + + // Incrementing per-index entry-type statistics + switch (type) { + case rocksdb::kEntryPut: + stats->m_rows++; + break; + case rocksdb::kEntryDelete: + stats->m_entry_deletes++; + break; + case rocksdb::kEntrySingleDelete: + stats->m_entry_single_deletes++; + break; + case rocksdb::kEntryMerge: + stats->m_entry_merges++; + break; + case rocksdb::kEntryOther: + stats->m_entry_others++; + break; + default: + // NO_LINT_DEBUG + sql_print_error("RocksDB: Unexpected entry type found: %u. 
" + "This should not happen so aborting the system.", + type); + abort_with_stack_traces(); + break; + } + + stats->m_actual_disk_size += file_size - m_file_size; + m_file_size = file_size; + + if (m_keydef != nullptr && ShouldCollectStats()) { + std::size_t column = 0; + bool new_key = true; + + if (!m_last_key.empty()) { + rocksdb::Slice last(m_last_key.data(), m_last_key.size()); + new_key = (m_keydef->compare_keys(&last, &key, &column) == 0); + } + + if (new_key) { + DBUG_ASSERT(column <= stats->m_distinct_keys_per_prefix.size()); + + for (auto i = column; i < stats->m_distinct_keys_per_prefix.size(); i++) { + stats->m_distinct_keys_per_prefix[i]++; + } + + // assign new last_key for the next call + // however, we only need to change the last key + // if one of the first n-1 columns is different + // If the n-1 prefix is the same, no sense in storing + // the new key + if (column < stats->m_distinct_keys_per_prefix.size()) { + m_last_key.assign(key.data(), key.size()); + } + } + } +} + +const char *Rdb_tbl_prop_coll::INDEXSTATS_KEY = "__indexstats__"; + +/* + This function is called by RocksDB to compute properties to store in sst file +*/ +rocksdb::Status +Rdb_tbl_prop_coll::Finish(rocksdb::UserCollectedProperties *const properties) { + uint64_t num_sst_entry_put = 0; + uint64_t num_sst_entry_delete = 0; + uint64_t num_sst_entry_singledelete = 0; + uint64_t num_sst_entry_merge = 0; + uint64_t num_sst_entry_other = 0; + + DBUG_ASSERT(properties != nullptr); + + for (auto it = m_stats.begin(); it != m_stats.end(); it++) { + num_sst_entry_put += it->m_rows; + num_sst_entry_delete += it->m_entry_deletes; + num_sst_entry_singledelete += it->m_entry_single_deletes; + num_sst_entry_merge += it->m_entry_merges; + num_sst_entry_other += it->m_entry_others; + } + + if (num_sst_entry_put > 0) { + rocksdb_num_sst_entry_put += num_sst_entry_put; + } + + if (num_sst_entry_delete > 0) { + rocksdb_num_sst_entry_delete += num_sst_entry_delete; + } + + if 
(num_sst_entry_singledelete > 0) { + rocksdb_num_sst_entry_singledelete += num_sst_entry_singledelete; + } + + if (num_sst_entry_merge > 0) { + rocksdb_num_sst_entry_merge += num_sst_entry_merge; + } + + if (num_sst_entry_other > 0) { + rocksdb_num_sst_entry_other += num_sst_entry_other; + } + + properties->insert({INDEXSTATS_KEY, + Rdb_index_stats::materialize(m_stats, m_card_adj_extra)}); + return rocksdb::Status::OK(); +} + +bool Rdb_tbl_prop_coll::NeedCompact() const { + return m_params.m_deletes && (m_params.m_window > 0) && + (m_file_size > m_params.m_file_size) && + (m_max_deleted_rows > m_params.m_deletes); +} + +bool Rdb_tbl_prop_coll::ShouldCollectStats() { + // Zero means that we'll use all the keys to update statistics. + if (!m_table_stats_sampling_pct || + RDB_TBL_STATS_SAMPLE_PCT_MAX == m_table_stats_sampling_pct) { + return true; + } + + const int val = rand_r(&m_seed) % (RDB_TBL_STATS_SAMPLE_PCT_MAX - + RDB_TBL_STATS_SAMPLE_PCT_MIN + 1) + + RDB_TBL_STATS_SAMPLE_PCT_MIN; + + DBUG_ASSERT(val >= RDB_TBL_STATS_SAMPLE_PCT_MIN); + DBUG_ASSERT(val <= RDB_TBL_STATS_SAMPLE_PCT_MAX); + + return val <= m_table_stats_sampling_pct; +} + +/* + Returns the same as above, but in human-readable way for logging +*/ +rocksdb::UserCollectedProperties +Rdb_tbl_prop_coll::GetReadableProperties() const { + std::string s; +#ifdef DBUG_OFF + s.append("[..."); + s.append(std::to_string(m_stats.size())); + s.append(" records...]"); +#else + bool first = true; + for (auto it : m_stats) { + if (first) { + first = false; + } else { + s.append(","); + } + s.append(GetReadableStats(it)); + } +#endif + return rocksdb::UserCollectedProperties{{INDEXSTATS_KEY, s}}; +} + +std::string Rdb_tbl_prop_coll::GetReadableStats(const Rdb_index_stats &it) { + std::string s; + s.append("("); + s.append(std::to_string(it.m_gl_index_id.cf_id)); + s.append(", "); + s.append(std::to_string(it.m_gl_index_id.index_id)); + s.append("):{name:"); + s.append(it.m_name); + s.append(", size:"); + 
s.append(std::to_string(it.m_data_size)); + s.append(", m_rows:"); + s.append(std::to_string(it.m_rows)); + s.append(", m_actual_disk_size:"); + s.append(std::to_string(it.m_actual_disk_size)); + s.append(", deletes:"); + s.append(std::to_string(it.m_entry_deletes)); + s.append(", single_deletes:"); + s.append(std::to_string(it.m_entry_single_deletes)); + s.append(", merges:"); + s.append(std::to_string(it.m_entry_merges)); + s.append(", others:"); + s.append(std::to_string(it.m_entry_others)); + s.append(", distincts per prefix: ["); + for (auto num : it.m_distinct_keys_per_prefix) { + s.append(std::to_string(num)); + s.append(" "); + } + s.append("]}"); + return s; +} + +/* + Given the properties of an SST file, reads the stats from it and returns it. +*/ + +void Rdb_tbl_prop_coll::read_stats_from_tbl_props( + const std::shared_ptr &table_props, + std::vector *const out_stats_vector) { + DBUG_ASSERT(out_stats_vector != nullptr); + const auto &user_properties = table_props->user_collected_properties; + const auto it2 = user_properties.find(std::string(INDEXSTATS_KEY)); + if (it2 != user_properties.end()) { + auto result MY_ATTRIBUTE((__unused__)) = + Rdb_index_stats::unmaterialize(it2->second, out_stats_vector); + DBUG_ASSERT(result == 0); + } +} + +/* + Serializes an array of Rdb_index_stats into a network string. 
+*/ +std::string +Rdb_index_stats::materialize(const std::vector &stats, + const float card_adj_extra) { + String ret; + rdb_netstr_append_uint16(&ret, INDEX_STATS_VERSION_ENTRY_TYPES); + for (const auto &i : stats) { + rdb_netstr_append_uint32(&ret, i.m_gl_index_id.cf_id); + rdb_netstr_append_uint32(&ret, i.m_gl_index_id.index_id); + DBUG_ASSERT(sizeof i.m_data_size <= 8); + rdb_netstr_append_uint64(&ret, i.m_data_size); + rdb_netstr_append_uint64(&ret, i.m_rows); + rdb_netstr_append_uint64(&ret, i.m_actual_disk_size); + rdb_netstr_append_uint64(&ret, i.m_distinct_keys_per_prefix.size()); + rdb_netstr_append_uint64(&ret, i.m_entry_deletes); + rdb_netstr_append_uint64(&ret, i.m_entry_single_deletes); + rdb_netstr_append_uint64(&ret, i.m_entry_merges); + rdb_netstr_append_uint64(&ret, i.m_entry_others); + for (const auto &num_keys : i.m_distinct_keys_per_prefix) { + const float upd_num_keys = num_keys * card_adj_extra; + rdb_netstr_append_uint64(&ret, static_cast(upd_num_keys)); + } + } + + return std::string((char *)ret.ptr(), ret.length()); +} + +/** + @brief + Reads an array of Rdb_index_stats from a string. + @return HA_EXIT_FAILURE if it detects any inconsistency in the input + @return HA_EXIT_SUCCESS if completes successfully +*/ +int Rdb_index_stats::unmaterialize(const std::string &s, + std::vector *const ret) { + const uchar *p = rdb_std_str_to_uchar_ptr(s); + const uchar *const p2 = p + s.size(); + + DBUG_ASSERT(ret != nullptr); + + if (p + 2 > p2) { + return HA_EXIT_FAILURE; + } + + const int version = rdb_netbuf_read_uint16(&p); + Rdb_index_stats stats; + // Make sure version is within supported range. + if (version < INDEX_STATS_VERSION_INITIAL || + version > INDEX_STATS_VERSION_ENTRY_TYPES) { + // NO_LINT_DEBUG + sql_print_error("Index stats version %d was outside of supported range. 
" + "This should not happen so aborting the system.", + version); + abort_with_stack_traces(); + } + + size_t needed = sizeof(stats.m_gl_index_id.cf_id) + + sizeof(stats.m_gl_index_id.index_id) + + sizeof(stats.m_data_size) + sizeof(stats.m_rows) + + sizeof(stats.m_actual_disk_size) + sizeof(uint64); + if (version >= INDEX_STATS_VERSION_ENTRY_TYPES) { + needed += sizeof(stats.m_entry_deletes) + + sizeof(stats.m_entry_single_deletes) + + sizeof(stats.m_entry_merges) + sizeof(stats.m_entry_others); + } + + while (p < p2) { + if (p + needed > p2) { + return HA_EXIT_FAILURE; + } + rdb_netbuf_read_gl_index(&p, &stats.m_gl_index_id); + stats.m_data_size = rdb_netbuf_read_uint64(&p); + stats.m_rows = rdb_netbuf_read_uint64(&p); + stats.m_actual_disk_size = rdb_netbuf_read_uint64(&p); + stats.m_distinct_keys_per_prefix.resize(rdb_netbuf_read_uint64(&p)); + if (version >= INDEX_STATS_VERSION_ENTRY_TYPES) { + stats.m_entry_deletes = rdb_netbuf_read_uint64(&p); + stats.m_entry_single_deletes = rdb_netbuf_read_uint64(&p); + stats.m_entry_merges = rdb_netbuf_read_uint64(&p); + stats.m_entry_others = rdb_netbuf_read_uint64(&p); + } + if (p + + stats.m_distinct_keys_per_prefix.size() * + sizeof(stats.m_distinct_keys_per_prefix[0]) > + p2) { + return HA_EXIT_FAILURE; + } + for (std::size_t i = 0; i < stats.m_distinct_keys_per_prefix.size(); i++) { + stats.m_distinct_keys_per_prefix[i] = rdb_netbuf_read_uint64(&p); + } + ret->push_back(stats); + } + return HA_EXIT_SUCCESS; +} + +/* + Merges one Rdb_index_stats into another. 
Can be used to come up with the stats + for the index based on stats for each sst +*/ +void Rdb_index_stats::merge(const Rdb_index_stats &s, const bool &increment, + const int64_t &estimated_data_len) { + std::size_t i; + + DBUG_ASSERT(estimated_data_len >= 0); + + m_gl_index_id = s.m_gl_index_id; + if (m_distinct_keys_per_prefix.size() < s.m_distinct_keys_per_prefix.size()) { + m_distinct_keys_per_prefix.resize(s.m_distinct_keys_per_prefix.size()); + } + if (increment) { + m_rows += s.m_rows; + m_data_size += s.m_data_size; + + /* + The Data_length and Avg_row_length are trailing statistics, meaning + they don't get updated for the current SST until the next SST is + written. So, if rocksdb reports the data_length as 0, + we make a reasoned estimate for the data_file_length for the + index in the current SST. + */ + m_actual_disk_size += s.m_actual_disk_size ? s.m_actual_disk_size + : estimated_data_len * s.m_rows; + m_entry_deletes += s.m_entry_deletes; + m_entry_single_deletes += s.m_entry_single_deletes; + m_entry_merges += s.m_entry_merges; + m_entry_others += s.m_entry_others; + if (s.m_distinct_keys_per_prefix.size() > 0) { + for (i = 0; i < s.m_distinct_keys_per_prefix.size(); i++) { + m_distinct_keys_per_prefix[i] += s.m_distinct_keys_per_prefix[i]; + } + } else { + for (i = 0; i < m_distinct_keys_per_prefix.size(); i++) { + m_distinct_keys_per_prefix[i] += + s.m_rows >> (m_distinct_keys_per_prefix.size() - i - 1); + } + } + } else { + m_rows -= s.m_rows; + m_data_size -= s.m_data_size; + m_actual_disk_size -= s.m_actual_disk_size ? 
s.m_actual_disk_size + : estimated_data_len * s.m_rows; + m_entry_deletes -= s.m_entry_deletes; + m_entry_single_deletes -= s.m_entry_single_deletes; + m_entry_merges -= s.m_entry_merges; + m_entry_others -= s.m_entry_others; + if (s.m_distinct_keys_per_prefix.size() > 0) { + for (i = 0; i < s.m_distinct_keys_per_prefix.size(); i++) { + m_distinct_keys_per_prefix[i] -= s.m_distinct_keys_per_prefix[i]; + } + } else { + for (i = 0; i < m_distinct_keys_per_prefix.size(); i++) { + m_distinct_keys_per_prefix[i] -= + s.m_rows >> (m_distinct_keys_per_prefix.size() - i - 1); + } + } + } +} + +} // namespace myrocks diff --git a/storage/rocksdb/properties_collector.h b/storage/rocksdb/properties_collector.h new file mode 100644 index 0000000000000..9ae519d95c7a3 --- /dev/null +++ b/storage/rocksdb/properties_collector.h @@ -0,0 +1,178 @@ +/* + Copyright (c) 2015, Facebook, Inc. + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; version 2 of the License. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. 
+ + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ +#pragma once + +/* C++ system header files */ +#include +#include +#include +#include +#include + +/* RocksDB header files */ +#include "rocksdb/db.h" + +/* MyRocks header files */ +#include "./ha_rocksdb.h" + +namespace myrocks { + +class Rdb_ddl_manager; +class Rdb_key_def; + +extern std::atomic rocksdb_num_sst_entry_put; +extern std::atomic rocksdb_num_sst_entry_delete; +extern std::atomic rocksdb_num_sst_entry_singledelete; +extern std::atomic rocksdb_num_sst_entry_merge; +extern std::atomic rocksdb_num_sst_entry_other; +extern my_bool rocksdb_compaction_sequential_deletes_count_sd; + +struct Rdb_compact_params { + uint64_t m_deletes, m_window, m_file_size; +}; + +struct Rdb_index_stats { + enum { + INDEX_STATS_VERSION_INITIAL = 1, + INDEX_STATS_VERSION_ENTRY_TYPES = 2, + }; + GL_INDEX_ID m_gl_index_id; + int64_t m_data_size, m_rows, m_actual_disk_size; + int64_t m_entry_deletes, m_entry_single_deletes; + int64_t m_entry_merges, m_entry_others; + std::vector m_distinct_keys_per_prefix; + std::string m_name; // name is not persisted + + static std::string materialize(const std::vector &stats, + const float card_adj_extra); + static int unmaterialize(const std::string &s, + std::vector *const ret); + + Rdb_index_stats() : Rdb_index_stats({0, 0}) {} + explicit Rdb_index_stats(GL_INDEX_ID gl_index_id) + : m_gl_index_id(gl_index_id), m_data_size(0), m_rows(0), + m_actual_disk_size(0), m_entry_deletes(0), m_entry_single_deletes(0), + m_entry_merges(0), m_entry_others(0) {} + + void merge(const Rdb_index_stats &s, const bool &increment = true, + const int64_t &estimated_data_len = 0); +}; + +class Rdb_tbl_prop_coll : public rocksdb::TablePropertiesCollector { +public: + Rdb_tbl_prop_coll(Rdb_ddl_manager *const ddl_manager, + const Rdb_compact_params ¶ms, const 
uint32_t &cf_id, + const uint8_t &table_stats_sampling_pct); + + /* + Override parent class's virtual methods of interest. + */ + + virtual rocksdb::Status AddUserKey(const rocksdb::Slice &key, + const rocksdb::Slice &value, + rocksdb::EntryType type, + rocksdb::SequenceNumber seq, + uint64_t file_size); + + virtual rocksdb::Status + Finish(rocksdb::UserCollectedProperties *properties) override; + + virtual const char *Name() const override { return "Rdb_tbl_prop_coll"; } + + rocksdb::UserCollectedProperties GetReadableProperties() const override; + + bool NeedCompact() const override; + +public: + uint64_t GetMaxDeletedRows() const { return m_max_deleted_rows; } + + static void read_stats_from_tbl_props( + const std::shared_ptr &table_props, + std::vector *out_stats_vector); + +private: + static std::string GetReadableStats(const Rdb_index_stats &it); + + bool ShouldCollectStats(); + void CollectStatsForRow(const rocksdb::Slice &key, + const rocksdb::Slice &value, + const rocksdb::EntryType &type, + const uint64_t &file_size); + Rdb_index_stats *AccessStats(const rocksdb::Slice &key); + void AdjustDeletedRows(rocksdb::EntryType type); + +private: + uint32_t m_cf_id; + std::shared_ptr m_keydef; + Rdb_ddl_manager *m_ddl_manager; + std::vector m_stats; + Rdb_index_stats *m_last_stats; + static const char *INDEXSTATS_KEY; + + // last added key + std::string m_last_key; + + // floating window to count deleted rows + std::vector m_deleted_rows_window; + uint64_t m_rows, m_window_pos, m_deleted_rows, m_max_deleted_rows; + uint64_t m_file_size; + Rdb_compact_params m_params; + uint8_t m_table_stats_sampling_pct; + unsigned int m_seed; + float m_card_adj_extra; +}; + +class Rdb_tbl_prop_coll_factory + : public rocksdb::TablePropertiesCollectorFactory { +public: + Rdb_tbl_prop_coll_factory(const Rdb_tbl_prop_coll_factory &) = delete; + Rdb_tbl_prop_coll_factory & + operator=(const Rdb_tbl_prop_coll_factory &) = delete; + + explicit Rdb_tbl_prop_coll_factory(Rdb_ddl_manager 
*ddl_manager) + : m_ddl_manager(ddl_manager) {} + + /* + Override parent class's virtual methods of interest. + */ + + virtual rocksdb::TablePropertiesCollector *CreateTablePropertiesCollector( + rocksdb::TablePropertiesCollectorFactory::Context context) override { + return new Rdb_tbl_prop_coll(m_ddl_manager, m_params, + context.column_family_id, + m_table_stats_sampling_pct); + } + + virtual const char *Name() const override { + return "Rdb_tbl_prop_coll_factory"; + } + +public: + void SetCompactionParams(const Rdb_compact_params ¶ms) { + m_params = params; + } + + void SetTableStatsSamplingPct(const uint8_t &table_stats_sampling_pct) { + m_table_stats_sampling_pct = table_stats_sampling_pct; + } + +private: + Rdb_ddl_manager *const m_ddl_manager; + Rdb_compact_params m_params; + uint8_t m_table_stats_sampling_pct; +}; + +} // namespace myrocks diff --git a/storage/rocksdb/rdb_buff.h b/storage/rocksdb/rdb_buff.h new file mode 100644 index 0000000000000..c2d039b46c57b --- /dev/null +++ b/storage/rocksdb/rdb_buff.h @@ -0,0 +1,439 @@ +/* + Portions Copyright (c) 2016-Present, Facebook, Inc. + Portions Copyright (c) 2012,2013 Monty Program Ab + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; version 2 of the License. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. 
+ + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ +#pragma once + +#include +#include +#include + +#ifdef _WIN32 +#include +#define htobe64 _byteswap_uint64 +#define be64toh _byteswap_uint64 +#define htobe32 _byteswap_ulong +#define be32toh _byteswap_ulong +#define htobe16 _byteswap_ushort +#define be16toh _byteswap_ushort +#endif + +namespace myrocks { + +/* + Basic composition functions for a network buffer presented as a MySQL String + ("netstr") which stores data in Network Byte Order (Big Endian). +*/ + +inline void rdb_netstr_append_uint64(my_core::String *const out_netstr, + const uint64 &val) { + DBUG_ASSERT(out_netstr != nullptr); + + // Convert from host machine byte order (usually Little Endian) to network + // byte order (Big Endian). + uint64 net_val = htobe64(val); + out_netstr->append(reinterpret_cast(&net_val), sizeof(net_val)); +} + +inline void rdb_netstr_append_uint32(my_core::String *const out_netstr, + const uint32 &val) { + DBUG_ASSERT(out_netstr != nullptr); + + // Convert from host machine byte order (usually Little Endian) to network + // byte order (Big Endian). + uint32 net_val = htobe32(val); + out_netstr->append(reinterpret_cast(&net_val), sizeof(net_val)); +} + +inline void rdb_netstr_append_uint16(my_core::String *const out_netstr, + const uint16 &val) { + DBUG_ASSERT(out_netstr != nullptr); + + // Convert from host machine byte order (usually Little Endian) to network + // byte order (Big Endian). + uint16 net_val = htobe16(val); + out_netstr->append(reinterpret_cast(&net_val), sizeof(net_val)); +} + +/* + Basic network buffer ("netbuf") write helper functions. 
+*/ + +inline void rdb_netbuf_store_uint64(uchar *const dst_netbuf, const uint64 &n) { + DBUG_ASSERT(dst_netbuf != nullptr); + + // Convert from host byte order (usually Little Endian) to network byte order + // (Big Endian). + uint64 net_val = htobe64(n); + memcpy(dst_netbuf, &net_val, sizeof(net_val)); +} + +inline void rdb_netbuf_store_uint32(uchar *const dst_netbuf, const uint32 &n) { + DBUG_ASSERT(dst_netbuf != nullptr); + + // Convert from host byte order (usually Little Endian) to network byte order + // (Big Endian). + uint32 net_val = htobe32(n); + memcpy(dst_netbuf, &net_val, sizeof(net_val)); +} + +inline void rdb_netbuf_store_uint16(uchar *const dst_netbuf, const uint16 &n) { + DBUG_ASSERT(dst_netbuf != nullptr); + + // Convert from host byte order (usually Little Endian) to network byte order + // (Big Endian). + uint16 net_val = htobe16(n); + memcpy(dst_netbuf, &net_val, sizeof(net_val)); +} + +inline void rdb_netbuf_store_byte(uchar *const dst_netbuf, const uchar &c) { + DBUG_ASSERT(dst_netbuf != nullptr); + + *dst_netbuf = c; +} + +inline void rdb_netbuf_store_index(uchar *const dst_netbuf, + const uint32 &number) { + DBUG_ASSERT(dst_netbuf != nullptr); + + rdb_netbuf_store_uint32(dst_netbuf, number); +} + +/* + Basic conversion helper functions from network byte order (Big Endian) to host + machine byte order (usually Little Endian). +*/ + +inline uint64 rdb_netbuf_to_uint64(const uchar *const netbuf) { + DBUG_ASSERT(netbuf != nullptr); + + uint64 net_val; + memcpy(&net_val, netbuf, sizeof(net_val)); + + // Convert from network byte order (Big Endian) to host machine byte order + // (usually Little Endian). + return be64toh(net_val); +} + +inline uint32 rdb_netbuf_to_uint32(const uchar *const netbuf) { + DBUG_ASSERT(netbuf != nullptr); + + uint32 net_val; + memcpy(&net_val, netbuf, sizeof(net_val)); + + // Convert from network byte order (Big Endian) to host machine byte order + // (usually Little Endian). 
+ return be32toh(net_val); +} + +inline uint16 rdb_netbuf_to_uint16(const uchar *const netbuf) { + DBUG_ASSERT(netbuf != nullptr); + + uint16 net_val; + memcpy(&net_val, netbuf, sizeof(net_val)); + + // Convert from network byte order (Big Endian) to host machine byte order + // (usually Little Endian). + return be16toh(net_val); +} + +inline uchar rdb_netbuf_to_byte(const uchar *const netbuf) { + DBUG_ASSERT(netbuf != nullptr); + + return (uchar)netbuf[0]; +} + +/* + Basic network buffer ("netbuf") read helper functions. + Network buffer stores data in Network Byte Order (Big Endian). + NB: The netbuf is passed as an input/output param, hence after reading, + the netbuf pointer gets advanced to the following byte. +*/ + +inline uint64 rdb_netbuf_read_uint64(const uchar **netbuf_ptr) { + DBUG_ASSERT(netbuf_ptr != nullptr); + + // Convert from network byte order (Big Endian) to host machine byte order + // (usually Little Endian). + const uint64 host_val = rdb_netbuf_to_uint64(*netbuf_ptr); + + // Advance pointer. + *netbuf_ptr += sizeof(host_val); + + return host_val; +} + +inline uint32 rdb_netbuf_read_uint32(const uchar **netbuf_ptr) { + DBUG_ASSERT(netbuf_ptr != nullptr); + + // Convert from network byte order (Big Endian) to host machine byte order + // (usually Little Endian). + const uint32 host_val = rdb_netbuf_to_uint32(*netbuf_ptr); + + // Advance pointer. + *netbuf_ptr += sizeof(host_val); + + return host_val; +} + +inline uint16 rdb_netbuf_read_uint16(const uchar **netbuf_ptr) { + DBUG_ASSERT(netbuf_ptr != nullptr); + + // Convert from network byte order (Big Endian) to host machine byte order + // (usually Little Endian). + const uint16 host_val = rdb_netbuf_to_uint16(*netbuf_ptr); + + // Advance pointer. 
+ *netbuf_ptr += sizeof(host_val); + + return host_val; +} + +inline void rdb_netbuf_read_gl_index(const uchar **netbuf_ptr, + GL_INDEX_ID *const gl_index_id) { + DBUG_ASSERT(gl_index_id != nullptr); + DBUG_ASSERT(netbuf_ptr != nullptr); + + gl_index_id->cf_id = rdb_netbuf_read_uint32(netbuf_ptr); + gl_index_id->index_id = rdb_netbuf_read_uint32(netbuf_ptr); +} + +/* + A simple string reader: + - it keeps position within the string that we read from + - it prevents one from reading beyond the end of the string. +*/ + +class Rdb_string_reader { + const char *m_ptr; + uint m_len; + +private: + Rdb_string_reader &operator=(const Rdb_string_reader &) = default; + +public: + Rdb_string_reader(const Rdb_string_reader &) = default; + /* named constructor */ + static Rdb_string_reader read_or_empty(const rocksdb::Slice *const slice) { + if (!slice) { + return Rdb_string_reader(""); + } else { + return Rdb_string_reader(slice); + } + } + + explicit Rdb_string_reader(const std::string &str) { + m_len = str.length(); + if (m_len) { + m_ptr = &str.at(0); + } else { + /* + One can a create a Rdb_string_reader for reading from an empty string + (although attempts to read anything will fail). + We must not access str.at(0), since len==0, we can set ptr to any + value. + */ + m_ptr = nullptr; + } + } + + explicit Rdb_string_reader(const rocksdb::Slice *const slice) { + m_ptr = slice->data(); + m_len = slice->size(); + } + + /* + Read the next @param size bytes. Returns pointer to the bytes read, or + nullptr if the remaining string doesn't have that many bytes. 
+ */ + const char *read(const uint &size) { + const char *res; + if (m_len < size) { + res = nullptr; + } else { + res = m_ptr; + m_ptr += size; + m_len -= size; + } + return res; + } + + bool read_uint8(uint *const res) { + const uchar *p; + if (!(p = reinterpret_cast(read(1)))) + return true; // error + else { + *res = *p; + return false; // Ok + } + } + + bool read_uint16(uint *const res) { + const uchar *p; + if (!(p = reinterpret_cast(read(2)))) + return true; // error + else { + *res = rdb_netbuf_to_uint16(p); + return false; // Ok + } + } + + uint remaining_bytes() const { return m_len; } + + /* + Return pointer to data that will be read by next read() call (if there is + nothing left to read, returns pointer to beyond the end of previous read() + call) + */ + const char *get_current_ptr() const { return m_ptr; } +}; + +/* + @brief + A buffer one can write the data to. + + @detail + Suggested usage pattern: + + writer->clear(); + writer->write_XXX(...); + ... + // Ok, writer->ptr() points to the data written so far, + // and writer->get_current_pos() is the length of the data + +*/ + +class Rdb_string_writer { + std::vector m_data; + +public: + Rdb_string_writer(const Rdb_string_writer &) = delete; + Rdb_string_writer &operator=(const Rdb_string_writer &) = delete; + Rdb_string_writer() = default; + + void clear() { m_data.clear(); } + void write_uint8(const uint &val) { + m_data.push_back(static_cast(val)); + } + + void write_uint16(const uint &val) { + const auto size = m_data.size(); + m_data.resize(size + 2); + rdb_netbuf_store_uint16(m_data.data() + size, val); + } + + void write_uint32(const uint &val) { + const auto size = m_data.size(); + m_data.resize(size + 4); + rdb_netbuf_store_uint32(m_data.data() + size, val); + } + + void write(const uchar *const new_data, const size_t &len) { + DBUG_ASSERT(new_data != nullptr); + m_data.insert(m_data.end(), new_data, new_data + len); + } + + uchar *ptr() { return m_data.data(); } + size_t get_current_pos() 
const { return m_data.size(); } + + void write_uint8_at(const size_t &pos, const uint &new_val) { + // This function will only overwrite what was written + DBUG_ASSERT(pos < get_current_pos()); + m_data.data()[pos] = new_val; + } + + void write_uint16_at(const size_t &pos, const uint &new_val) { + // This function will only overwrite what was written + DBUG_ASSERT(pos < get_current_pos() && (pos + 1) < get_current_pos()); + rdb_netbuf_store_uint16(m_data.data() + pos, new_val); + } +}; + +/* + A helper class for writing bits into Rdb_string_writer. + + The class assumes (but doesn't check) that nobody tries to write + anything to the Rdb_string_writer that it is writing to. +*/ +class Rdb_bit_writer { + Rdb_string_writer *m_writer; + uchar m_offset; + +public: + Rdb_bit_writer(const Rdb_bit_writer &) = delete; + Rdb_bit_writer &operator=(const Rdb_bit_writer &) = delete; + + explicit Rdb_bit_writer(Rdb_string_writer *writer_arg) + : m_writer(writer_arg), m_offset(0) {} + + void write(uint size, const uint &value) { + DBUG_ASSERT((value & ((1 << size) - 1)) == value); + + while (size > 0) { + if (m_offset == 0) { + m_writer->write_uint8(0); + } + // number of bits to put in this byte + const uint bits = std::min(size, (uint)(8 - m_offset)); + uchar *const last_byte = + m_writer->ptr() + m_writer->get_current_pos() - 1; + *last_byte |= (uchar)((value >> (size - bits)) & ((1 << bits) - 1)) + << m_offset; + size -= bits; + m_offset = (m_offset + bits) & 0x7; + } + } +}; + +class Rdb_bit_reader { + const uchar *m_cur; + uchar m_offset; + uint m_ret; + Rdb_string_reader *const m_reader; + +public: + Rdb_bit_reader(const Rdb_bit_reader &) = delete; + Rdb_bit_reader &operator=(const Rdb_bit_reader &) = delete; + + explicit Rdb_bit_reader(Rdb_string_reader *const reader) + : m_cur(nullptr), m_offset(0), m_reader(reader) {} + + // Returns a pointer to an uint containing the bits read. On subsequent + // reads, the value being pointed to will be overwritten. 
Returns nullptr + // on failure. + uint *read(uint size) { + m_ret = 0; + DBUG_ASSERT(size <= 32); + + while (size > 0) { + if (m_offset == 0) { + m_cur = (const uchar *)m_reader->read(1); + if (m_cur == nullptr) { + return nullptr; + } + } + // how many bits from the current byte? + const uint bits = std::min((uint)(8 - m_offset), size); + m_ret <<= bits; + m_ret |= (*m_cur >> m_offset) & ((1 << bits) - 1); + size -= bits; + m_offset = (m_offset + bits) & 0x7; + } + + return &m_ret; + } +}; + +} // namespace myrocks diff --git a/storage/rocksdb/rdb_cf_manager.cc b/storage/rocksdb/rdb_cf_manager.cc new file mode 100644 index 0000000000000..f8a792d5c3a32 --- /dev/null +++ b/storage/rocksdb/rdb_cf_manager.cc @@ -0,0 +1,242 @@ +/* + Copyright (c) 2014, SkySQL Ab + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; version 2 of the License. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ + +#ifdef USE_PRAGMA_IMPLEMENTATION +#pragma implementation // gcc: Class implementation +#endif + +/* For use of 'PRIu64': */ +#define __STDC_FORMAT_MACROS + +#include + +#include + +/* This C++ files header file */ +#include "./rdb_cf_manager.h" + +/* MyRocks header files */ +#include "./ha_rocksdb.h" +#include "./ha_rocksdb_proto.h" +#include "./rdb_psi.h" + +namespace myrocks { + +/* Check if ColumnFamily name says it's a reverse-ordered CF */ +bool Rdb_cf_manager::is_cf_name_reverse(const char *const name) { + /* nullptr means the default CF is used.. 
(TODO: can the default CF be + * reverse?) */ + return (name && !strncmp(name, "rev:", 4)); +} + +void Rdb_cf_manager::init( + Rdb_cf_options *const cf_options, + std::vector *const handles) { + mysql_mutex_init(rdb_cfm_mutex_key, &m_mutex, MY_MUTEX_INIT_FAST); + + DBUG_ASSERT(cf_options != nullptr); + DBUG_ASSERT(handles != nullptr); + DBUG_ASSERT(handles->size() > 0); + + m_cf_options = cf_options; + + for (auto cfh : *handles) { + DBUG_ASSERT(cfh != nullptr); + m_cf_name_map[cfh->GetName()] = cfh; + m_cf_id_map[cfh->GetID()] = cfh; + } +} + +void Rdb_cf_manager::cleanup() { + for (auto it : m_cf_name_map) { + delete it.second; + } + mysql_mutex_destroy(&m_mutex); +} + +/** + Generate Column Family name for per-index column families + + @param res OUT Column Family name +*/ + +void Rdb_cf_manager::get_per_index_cf_name(const std::string &db_table_name, + const char *const index_name, + std::string *const res) { + DBUG_ASSERT(index_name != nullptr); + DBUG_ASSERT(res != nullptr); + + *res = db_table_name + "." + index_name; +} + +/* + @brief + Find column family by name. 
If it doesn't exist, create it + + @detail + See Rdb_cf_manager::get_cf +*/ +rocksdb::ColumnFamilyHandle * +Rdb_cf_manager::get_or_create_cf(rocksdb::DB *const rdb, const char *cf_name, + const std::string &db_table_name, + const char *const index_name, + bool *const is_automatic) { + DBUG_ASSERT(rdb != nullptr); + DBUG_ASSERT(is_automatic != nullptr); + + rocksdb::ColumnFamilyHandle *cf_handle = nullptr; + + RDB_MUTEX_LOCK_CHECK(m_mutex); + + *is_automatic = false; + + if (cf_name == nullptr || *cf_name == '\0') { + cf_name = DEFAULT_CF_NAME; + } + + DBUG_ASSERT(cf_name != nullptr); + + std::string per_index_name; + + if (!strcmp(cf_name, PER_INDEX_CF_NAME)) { + get_per_index_cf_name(db_table_name, index_name, &per_index_name); + cf_name = per_index_name.c_str(); + *is_automatic = true; + } + + const auto it = m_cf_name_map.find(cf_name); + + if (it != m_cf_name_map.end()) { + cf_handle = it->second; + } else { + /* Create a Column Family. */ + const std::string cf_name_str(cf_name); + rocksdb::ColumnFamilyOptions opts; + m_cf_options->get_cf_options(cf_name_str, &opts); + + // NO_LINT_DEBUG + sql_print_information("RocksDB: creating a column family %s", + cf_name_str.c_str()); + sql_print_information(" write_buffer_size=%ld", opts.write_buffer_size); + sql_print_information(" target_file_size_base=%" PRIu64, + opts.target_file_size_base); + + const rocksdb::Status s = + rdb->CreateColumnFamily(opts, cf_name_str, &cf_handle); + + if (s.ok()) { + m_cf_name_map[cf_handle->GetName()] = cf_handle; + m_cf_id_map[cf_handle->GetID()] = cf_handle; + } else { + cf_handle = nullptr; + } + } + + RDB_MUTEX_UNLOCK_CHECK(m_mutex); + + return cf_handle; +} + +/* + Find column family by its cf_name. + + @detail + dbname.tablename and index_name are also parameters, because + cf_name=PER_INDEX_CF_NAME means that column family name is a function + of table/index name. + + @param out is_automatic TRUE<=> column family name is auto-assigned based on + db_table_name and index_name. 
+*/ + +rocksdb::ColumnFamilyHandle * +Rdb_cf_manager::get_cf(const char *cf_name, const std::string &db_table_name, + const char *const index_name, + bool *const is_automatic) const { + DBUG_ASSERT(is_automatic != nullptr); + + rocksdb::ColumnFamilyHandle *cf_handle; + + *is_automatic = false; + + RDB_MUTEX_LOCK_CHECK(m_mutex); + + if (cf_name == nullptr) { + cf_name = DEFAULT_CF_NAME; + } + + std::string per_index_name; + + if (!strcmp(cf_name, PER_INDEX_CF_NAME)) { + get_per_index_cf_name(db_table_name, index_name, &per_index_name); + DBUG_ASSERT(!per_index_name.empty()); + cf_name = per_index_name.c_str(); + *is_automatic = true; + } + + const auto it = m_cf_name_map.find(cf_name); + cf_handle = (it != m_cf_name_map.end()) ? it->second : nullptr; + + if (!cf_handle) { + // NO_LINT_DEBUG + sql_print_warning("Column family '%s' not found.", cf_name); + } + + RDB_MUTEX_UNLOCK_CHECK(m_mutex); + + return cf_handle; +} + +rocksdb::ColumnFamilyHandle *Rdb_cf_manager::get_cf(const uint32_t &id) const { + rocksdb::ColumnFamilyHandle *cf_handle = nullptr; + + RDB_MUTEX_LOCK_CHECK(m_mutex); + const auto it = m_cf_id_map.find(id); + if (it != m_cf_id_map.end()) + cf_handle = it->second; + RDB_MUTEX_UNLOCK_CHECK(m_mutex); + + return cf_handle; +} + +std::vector Rdb_cf_manager::get_cf_names(void) const { + std::vector names; + + RDB_MUTEX_LOCK_CHECK(m_mutex); + for (auto it : m_cf_name_map) { + names.push_back(it.first); + } + RDB_MUTEX_UNLOCK_CHECK(m_mutex); + + return names; +} + +std::vector +Rdb_cf_manager::get_all_cf(void) const { + std::vector list; + + RDB_MUTEX_LOCK_CHECK(m_mutex); + + for (auto it : m_cf_id_map) { + list.push_back(it.second); + } + + RDB_MUTEX_UNLOCK_CHECK(m_mutex); + + return list; +} + +} // namespace myrocks diff --git a/storage/rocksdb/rdb_cf_manager.h b/storage/rocksdb/rdb_cf_manager.h new file mode 100644 index 0000000000000..7b9654f3537fd --- /dev/null +++ b/storage/rocksdb/rdb_cf_manager.h @@ -0,0 +1,111 @@ +/* + Copyright (c) 2014, SkySQL Ab 
+ + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; version 2 of the License. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ + +#pragma once + +/* C++ system header files */ +#include +#include +#include + +/* MySQL header files */ +#include "./sql_class.h" + +/* RocksDB header files */ +#include "rocksdb/db.h" + +/* MyRocks header files */ +#include "./rdb_cf_options.h" + +namespace myrocks { + +/* + We need a Column Family (CF) manager. Its functions: + - create column families (synchronized, don't create the same twice) + - keep count in each column family. + = the count is kept on-disk. + = there are no empty CFs. initially count=1. + = then, when doing DDL, we increase or decrease it. + (atomicity is maintained by being in the same WriteBatch with DDLs) + = if DROP discovers that now count=0, it removes the CF. + + Current state is: + - CFs are created in a synchronized way. We can't remove them, yet. 
+*/ + +class Rdb_cf_manager { + std::map m_cf_name_map; + std::map m_cf_id_map; + + mutable mysql_mutex_t m_mutex; + + static void get_per_index_cf_name(const std::string &db_table_name, + const char *const index_name, + std::string *const res); + + Rdb_cf_options *m_cf_options = nullptr; + +public: + Rdb_cf_manager(const Rdb_cf_manager &) = delete; + Rdb_cf_manager &operator=(const Rdb_cf_manager &) = delete; + Rdb_cf_manager() = default; + + static bool is_cf_name_reverse(const char *const name); + + /* + This is called right after the DB::Open() call. The parameters describe + column + families that are present in the database. The first CF is the default CF. + */ + void init(Rdb_cf_options *cf_options, + std::vector *const handles); + void cleanup(); + + /* + Used by CREATE TABLE. + - cf_name=nullptr means use default column family + - cf_name=_auto_ means use 'dbname.tablename.indexname' + */ + rocksdb::ColumnFamilyHandle * + get_or_create_cf(rocksdb::DB *const rdb, const char *cf_name, + const std::string &db_table_name, + const char *const index_name, bool *const is_automatic); + + /* Used by table open */ + rocksdb::ColumnFamilyHandle *get_cf(const char *cf_name, + const std::string &db_table_name, + const char *const index_name, + bool *const is_automatic) const; + + /* Look up cf by id; used by datadic */ + rocksdb::ColumnFamilyHandle *get_cf(const uint32_t &id) const; + + /* Used to iterate over column families for show status */ + std::vector get_cf_names(void) const; + + /* Used to iterate over column families */ + std::vector get_all_cf(void) const; + + // void drop_cf(); -- not implemented so far. 
+ + void get_cf_options(const std::string &cf_name, + rocksdb::ColumnFamilyOptions *const opts) + MY_ATTRIBUTE((__nonnull__)) { + m_cf_options->get_cf_options(cf_name, opts); + } +}; + +} // namespace myrocks diff --git a/storage/rocksdb/rdb_cf_options.cc b/storage/rocksdb/rdb_cf_options.cc new file mode 100644 index 0000000000000..1bf727dfb9c63 --- /dev/null +++ b/storage/rocksdb/rdb_cf_options.cc @@ -0,0 +1,314 @@ +/* + Copyright (c) 2014, SkySQL Ab + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; version 2 of the License. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ + +#ifdef USE_PRAGMA_IMPLEMENTATION +#pragma implementation // gcc: Class implementation +#endif + +#include + +/* This C++ files header file */ +#include "./rdb_cf_options.h" + +/* C++ system header files */ +#include + +/* MySQL header files */ +#include "./log.h" + +/* RocksDB header files */ +#include "rocksdb/utilities/convenience.h" + +/* MyRocks header files */ +#include "./ha_rocksdb.h" +#include "./rdb_cf_manager.h" +#include "./rdb_compact_filter.h" + +namespace myrocks { + +Rdb_pk_comparator Rdb_cf_options::s_pk_comparator; +Rdb_rev_comparator Rdb_cf_options::s_rev_pk_comparator; + +bool Rdb_cf_options::init( + const rocksdb::BlockBasedTableOptions &table_options, + std::shared_ptr prop_coll_factory, + const char *const default_cf_options, + const char *const override_cf_options) { + DBUG_ASSERT(default_cf_options != nullptr); + DBUG_ASSERT(override_cf_options != 
nullptr); + + m_default_cf_opts.comparator = &s_pk_comparator; + m_default_cf_opts.compaction_filter_factory.reset( + new Rdb_compact_filter_factory); + + m_default_cf_opts.table_factory.reset( + rocksdb::NewBlockBasedTableFactory(table_options)); + + if (prop_coll_factory) { + m_default_cf_opts.table_properties_collector_factories.push_back( + prop_coll_factory); + } + + if (!set_default(std::string(default_cf_options)) || + !set_override(std::string(override_cf_options))) { + return false; + } + + return true; +} + +void Rdb_cf_options::get(const std::string &cf_name, + rocksdb::ColumnFamilyOptions *const opts) { + DBUG_ASSERT(opts != nullptr); + + // set defaults + rocksdb::GetColumnFamilyOptionsFromString(*opts, m_default_config, opts); + + // set per-cf config if we have one + Name_to_config_t::iterator it = m_name_map.find(cf_name); + if (it != m_name_map.end()) { + rocksdb::GetColumnFamilyOptionsFromString(*opts, it->second, opts); + } +} + +bool Rdb_cf_options::set_default(const std::string &default_config) { + rocksdb::ColumnFamilyOptions options; + + if (!default_config.empty() && + !rocksdb::GetColumnFamilyOptionsFromString(options, default_config, + &options) + .ok()) { + fprintf(stderr, "Invalid default column family config: %s\n", + default_config.c_str()); + return false; + } + + m_default_config = default_config; + return true; +} + +// Skip over any spaces in the input string. +void Rdb_cf_options::skip_spaces(const std::string &input, size_t *const pos) { + DBUG_ASSERT(pos != nullptr); + + while (*pos < input.size() && isspace(input[*pos])) + ++(*pos); +} + +// Find a valid column family name. Note that all characters except a +// semicolon are valid (should this change?) and all spaces are trimmed from +// the beginning and end but are not removed between other characters. 
+bool Rdb_cf_options::find_column_family(const std::string &input, + size_t *const pos, + std::string *const key) { + DBUG_ASSERT(pos != nullptr); + DBUG_ASSERT(key != nullptr); + + const size_t beg_pos = *pos; + size_t end_pos = *pos - 1; + + // Loop through the characters in the string until we see a '='. + for (; *pos < input.size() && input[*pos] != '='; ++(*pos)) { + // If this is not a space, move the end position to the current position. + if (input[*pos] != ' ') + end_pos = *pos; + } + + if (end_pos == beg_pos - 1) { + // NO_LINT_DEBUG + sql_print_warning("No column family found (options: %s)", input.c_str()); + return false; + } + + *key = input.substr(beg_pos, end_pos - beg_pos + 1); + return true; +} + +// Find a valid options portion. Everything is deemed valid within the options +// portion until we hit as many close curly braces as we have seen open curly +// braces. +bool Rdb_cf_options::find_options(const std::string &input, size_t *const pos, + std::string *const options) { + DBUG_ASSERT(pos != nullptr); + DBUG_ASSERT(options != nullptr); + + // Make sure we have an open curly brace at the current position. + if (*pos < input.size() && input[*pos] != '{') { + // NO_LINT_DEBUG + sql_print_warning("Invalid cf options, '{' expected (options: %s)", + input.c_str()); + return false; + } + + // Skip the open curly brace and any spaces. + ++(*pos); + skip_spaces(input, pos); + + // Set up our brace_count, the begin position and current end position. + size_t brace_count = 1; + const size_t beg_pos = *pos; + + // Loop through the characters in the string until we find the appropriate + // number of closing curly braces. + while (*pos < input.size()) { + switch (input[*pos]) { + case '}': + // If this is a closing curly brace and we bring the count down to zero + // we can exit the loop with a valid options string. 
+ if (--brace_count == 0) { + *options = input.substr(beg_pos, *pos - beg_pos); + ++(*pos); // Move past the last closing curly brace + return true; + } + + break; + + case '{': + // If this is an open curly brace increment the count. + ++brace_count; + break; + + default: + break; + } + + // Move to the next character. + ++(*pos); + } + + // We never found the correct number of closing curly braces. + // Generate an error. + // NO_LINT_DEBUG + sql_print_warning("Mismatched cf options, '}' expected (options: %s)", + input.c_str()); + return false; +} + +bool Rdb_cf_options::find_cf_options_pair(const std::string &input, + size_t *const pos, + std::string *const cf, + std::string *const opt_str) { + DBUG_ASSERT(pos != nullptr); + DBUG_ASSERT(cf != nullptr); + DBUG_ASSERT(opt_str != nullptr); + + // Skip any spaces. + skip_spaces(input, pos); + + // We should now have a column family name. + if (!find_column_family(input, pos, cf)) + return false; + + // If we are at the end of the input then we generate an error. + if (*pos == input.size()) { + // NO_LINT_DEBUG + sql_print_warning("Invalid cf options, '=' expected (options: %s)", + input.c_str()); + return false; + } + + // Skip equal sign and any spaces after it + ++(*pos); + skip_spaces(input, pos); + + // Find the options for this column family. This should be in the format + // {} where may contain embedded pairs of curly braces. + if (!find_options(input, pos, opt_str)) + return false; + + // Skip any trailing spaces after the option string. + skip_spaces(input, pos); + + // We should either be at the end of the input string or at a semicolon. + if (*pos < input.size()) { + if (input[*pos] != ';') { + // NO_LINT_DEBUG + sql_print_warning("Invalid cf options, ';' expected (options: %s)", + input.c_str()); + return false; + } + + ++(*pos); + } + + return true; +} + +bool Rdb_cf_options::set_override(const std::string &override_config) { + // TODO(???): support updates? 
+ + std::string cf; + std::string opt_str; + rocksdb::ColumnFamilyOptions options; + Name_to_config_t configs; + + // Loop through the characters of the string until we reach the end. + size_t pos = 0; + while (pos < override_config.size()) { + // Attempt to find ={}. + if (!find_cf_options_pair(override_config, &pos, &cf, &opt_str)) + return false; + + // Generate an error if we have already seen this column family. + if (configs.find(cf) != configs.end()) { + // NO_LINT_DEBUG + sql_print_warning( + "Duplicate entry for %s in override options (options: %s)", + cf.c_str(), override_config.c_str()); + return false; + } + + // Generate an error if the is not valid according to RocksDB. + if (!rocksdb::GetColumnFamilyOptionsFromString(options, opt_str, &options) + .ok()) { + // NO_LINT_DEBUG + sql_print_warning( + "Invalid cf config for %s in override options (options: %s)", + cf.c_str(), override_config.c_str()); + return false; + } + + // If everything is good, add this cf/opt_str pair to the map. 
+ configs[cf] = opt_str; + } + + // Everything checked out - make the map live + m_name_map = configs; + + return true; +} + +const rocksdb::Comparator * +Rdb_cf_options::get_cf_comparator(const std::string &cf_name) { + if (Rdb_cf_manager::is_cf_name_reverse(cf_name.c_str())) { + return &s_rev_pk_comparator; + } else { + return &s_pk_comparator; + } +} + +void Rdb_cf_options::get_cf_options(const std::string &cf_name, + rocksdb::ColumnFamilyOptions *const opts) { + DBUG_ASSERT(opts != nullptr); + + *opts = m_default_cf_opts; + get(cf_name, opts); + + // Set the comparator according to 'rev:' + opts->comparator = get_cf_comparator(cf_name); +} + +} // namespace myrocks diff --git a/storage/rocksdb/rdb_cf_options.h b/storage/rocksdb/rdb_cf_options.h new file mode 100644 index 0000000000000..1cd80a131ad33 --- /dev/null +++ b/storage/rocksdb/rdb_cf_options.h @@ -0,0 +1,96 @@ +/* + Copyright (c) 2014, SkySQL Ab + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; version 2 of the License. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ + +#pragma once + +/* C++ system header files */ +#include +#include + +/* RocksDB header files */ +#include "rocksdb/table.h" +#include "rocksdb/utilities/options_util.h" + +/* MyRocks header files */ +#include "./rdb_comparator.h" + +namespace myrocks { + +/* + Per-column family options configs. 
+ + Per-column family option can be set + - Globally (the same value applies to all column families) + - Per column family: there is a {cf_name -> value} map, + and also there is a default value which applies to column + families not found in the map. +*/ +class Rdb_cf_options { +public: + Rdb_cf_options(const Rdb_cf_options &) = delete; + Rdb_cf_options &operator=(const Rdb_cf_options &) = delete; + Rdb_cf_options() = default; + + void get(const std::string &cf_name, + rocksdb::ColumnFamilyOptions *const opts); + + bool init(const rocksdb::BlockBasedTableOptions &table_options, + std::shared_ptr + prop_coll_factory, + const char *const default_cf_options, + const char *const override_cf_options); + + const rocksdb::ColumnFamilyOptions &get_defaults() const { + return m_default_cf_opts; + } + + static const rocksdb::Comparator * + get_cf_comparator(const std::string &cf_name); + + void get_cf_options(const std::string &cf_name, + rocksdb::ColumnFamilyOptions *const opts) + MY_ATTRIBUTE((__nonnull__)); + +private: + bool set_default(const std::string &default_config); + bool set_override(const std::string &overide_config); + + /* Helper string manipulation functions */ + static void skip_spaces(const std::string &input, size_t *const pos); + static bool find_column_family(const std::string &input, size_t *const pos, + std::string *const key); + static bool find_options(const std::string &input, size_t *const pos, + std::string *const options); + static bool find_cf_options_pair(const std::string &input, size_t *const pos, + std::string *const cf, + std::string *const opt_str); + +private: + static Rdb_pk_comparator s_pk_comparator; + static Rdb_rev_comparator s_rev_pk_comparator; + + typedef std::unordered_map Name_to_config_t; + + /* CF name -> value map */ + Name_to_config_t m_name_map; + + /* The default value (if there is only one value, it is stored here) */ + std::string m_default_config; + + rocksdb::ColumnFamilyOptions m_default_cf_opts; +}; + +} // 
namespace myrocks diff --git a/storage/rocksdb/rdb_compact_filter.h b/storage/rocksdb/rdb_compact_filter.h new file mode 100644 index 0000000000000..9e0d69597ffc5 --- /dev/null +++ b/storage/rocksdb/rdb_compact_filter.h @@ -0,0 +1,108 @@ +/* + Portions Copyright (c) 2016-Present, Facebook, Inc. + Portions Copyright (c) 2012, Monty Program Ab + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; version 2 of the License. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ +#pragma once + +#ifdef USE_PRAGMA_IMPLEMENTATION +#pragma implementation // gcc: Class implementation +#endif + +/* C++ system header files */ +#include + +/* RocksDB includes */ +#include "rocksdb/compaction_filter.h" + +/* MyRocks includes */ +#include "./ha_rocksdb_proto.h" +#include "./rdb_datadic.h" + +namespace myrocks { + +class Rdb_compact_filter : public rocksdb::CompactionFilter { +public: + Rdb_compact_filter(const Rdb_compact_filter &) = delete; + Rdb_compact_filter &operator=(const Rdb_compact_filter &) = delete; + + explicit Rdb_compact_filter(uint32_t _cf_id) : m_cf_id(_cf_id) {} + ~Rdb_compact_filter() {} + + // keys are passed in sorted order within the same sst. + // V1 Filter is thread safe on our usage (creating from Factory). + // Make sure to protect instance variables when switching to thread + // unsafe in the future. 
+ virtual bool Filter(int level, const rocksdb::Slice &key, + const rocksdb::Slice &existing_value, + std::string *new_value, + bool *value_changed) const override { + DBUG_ASSERT(key.size() >= sizeof(uint32)); + + GL_INDEX_ID gl_index_id; + gl_index_id.cf_id = m_cf_id; + gl_index_id.index_id = rdb_netbuf_to_uint32((const uchar *)key.data()); + DBUG_ASSERT(gl_index_id.index_id >= 1); + + if (gl_index_id != m_prev_index) // processing new index id + { + if (m_num_deleted > 0) { + m_num_deleted = 0; + } + m_should_delete = + rdb_get_dict_manager()->is_drop_index_ongoing(gl_index_id); + m_prev_index = gl_index_id; + } + + if (m_should_delete) { + m_num_deleted++; + } + + return m_should_delete; + } + + virtual bool IgnoreSnapshots() const override { return true; } + + virtual const char *Name() const override { return "Rdb_compact_filter"; } + +private: + // Column family for this compaction filter + const uint32_t m_cf_id; + // Index id of the previous record + mutable GL_INDEX_ID m_prev_index = {0, 0}; + // Number of rows deleted for the same index id + mutable uint64 m_num_deleted = 0; + // Current index id should be deleted or not (should be deleted if true) + mutable bool m_should_delete = false; +}; + +class Rdb_compact_filter_factory : public rocksdb::CompactionFilterFactory { +public: + Rdb_compact_filter_factory(const Rdb_compact_filter_factory &) = delete; + Rdb_compact_filter_factory & + operator=(const Rdb_compact_filter_factory &) = delete; + Rdb_compact_filter_factory() {} + + ~Rdb_compact_filter_factory() {} + + const char *Name() const override { return "Rdb_compact_filter_factory"; } + + std::unique_ptr CreateCompactionFilter( + const rocksdb::CompactionFilter::Context &context) override { + return std::unique_ptr( + new Rdb_compact_filter(context.column_family_id)); + } +}; + +} // namespace myrocks diff --git a/storage/rocksdb/rdb_comparator.h b/storage/rocksdb/rdb_comparator.h new file mode 100644 index 0000000000000..47b83abc70a12 --- /dev/null 
+++ b/storage/rocksdb/rdb_comparator.h @@ -0,0 +1,99 @@ +/* + Copyright (c) 2012,2015 Monty Program Ab + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; version 2 of the License. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ +#pragma once + +/* C++ system header files */ +#include + +/* MySQL includes */ +#include "./m_ctype.h" + +/* RocksDB header files */ +#include "rocksdb/comparator.h" + +/* MyRocks header files */ +#include "./rdb_utils.h" + +namespace myrocks { + +/* + The keys are in form: {index_number} {mem-comparable-key} + + (todo: knowledge about this format is shared between this class and + Rdb_key_def) +*/ +class Rdb_pk_comparator : public rocksdb::Comparator { +public: + Rdb_pk_comparator(const Rdb_pk_comparator &) = delete; + Rdb_pk_comparator &operator=(const Rdb_pk_comparator &) = delete; + Rdb_pk_comparator() = default; + + static int bytewise_compare(const rocksdb::Slice &a, + const rocksdb::Slice &b) { + const size_t a_size = a.size(); + const size_t b_size = b.size(); + const size_t len = (a_size < b_size) ? a_size : b_size; + int res; + + if ((res = memcmp(a.data(), b.data(), len))) + return res; + + /* Ok, res== 0 */ + if (a_size != b_size) { + return a_size < b_size ? 
-1 : 1; + } + return HA_EXIT_SUCCESS; + } + + /* Override virtual methods of interest */ + + int Compare(const rocksdb::Slice &a, const rocksdb::Slice &b) const override { + return bytewise_compare(a, b); + } + + const char *Name() const override { return "RocksDB_SE_v3.10"; } + + // TODO: advanced funcs: + // - FindShortestSeparator + // - FindShortSuccessor + + // for now, do-nothing implementations: + void FindShortestSeparator(std::string *start, + const rocksdb::Slice &limit) const override {} + void FindShortSuccessor(std::string *key) const override {} +}; + +class Rdb_rev_comparator : public rocksdb::Comparator { +public: + Rdb_rev_comparator(const Rdb_rev_comparator &) = delete; + Rdb_rev_comparator &operator=(const Rdb_rev_comparator &) = delete; + Rdb_rev_comparator() = default; + + static int bytewise_compare(const rocksdb::Slice &a, + const rocksdb::Slice &b) { + return -Rdb_pk_comparator::bytewise_compare(a, b); + } + + int Compare(const rocksdb::Slice &a, const rocksdb::Slice &b) const override { + return -Rdb_pk_comparator::bytewise_compare(a, b); + } + const char *Name() const override { return "rev:RocksDB_SE_v3.10"; } + void FindShortestSeparator(std::string *start, + const rocksdb::Slice &limit) const override {} + void FindShortSuccessor(std::string *key) const override {} +}; + +} // namespace myrocks diff --git a/storage/rocksdb/rdb_datadic.cc b/storage/rocksdb/rdb_datadic.cc new file mode 100644 index 0000000000000..255a54cbdced3 --- /dev/null +++ b/storage/rocksdb/rdb_datadic.cc @@ -0,0 +1,4258 @@ +/* + Copyright (c) 2012,2013 Monty Program Ab + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; version 2 of the License. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ + +#ifdef USE_PRAGMA_IMPLEMENTATION +#pragma implementation // gcc: Class implementation +#endif + +/* For use of 'PRIu64': */ +#define __STDC_FORMAT_MACROS + +#include +/* This C++ file's header file */ +#include "./rdb_datadic.h" + +#include +/* C++ standard header files */ +#include +#include +#include +#include +#include +#include +#include + +/* MySQL header files */ +#include "./key.h" +#include "./m_ctype.h" +#include "./my_bit.h" + +/* MyRocks header files */ +#include "./ha_rocksdb_proto.h" +#include "./my_stacktrace.h" +#include "./rdb_cf_manager.h" +#include "./rdb_utils.h" + +namespace myrocks { + +void get_mem_comparable_space(const CHARSET_INFO *cs, + const std::vector **xfrm, size_t *xfrm_len, + size_t *mb_len); + +/* + Rdb_key_def class implementation +*/ + +Rdb_key_def::Rdb_key_def(uint indexnr_arg, uint keyno_arg, + rocksdb::ColumnFamilyHandle *cf_handle_arg, + uint16_t index_dict_version_arg, uchar index_type_arg, + uint16_t kv_format_version_arg, bool is_reverse_cf_arg, + bool is_auto_cf_arg, bool is_per_partition_cf_arg, + const char *_name, Rdb_index_stats _stats) + : m_index_number(indexnr_arg), m_cf_handle(cf_handle_arg), + m_index_dict_version(index_dict_version_arg), + m_index_type(index_type_arg), m_kv_format_version(kv_format_version_arg), + m_is_reverse_cf(is_reverse_cf_arg), m_is_auto_cf(is_auto_cf_arg), + m_is_per_partition_cf(is_per_partition_cf_arg), + m_name(_name), m_stats(_stats), m_pk_part_no(nullptr), + m_pack_info(nullptr), m_keyno(keyno_arg), m_key_parts(0), + m_prefix_extractor(nullptr), m_maxlength(0) // means 'not intialized' +{ + mysql_mutex_init(0, &m_mutex, MY_MUTEX_INIT_FAST); + rdb_netbuf_store_index(m_index_number_storage_form, m_index_number); + 
DBUG_ASSERT(m_cf_handle != nullptr); +} + +Rdb_key_def::Rdb_key_def(const Rdb_key_def &k) + : m_index_number(k.m_index_number), m_cf_handle(k.m_cf_handle), + m_is_reverse_cf(k.m_is_reverse_cf), m_is_auto_cf(k.m_is_auto_cf), + m_is_per_partition_cf(k.m_is_per_partition_cf), + m_name(k.m_name), m_stats(k.m_stats), m_pk_part_no(k.m_pk_part_no), + m_pack_info(k.m_pack_info), m_keyno(k.m_keyno), + m_key_parts(k.m_key_parts), m_prefix_extractor(k.m_prefix_extractor), + m_maxlength(k.m_maxlength) { + mysql_mutex_init(0, &m_mutex, MY_MUTEX_INIT_FAST); + rdb_netbuf_store_index(m_index_number_storage_form, m_index_number); + if (k.m_pack_info) { + const size_t size = sizeof(Rdb_field_packing) * k.m_key_parts; + m_pack_info = + reinterpret_cast(my_malloc(size, MYF(0))); + memcpy(m_pack_info, k.m_pack_info, size); + } + + if (k.m_pk_part_no) { + const size_t size = sizeof(uint) * m_key_parts; + m_pk_part_no = reinterpret_cast(my_malloc(size, MYF(0))); + memcpy(m_pk_part_no, k.m_pk_part_no, size); + } +} + +Rdb_key_def::~Rdb_key_def() { + mysql_mutex_destroy(&m_mutex); + + my_free(m_pk_part_no); + m_pk_part_no = nullptr; + + my_free(m_pack_info); + m_pack_info = nullptr; +} + +void Rdb_key_def::setup(const TABLE *const tbl, + const Rdb_tbl_def *const tbl_def) { + DBUG_ASSERT(tbl != nullptr); + DBUG_ASSERT(tbl_def != nullptr); + + /* + Set max_length based on the table. This can be called concurrently from + multiple threads, so there is a mutex to protect this code. 
+ */ + const bool is_hidden_pk = (m_index_type == INDEX_TYPE_HIDDEN_PRIMARY); + const bool hidden_pk_exists = table_has_hidden_pk(tbl); + const bool secondary_key = (m_index_type == INDEX_TYPE_SECONDARY); + if (!m_maxlength) { + RDB_MUTEX_LOCK_CHECK(m_mutex); + if (m_maxlength != 0) { + RDB_MUTEX_UNLOCK_CHECK(m_mutex); + return; + } + + KEY *key_info = nullptr; + KEY *pk_info = nullptr; + if (!is_hidden_pk) { + key_info = &tbl->key_info[m_keyno]; + if (!hidden_pk_exists) + pk_info = &tbl->key_info[tbl->s->primary_key]; + m_name = std::string(key_info->name); + } else { + m_name = HIDDEN_PK_NAME; + } + + if (secondary_key) + m_pk_key_parts= hidden_pk_exists ? 1 : pk_info->ext_key_parts; + else { + pk_info = nullptr; + m_pk_key_parts = 0; + } + + // "unique" secondary keys support: + m_key_parts= is_hidden_pk ? 1 : key_info->ext_key_parts; + + if (secondary_key) { + /* + In most cases, SQL layer puts PK columns as invisible suffix at the + end of secondary key. There are cases where this doesn't happen: + - unique secondary indexes. + - partitioned tables. + + Internally, we always need PK columns as suffix (and InnoDB does, + too, if you were wondering). + + The loop below will attempt to put all PK columns at the end of key + definition. Columns that are already included in the index (either + by the user or by "extended keys" feature) are not included for the + second time. 
+ */ + m_key_parts += m_pk_key_parts; + } + + if (secondary_key) + m_pk_part_no = reinterpret_cast( + my_malloc(sizeof(uint) * m_key_parts, MYF(0))); + else + m_pk_part_no = nullptr; + + const size_t size = sizeof(Rdb_field_packing) * m_key_parts; + m_pack_info = + reinterpret_cast(my_malloc(size, MYF(0))); + + size_t max_len = INDEX_NUMBER_SIZE; + int unpack_len = 0; + int max_part_len = 0; + bool simulating_extkey = false; + uint dst_i = 0; + + uint keyno_to_set = m_keyno; + uint keypart_to_set = 0; + + if (is_hidden_pk) { + Field *field = nullptr; + m_pack_info[dst_i].setup(this, field, keyno_to_set, 0, 0); + m_pack_info[dst_i].m_unpack_data_offset = unpack_len; + max_len += m_pack_info[dst_i].m_max_image_len; + max_part_len = std::max(max_part_len, m_pack_info[dst_i].m_max_image_len); + dst_i++; + } else { + KEY_PART_INFO *key_part = key_info->key_part; + + /* this loop also loops over the 'extended key' tail */ + for (uint src_i = 0; src_i < m_key_parts; src_i++, keypart_to_set++) { + Field *const field = key_part ? key_part->field : nullptr; + + if (simulating_extkey && !hidden_pk_exists) { + DBUG_ASSERT(secondary_key); + /* Check if this field is already present in the key definition */ + bool found = false; + for (uint j= 0; j < key_info->ext_key_parts; j++) { + if (field->field_index == + key_info->key_part[j].field->field_index && + key_part->length == key_info->key_part[j].length) { + found = true; + break; + } + } + + if (found) { + key_part++; + continue; + } + } + + if (field && field->real_maybe_null()) + max_len += 1; // NULL-byte + + m_pack_info[dst_i].setup(this, field, keyno_to_set, keypart_to_set, + key_part ? 
key_part->length : 0); + m_pack_info[dst_i].m_unpack_data_offset = unpack_len; + + if (pk_info) { + m_pk_part_no[dst_i] = -1; + for (uint j = 0; j < m_pk_key_parts; j++) { + if (field->field_index == pk_info->key_part[j].field->field_index) { + m_pk_part_no[dst_i] = j; + break; + } + } + } else if (secondary_key && hidden_pk_exists) { + /* + The hidden pk can never be part of the sk. So it is always + appended to the end of the sk. + */ + m_pk_part_no[dst_i] = -1; + if (simulating_extkey) + m_pk_part_no[dst_i] = 0; + } + + max_len += m_pack_info[dst_i].m_max_image_len; + + max_part_len = + std::max(max_part_len, m_pack_info[dst_i].m_max_image_len); + + key_part++; + /* + For "unique" secondary indexes, pretend they have + "index extensions". + + MariaDB also has this property: if an index has a partially-covered + column like KEY(varchar_col(N)), then the SQL layer will think it is + not "extended" with PK columns. The code below handles this case, + also. + */ + if (secondary_key && src_i+1 == key_info->ext_key_parts) { + simulating_extkey = true; + if (!hidden_pk_exists) { + keyno_to_set = tbl->s->primary_key; + key_part = pk_info->key_part; + keypart_to_set = (uint)-1; + } else { + keyno_to_set = tbl_def->m_key_count - 1; + key_part = nullptr; + keypart_to_set = 0; + } + } + + dst_i++; + } + } + + m_key_parts = dst_i; + + /* Initialize the memory needed by the stats structure */ + m_stats.m_distinct_keys_per_prefix.resize(get_key_parts()); + + /* Cache prefix extractor for bloom filter usage later */ + rocksdb::Options opt = rdb_get_rocksdb_db()->GetOptions(get_cf()); + m_prefix_extractor = opt.prefix_extractor; + + /* + This should be the last member variable set before releasing the mutex + so that other threads can't see the object partially set up. + */ + m_maxlength = max_len; + + RDB_MUTEX_UNLOCK_CHECK(m_mutex); + } +} + +/** + Read a memcmp key part from a slice using the passed in reader. + + Returns -1 if field was null, 1 if error, 0 otherwise. 
+*/ +int Rdb_key_def::read_memcmp_key_part(const TABLE *table_arg, + Rdb_string_reader *reader, + const uint part_num) const { + /* It is impossible to unpack the column. Skip it. */ + if (m_pack_info[part_num].m_maybe_null) { + const char *nullp; + if (!(nullp = reader->read(1))) + return 1; + if (*nullp == 0) { + /* This is a NULL value */ + return -1; + } else { + /* If NULL marker is not '0', it can be only '1' */ + if (*nullp != 1) + return 1; + } + } + + Rdb_field_packing *fpi = &m_pack_info[part_num]; + DBUG_ASSERT(table_arg->s != nullptr); + + bool is_hidden_pk_part = (part_num + 1 == m_key_parts) && + (table_arg->s->primary_key == MAX_INDEXES); + Field *field = nullptr; + if (!is_hidden_pk_part) + field = fpi->get_field_in_table(table_arg); + if (fpi->m_skip_func(fpi, field, reader)) + return 1; + + return 0; +} + +/** + Get a mem-comparable form of Primary Key from mem-comparable form of this key + + @param + pk_descr Primary Key descriptor + key Index tuple from this key in mem-comparable form + pk_buffer OUT Put here mem-comparable form of the Primary Key. + + @note + It may or may not be possible to restore primary key columns to their + mem-comparable form. To handle all cases, this function copies mem- + comparable forms directly. + + RocksDB SE supports "Extended keys". This means that PK columns are present + at the end of every key. If the key already includes PK columns, then + these columns are not present at the end of the key. + + Because of the above, we copy each primary key column. + + @todo + If we checked crc32 checksums in this function, we would catch some CRC + violations that we currently don't. On the other hand, there is a broader + set of queries for which we would check the checksum twice. 
+*/ + +uint Rdb_key_def::get_primary_key_tuple(const TABLE *const table, + const Rdb_key_def &pk_descr, + const rocksdb::Slice *const key, + uchar *const pk_buffer) const { + DBUG_ASSERT(table != nullptr); + DBUG_ASSERT(key != nullptr); + DBUG_ASSERT(pk_buffer); + + uint size = 0; + uchar *buf = pk_buffer; + DBUG_ASSERT(m_pk_key_parts); + + /* Put the PK number */ + rdb_netbuf_store_index(buf, pk_descr.m_index_number); + buf += INDEX_NUMBER_SIZE; + size += INDEX_NUMBER_SIZE; + + const char *start_offs[MAX_REF_PARTS]; + const char *end_offs[MAX_REF_PARTS]; + int pk_key_part; + uint i; + Rdb_string_reader reader(key); + + // Skip the index number + if ((!reader.read(INDEX_NUMBER_SIZE))) + return RDB_INVALID_KEY_LEN; + + for (i = 0; i < m_key_parts; i++) { + if ((pk_key_part = m_pk_part_no[i]) != -1) { + start_offs[pk_key_part] = reader.get_current_ptr(); + } + + if (read_memcmp_key_part(table, &reader, i) > 0) { + return RDB_INVALID_KEY_LEN; + } + + if (pk_key_part != -1) { + end_offs[pk_key_part] = reader.get_current_ptr(); + } + } + + for (i = 0; i < m_pk_key_parts; i++) { + const uint part_size = end_offs[i] - start_offs[i]; + memcpy(buf, start_offs[i], end_offs[i] - start_offs[i]); + buf += part_size; + size += part_size; + } + + return size; +} + +/** + Get a mem-comparable form of Secondary Key from mem-comparable form of this + key, without the extended primary key tail. + + @param + key Index tuple from this key in mem-comparable form + sk_buffer OUT Put here mem-comparable form of the Secondary Key. 
+ n_null_fields OUT Put number of null fields contained within sk entry +*/ +uint Rdb_key_def::get_memcmp_sk_parts(const TABLE *table, + const rocksdb::Slice &key, + uchar *sk_buffer, + uint *n_null_fields) const { + DBUG_ASSERT(table != nullptr); + DBUG_ASSERT(sk_buffer != nullptr); + DBUG_ASSERT(n_null_fields != nullptr); + DBUG_ASSERT(m_keyno != table->s->primary_key && !table_has_hidden_pk(table)); + + uchar *buf = sk_buffer; + + int res; + Rdb_string_reader reader(&key); + const char *start = reader.get_current_ptr(); + + // Skip the index number + if ((!reader.read(INDEX_NUMBER_SIZE))) + return RDB_INVALID_KEY_LEN; + + for (uint i = 0; i < table->key_info[m_keyno].user_defined_key_parts; i++) { + if ((res = read_memcmp_key_part(table, &reader, i)) > 0) { + return RDB_INVALID_KEY_LEN; + } else if (res == -1) { + (*n_null_fields)++; + } + } + + uint sk_memcmp_len = reader.get_current_ptr() - start; + memcpy(buf, start, sk_memcmp_len); + return sk_memcmp_len; +} + +/** + Convert index tuple into storage (i.e. mem-comparable) format + + @detail + Currently this is done by unpacking into table->record[0] and then + packing index columns into storage format. + + @param pack_buffer Temporary area for packing varchar columns. Its + size is at least max_storage_fmt_length() bytes. +*/ + +uint Rdb_key_def::pack_index_tuple(TABLE *const tbl, uchar *const pack_buffer, + uchar *const packed_tuple, + const uchar *const key_tuple, + const key_part_map &keypart_map) const { + DBUG_ASSERT(tbl != nullptr); + DBUG_ASSERT(pack_buffer != nullptr); + DBUG_ASSERT(packed_tuple != nullptr); + DBUG_ASSERT(key_tuple != nullptr); + + /* We were given a record in KeyTupleFormat. 
First, save it to record */ + const uint key_len = calculate_key_len(tbl, m_keyno, key_tuple, keypart_map); + key_restore(tbl->record[0], key_tuple, &tbl->key_info[m_keyno], key_len); + + uint n_used_parts = my_count_bits(keypart_map); + if (keypart_map == HA_WHOLE_KEY) + n_used_parts = 0; // Full key is used + + /* Then, convert the record into a mem-comparable form */ + return pack_record(tbl, pack_buffer, tbl->record[0], packed_tuple, nullptr, + false, 0, n_used_parts); +} + +/** + @brief + Check if "unpack info" data includes checksum. + + @detail + This is used only by CHECK TABLE to count the number of rows that have + checksums. +*/ + +bool Rdb_key_def::unpack_info_has_checksum(const rocksdb::Slice &unpack_info) { + const uchar *ptr = (const uchar *)unpack_info.data(); + size_t size = unpack_info.size(); + + // Skip unpack info if present. + if (size >= RDB_UNPACK_HEADER_SIZE && ptr[0] == RDB_UNPACK_DATA_TAG) { + const uint16 skip_len = rdb_netbuf_to_uint16(ptr + 1); + SHIP_ASSERT(size >= skip_len); + + size -= skip_len; + ptr += skip_len; + } + + return (size == RDB_CHECKSUM_CHUNK_SIZE && ptr[0] == RDB_CHECKSUM_DATA_TAG); +} + +/* + @return Number of bytes that were changed +*/ +int Rdb_key_def::successor(uchar *const packed_tuple, const uint &len) { + DBUG_ASSERT(packed_tuple != nullptr); + + int changed = 0; + uchar *p = packed_tuple + len - 1; + for (; p > packed_tuple; p--) { + changed++; + if (*p != uchar(0xFF)) { + *p = *p + 1; + break; + } + *p = '\0'; + } + return changed; +} + +uchar *Rdb_key_def::pack_field( + Field *const field, + Rdb_field_packing *pack_info, + uchar * tuple, + uchar *const packed_tuple, + uchar *const pack_buffer, + Rdb_string_writer *const unpack_info, + uint *const n_null_fields) const +{ + if (field->real_maybe_null()) { + DBUG_ASSERT(is_storage_available(tuple - packed_tuple, 1)); + if (field->is_real_null()) { + /* NULL value. 
store '\0' so that it sorts before non-NULL values */ + *tuple++ = 0; + /* That's it, don't store anything else */ + if (n_null_fields) + (*n_null_fields)++; + return tuple; + } else { + /* Not a NULL value. Store '1' */ + *tuple++ = 1; + } + } + + const bool create_unpack_info = + (unpack_info && // we were requested to generate unpack_info + pack_info->uses_unpack_info()); // and this keypart uses it + Rdb_pack_field_context pack_ctx(unpack_info); + + // Set the offset for methods which do not take an offset as an argument + DBUG_ASSERT(is_storage_available(tuple - packed_tuple, + pack_info->m_max_image_len)); + + pack_info->m_pack_func(pack_info, field, pack_buffer, &tuple, &pack_ctx); + + /* Make "unpack info" to be stored in the value */ + if (create_unpack_info) { + pack_info->m_make_unpack_info_func(pack_info->m_charset_codec, field, + &pack_ctx); + } + + return tuple; +} + +/** + Get index columns from the record and pack them into mem-comparable form. + + @param + tbl Table we're working on + record IN Record buffer with fields in table->record format + pack_buffer IN Temporary area for packing varchars. The size is + at least max_storage_fmt_length() bytes. + packed_tuple OUT Key in the mem-comparable form + unpack_info OUT Unpack data + unpack_info_len OUT Unpack data length + n_key_parts Number of keyparts to process. 0 means all of them. + n_null_fields OUT Number of key fields with NULL value. + + @detail + Some callers do not need the unpack information, they can pass + unpack_info=nullptr, unpack_info_len=nullptr. 
+ + @return + Length of the packed tuple +*/ + +uint Rdb_key_def::pack_record(const TABLE *const tbl, uchar *const pack_buffer, + const uchar *const record, + uchar *const packed_tuple, + Rdb_string_writer *const unpack_info, + const bool &should_store_row_debug_checksums, + const longlong &hidden_pk_id, uint n_key_parts, + uint *const n_null_fields) const { + DBUG_ASSERT(tbl != nullptr); + DBUG_ASSERT(pack_buffer != nullptr); + DBUG_ASSERT(record != nullptr); + DBUG_ASSERT(packed_tuple != nullptr); + // Checksums for PKs are made when record is packed. + // We should never attempt to make checksum just from PK values + DBUG_ASSERT_IMP(should_store_row_debug_checksums, + (m_index_type == INDEX_TYPE_SECONDARY)); + + uchar *tuple = packed_tuple; + size_t unpack_len_pos = size_t(-1); + const bool hidden_pk_exists = table_has_hidden_pk(tbl); + + rdb_netbuf_store_index(tuple, m_index_number); + tuple += INDEX_NUMBER_SIZE; + + // If n_key_parts is 0, it means all columns. + // The following includes the 'extended key' tail. + // The 'extended key' includes primary key. This is done to 'uniqify' + // non-unique indexes + const bool use_all_columns = n_key_parts == 0 || n_key_parts == MAX_REF_PARTS; + + // If hidden pk exists, but hidden pk wasnt passed in, we can't pack the + // hidden key part. So we skip it (its always 1 part). 
+ if (hidden_pk_exists && !hidden_pk_id && use_all_columns) + n_key_parts = m_key_parts - 1; + else if (use_all_columns) + n_key_parts = m_key_parts; + + if (n_null_fields) + *n_null_fields = 0; + + if (unpack_info) { + unpack_info->clear(); + unpack_info->write_uint8(RDB_UNPACK_DATA_TAG); + unpack_len_pos = unpack_info->get_current_pos(); + // we don't know the total length yet, so write a zero + unpack_info->write_uint16(0); + } + + for (uint i = 0; i < n_key_parts; i++) { + // Fill hidden pk id into the last key part for secondary keys for tables + // with no pk + if (hidden_pk_exists && hidden_pk_id && i + 1 == n_key_parts) { + m_pack_info[i].fill_hidden_pk_val(&tuple, hidden_pk_id); + break; + } + + Field *const field = m_pack_info[i].get_field_in_table(tbl); + DBUG_ASSERT(field != nullptr); + + uint field_offset = field->ptr - tbl->record[0]; + uint null_offset = field->null_offset(tbl->record[0]); + bool maybe_null = field->real_maybe_null(); + field->move_field(const_cast(record) + field_offset, + maybe_null ? const_cast(record) + null_offset : nullptr, + field->null_bit); + // WARNING! Don't return without restoring field->ptr and field->null_ptr + + tuple = pack_field(field, &m_pack_info[i], tuple, packed_tuple, pack_buffer, + unpack_info, n_null_fields); + + // Restore field->ptr and field->null_ptr + field->move_field(tbl->record[0] + field_offset, + maybe_null ? tbl->record[0] + null_offset : nullptr, + field->null_bit); + } + + if (unpack_info) { + const size_t len = unpack_info->get_current_pos(); + DBUG_ASSERT(len <= std::numeric_limits::max()); + + // Don't store the unpack_info if it has only the header (that is, there's + // no meaningful content). 
+ // Primary Keys are special: for them, store the unpack_info even if it's + // empty (provided m_maybe_unpack_info==true, see + // ha_rocksdb::convert_record_to_storage_format) + if (len == RDB_UNPACK_HEADER_SIZE && + m_index_type != Rdb_key_def::INDEX_TYPE_PRIMARY) { + unpack_info->clear(); + } else { + unpack_info->write_uint16_at(unpack_len_pos, len); + } + + // + // Secondary keys have key and value checksums in the value part + // Primary key is a special case (the value part has non-indexed columns), + // so the checksums are computed and stored by + // ha_rocksdb::convert_record_to_storage_format + // + if (should_store_row_debug_checksums) { + const uint32_t key_crc32 = crc32(0, packed_tuple, tuple - packed_tuple); + const uint32_t val_crc32 = + crc32(0, unpack_info->ptr(), unpack_info->get_current_pos()); + + unpack_info->write_uint8(RDB_CHECKSUM_DATA_TAG); + unpack_info->write_uint32(key_crc32); + unpack_info->write_uint32(val_crc32); + } + } + + DBUG_ASSERT(is_storage_available(tuple - packed_tuple, 0)); + + return tuple - packed_tuple; +} + +/** + Pack the hidden primary key into mem-comparable form. 
+ + @param + tbl Table we're working on + hidden_pk_id IN New value to be packed into key + packed_tuple OUT Key in the mem-comparable form + + @return + Length of the packed tuple +*/ + +uint Rdb_key_def::pack_hidden_pk(const longlong &hidden_pk_id, + uchar *const packed_tuple) const { + DBUG_ASSERT(packed_tuple != nullptr); + + uchar *tuple = packed_tuple; + rdb_netbuf_store_index(tuple, m_index_number); + tuple += INDEX_NUMBER_SIZE; + DBUG_ASSERT(m_key_parts == 1); + DBUG_ASSERT(is_storage_available(tuple - packed_tuple, + m_pack_info[0].m_max_image_len)); + + m_pack_info[0].fill_hidden_pk_val(&tuple, hidden_pk_id); + + DBUG_ASSERT(is_storage_available(tuple - packed_tuple, 0)); + return tuple - packed_tuple; +} + +/* + Function of type rdb_index_field_pack_t +*/ + +void rdb_pack_with_make_sort_key( + Rdb_field_packing *const fpi, Field *const field, + uchar *const buf MY_ATTRIBUTE((__unused__)), uchar **dst, + Rdb_pack_field_context *const pack_ctx MY_ATTRIBUTE((__unused__))) { + DBUG_ASSERT(fpi != nullptr); + DBUG_ASSERT(field != nullptr); + DBUG_ASSERT(dst != nullptr); + DBUG_ASSERT(*dst != nullptr); + + const int max_len = fpi->m_max_image_len; + my_bitmap_map *old_map; + + old_map= dbug_tmp_use_all_columns(field->table, + field->table->read_set); + field->sort_string(*dst, max_len); + dbug_tmp_restore_column_map(field->table->read_set, old_map); + *dst += max_len; +} + +/* + Compares two keys without unpacking + + @detail + @return + 0 - Ok. column_index is the index of the first column which is different. + -1 if two kes are equal + 1 - Data format error. 
+*/ +int Rdb_key_def::compare_keys(const rocksdb::Slice *key1, + const rocksdb::Slice *key2, + std::size_t *const column_index) const { + DBUG_ASSERT(key1 != nullptr); + DBUG_ASSERT(key2 != nullptr); + DBUG_ASSERT(column_index != nullptr); + + // the caller should check the return value and + // not rely on column_index being valid + *column_index = 0xbadf00d; + + Rdb_string_reader reader1(key1); + Rdb_string_reader reader2(key2); + + // Skip the index number + if ((!reader1.read(INDEX_NUMBER_SIZE))) + return HA_EXIT_FAILURE; + + if ((!reader2.read(INDEX_NUMBER_SIZE))) + return HA_EXIT_FAILURE; + + for (uint i = 0; i < m_key_parts; i++) { + const Rdb_field_packing *const fpi = &m_pack_info[i]; + if (fpi->m_maybe_null) { + const auto nullp1 = reader1.read(1); + const auto nullp2 = reader2.read(1); + + if (nullp1 == nullptr || nullp2 == nullptr) { + return HA_EXIT_FAILURE; + } + + if (*nullp1 != *nullp2) { + *column_index = i; + return HA_EXIT_SUCCESS; + } + + if (*nullp1 == 0) { + /* This is a NULL value */ + continue; + } + } + + const auto before_skip1 = reader1.get_current_ptr(); + const auto before_skip2 = reader2.get_current_ptr(); + DBUG_ASSERT(fpi->m_skip_func); + if (fpi->m_skip_func(fpi, nullptr, &reader1)) + return HA_EXIT_FAILURE; + if (fpi->m_skip_func(fpi, nullptr, &reader2)) + return HA_EXIT_FAILURE; + const auto size1 = reader1.get_current_ptr() - before_skip1; + const auto size2 = reader2.get_current_ptr() - before_skip2; + if (size1 != size2) { + *column_index = i; + return HA_EXIT_SUCCESS; + } + + if (memcmp(before_skip1, before_skip2, size1) != 0) { + *column_index = i; + return HA_EXIT_SUCCESS; + } + } + + *column_index = m_key_parts; + return HA_EXIT_SUCCESS; +} + +/* + @brief + Given a zero-padded key, determine its real key length + + @detail + Fixed-size skip functions just read. 
+*/ + +size_t Rdb_key_def::key_length(const TABLE *const table, + const rocksdb::Slice &key) const { + DBUG_ASSERT(table != nullptr); + + Rdb_string_reader reader(&key); + + if ((!reader.read(INDEX_NUMBER_SIZE))) + return size_t(-1); + + for (uint i = 0; i < m_key_parts; i++) { + const Rdb_field_packing *fpi = &m_pack_info[i]; + const Field *field = nullptr; + if (m_index_type != INDEX_TYPE_HIDDEN_PRIMARY) + field = fpi->get_field_in_table(table); + if (fpi->m_skip_func(fpi, field, &reader)) + return size_t(-1); + } + return key.size() - reader.remaining_bytes(); +} + +int Rdb_key_def::unpack_field( + Rdb_field_packing *const fpi, + Field *const field, + Rdb_string_reader* reader, + const uchar *const default_value, + Rdb_string_reader* unp_reader) const +{ + if (fpi->m_maybe_null) { + const char *nullp; + if (!(nullp = reader->read(1))) { + return HA_EXIT_FAILURE; + } + + if (*nullp == 0) { + /* Set the NULL-bit of this field */ + field->set_null(); + /* Also set the field to its default value */ + memcpy(field->ptr, default_value, field->pack_length()); + return HA_EXIT_SUCCESS; + } else if (*nullp == 1) { + field->set_notnull(); + } else { + return HA_EXIT_FAILURE; + } + } + + return fpi->m_unpack_func(fpi, field, field->ptr, reader, unp_reader); +} + +/* + Take mem-comparable form and unpack_info and unpack it to Table->record + + @detail + not all indexes support this + + @return + UNPACK_SUCCESS - Ok + UNPACK_FAILURE - Data format error. 
+*/ + +int Rdb_key_def::unpack_record(TABLE *const table, uchar *const buf, + const rocksdb::Slice *const packed_key, + const rocksdb::Slice *const unpack_info, + const bool &verify_row_debug_checksums) const { + Rdb_string_reader reader(packed_key); + Rdb_string_reader unp_reader = Rdb_string_reader::read_or_empty(unpack_info); + + const bool is_hidden_pk = (m_index_type == INDEX_TYPE_HIDDEN_PRIMARY); + const bool hidden_pk_exists = table_has_hidden_pk(table); + const bool secondary_key = (m_index_type == INDEX_TYPE_SECONDARY); + // There is no checksuming data after unpack_info for primary keys, because + // the layout there is different. The checksum is verified in + // ha_rocksdb::convert_record_from_storage_format instead. + DBUG_ASSERT_IMP(!secondary_key, !verify_row_debug_checksums); + + // Skip the index number + if ((!reader.read(INDEX_NUMBER_SIZE))) { + return HA_EXIT_FAILURE; + } + + // For secondary keys, we expect the value field to contain unpack data and + // checksum data in that order. One or both can be missing, but they cannot + // be reordered. + const bool has_unpack_info = + unp_reader.remaining_bytes() && + *unp_reader.get_current_ptr() == RDB_UNPACK_DATA_TAG; + if (has_unpack_info && !unp_reader.read(RDB_UNPACK_HEADER_SIZE)) { + return HA_EXIT_FAILURE; + } + + for (uint i = 0; i < m_key_parts; i++) { + Rdb_field_packing *const fpi = &m_pack_info[i]; + + /* + Hidden pk field is packed at the end of the secondary keys, but the SQL + layer does not know about it. Skip retrieving field if hidden pk. + */ + if ((secondary_key && hidden_pk_exists && i + 1 == m_key_parts) || + is_hidden_pk) { + DBUG_ASSERT(fpi->m_unpack_func); + if (fpi->m_skip_func(fpi, nullptr, &reader)) { + return HA_EXIT_FAILURE; + } + continue; + } + + Field *const field = fpi->get_field_in_table(table); + + if (fpi->m_unpack_func) { + /* It is possible to unpack this column. Do it. 
*/ + + uint field_offset = field->ptr - table->record[0]; + uint null_offset = field->null_offset(); + bool maybe_null = field->real_maybe_null(); + field->move_field(buf + field_offset, + maybe_null ? buf + null_offset : nullptr, + field->null_bit); + // WARNING! Don't return without restoring field->ptr and field->null_ptr + + // If we need unpack info, but there is none, tell the unpack function + // this by passing unp_reader as nullptr. If we never read unpack_info + // during unpacking anyway, then there won't an error. + const bool maybe_missing_unpack = + !has_unpack_info && fpi->uses_unpack_info(); + int res = unpack_field(fpi, field, &reader, + table->s->default_values + field_offset, + maybe_missing_unpack ? nullptr : &unp_reader); + + // Restore field->ptr and field->null_ptr + field->move_field(table->record[0] + field_offset, + maybe_null ? table->record[0] + null_offset : nullptr, + field->null_bit); + + if (res) { + return res; + } + } else { + /* It is impossible to unpack the column. Skip it. 
*/ + if (fpi->m_maybe_null) { + const char *nullp; + if (!(nullp = reader.read(1))) + return HA_EXIT_FAILURE; + if (*nullp == 0) { + /* This is a NULL value */ + continue; + } + /* If NULL marker is not '0', it can be only '1' */ + if (*nullp != 1) + return HA_EXIT_FAILURE; + } + if (fpi->m_skip_func(fpi, field, &reader)) + return HA_EXIT_FAILURE; + } + } + + /* + Check checksum values if present + */ + const char *ptr; + if ((ptr = unp_reader.read(1)) && *ptr == RDB_CHECKSUM_DATA_TAG) { + if (verify_row_debug_checksums) { + uint32_t stored_key_chksum = rdb_netbuf_to_uint32( + (const uchar *)unp_reader.read(RDB_CHECKSUM_SIZE)); + const uint32_t stored_val_chksum = rdb_netbuf_to_uint32( + (const uchar *)unp_reader.read(RDB_CHECKSUM_SIZE)); + + const uint32_t computed_key_chksum = + crc32(0, (const uchar *)packed_key->data(), packed_key->size()); + const uint32_t computed_val_chksum = + crc32(0, (const uchar *)unpack_info->data(), + unpack_info->size() - RDB_CHECKSUM_CHUNK_SIZE); + + DBUG_EXECUTE_IF("myrocks_simulate_bad_key_checksum1", + stored_key_chksum++;); + + if (stored_key_chksum != computed_key_chksum) { + report_checksum_mismatch(true, packed_key->data(), packed_key->size()); + return HA_EXIT_FAILURE; + } + + if (stored_val_chksum != computed_val_chksum) { + report_checksum_mismatch(false, unpack_info->data(), + unpack_info->size() - RDB_CHECKSUM_CHUNK_SIZE); + return HA_EXIT_FAILURE; + } + } else { + /* The checksums are present but we are not checking checksums */ + } + } + + if (reader.remaining_bytes()) + return HA_EXIT_FAILURE; + + return HA_EXIT_SUCCESS; +} + +bool Rdb_key_def::table_has_hidden_pk(const TABLE *const table) { + return table->s->primary_key == MAX_INDEXES; +} + +void Rdb_key_def::report_checksum_mismatch(const bool &is_key, + const char *const data, + const size_t data_size) const { + // NO_LINT_DEBUG + sql_print_error("Checksum mismatch in %s of key-value pair for index 0x%x", + is_key ? 
"key" : "value", get_index_number()); + + const std::string buf = rdb_hexdump(data, data_size, RDB_MAX_HEXDUMP_LEN); + // NO_LINT_DEBUG + sql_print_error("Data with incorrect checksum (%" PRIu64 " bytes): %s", + (uint64_t)data_size, buf.c_str()); + + my_error(ER_INTERNAL_ERROR, MYF(0), "Record checksum mismatch"); +} + +bool Rdb_key_def::index_format_min_check(const int &pk_min, + const int &sk_min) const { + switch (m_index_type) { + case INDEX_TYPE_PRIMARY: + case INDEX_TYPE_HIDDEN_PRIMARY: + return (m_kv_format_version >= pk_min); + case INDEX_TYPE_SECONDARY: + return (m_kv_format_version >= sk_min); + default: + DBUG_ASSERT(0); + return false; + } +} + +/////////////////////////////////////////////////////////////////////////////////////////// +// Rdb_field_packing +/////////////////////////////////////////////////////////////////////////////////////////// + +/* + Function of type rdb_index_field_skip_t +*/ + +int rdb_skip_max_length(const Rdb_field_packing *const fpi, + const Field *const field MY_ATTRIBUTE((__unused__)), + Rdb_string_reader *const reader) { + if (!reader->read(fpi->m_max_image_len)) + return HA_EXIT_FAILURE; + return HA_EXIT_SUCCESS; +} + +/* + (RDB_ESCAPE_LENGTH-1) must be an even number so that pieces of lines are not + split in the middle of an UTF-8 character. See the implementation of + rdb_unpack_binary_or_utf8_varchar. 
+*/
+
+const uint RDB_ESCAPE_LENGTH = 9;
+static_assert((RDB_ESCAPE_LENGTH - 1) % 2 == 0,
+              "RDB_ESCAPE_LENGTH-1 must be even.");
+
+/*
+  Function of type rdb_index_field_skip_t
+*/
+
+static int rdb_skip_variable_length(
+    const Rdb_field_packing *const fpi MY_ATTRIBUTE((__unused__)),
+    const Field *const field, Rdb_string_reader *const reader) {
+  const uchar *ptr;
+  bool finished = false;
+
+  size_t dst_len; /* How much data can be there */
+  if (field) {
+    const Field_varstring *const field_var =
+        static_cast<const Field_varstring *>(field);
+    dst_len = field_var->pack_length() - field_var->length_bytes;
+  } else {
+    dst_len = UINT_MAX;
+  }
+
+  /* Decode the length-emitted encoding here */
+  while ((ptr = (const uchar *)reader->read(RDB_ESCAPE_LENGTH))) {
+    /* See rdb_pack_with_varchar_encoding. */
+    const uchar pad =
+        255 - ptr[RDB_ESCAPE_LENGTH - 1]; // number of padding bytes
+    const uchar used_bytes = RDB_ESCAPE_LENGTH - 1 - pad;
+
+    if (used_bytes > RDB_ESCAPE_LENGTH - 1 || used_bytes > dst_len) {
+      return HA_EXIT_FAILURE; /* cannot store that much, invalid data */
+    }
+
+    if (used_bytes < RDB_ESCAPE_LENGTH - 1) {
+      finished = true;
+      break;
+    }
+    dst_len -= used_bytes;
+  }
+
+  if (!finished) {
+    return HA_EXIT_FAILURE;
+  }
+
+  return HA_EXIT_SUCCESS;
+}
+
+const int VARCHAR_CMP_LESS_THAN_SPACES = 1;
+const int VARCHAR_CMP_EQUAL_TO_SPACES = 2;
+const int VARCHAR_CMP_GREATER_THAN_SPACES = 3;
+
+/*
+  Skip a keypart that uses Variable-Length Space-Padded encoding
+*/
+
+static int rdb_skip_variable_space_pad(const Rdb_field_packing *const fpi,
+                                       const Field *const field,
+                                       Rdb_string_reader *const reader) {
+  const uchar *ptr;
+  bool finished = false;
+
+  size_t dst_len = UINT_MAX; /* How much data can be there */
+
+  if (field) {
+    const Field_varstring *const field_var =
+        static_cast<const Field_varstring *>(field);
+    dst_len = field_var->pack_length() - field_var->length_bytes;
+  }
+
+  /* Decode the length-emitted encoding here */
+  while ((ptr = (const uchar *)reader->read(fpi->m_segment_size))) {
// See rdb_pack_with_varchar_space_pad + const uchar c = ptr[fpi->m_segment_size - 1]; + if (c == VARCHAR_CMP_EQUAL_TO_SPACES) { + // This is the last segment + finished = true; + break; + } else if (c == VARCHAR_CMP_LESS_THAN_SPACES || + c == VARCHAR_CMP_GREATER_THAN_SPACES) { + // This is not the last segment + if ((fpi->m_segment_size - 1) > dst_len) { + // The segment is full of data but the table field can't hold that + // much! This must be data corruption. + return HA_EXIT_FAILURE; + } + dst_len -= (fpi->m_segment_size - 1); + } else { + // Encountered a value that's none of the VARCHAR_CMP* constants + // It's data corruption. + return HA_EXIT_FAILURE; + } + } + return finished ? HA_EXIT_SUCCESS : HA_EXIT_FAILURE; +} + +/* + Function of type rdb_index_field_unpack_t +*/ + +int rdb_unpack_integer(Rdb_field_packing *const fpi, Field *const field, + uchar *const to, Rdb_string_reader *const reader, + Rdb_string_reader *const unp_reader + MY_ATTRIBUTE((__unused__))) { + const int length = fpi->m_max_image_len; + + const uchar *from; + if (!(from = (const uchar *)reader->read(length))) + return UNPACK_FAILURE; /* Mem-comparable image doesn't have enough bytes */ + +#ifdef WORDS_BIGENDIAN + { + if (((Field_num *)field)->unsigned_flag) + to[0] = from[0]; + else + to[0] = (char)(from[0] ^ 128); // Reverse the sign bit. + memcpy(to + 1, from + 1, length - 1); + } +#else + { + const int sign_byte = from[0]; + if (((Field_num *)field)->unsigned_flag) + to[length - 1] = sign_byte; + else + to[length - 1] = + static_cast(sign_byte ^ 128); // Reverse the sign bit. 
+ for (int i = 0, j = length - 1; i < length - 1; ++i, --j) + to[i] = from[j]; + } +#endif + return UNPACK_SUCCESS; +} + +#if !defined(WORDS_BIGENDIAN) +static void rdb_swap_double_bytes(uchar *const dst, const uchar *const src) { +#if defined(__FLOAT_WORD_ORDER) && (__FLOAT_WORD_ORDER == __BIG_ENDIAN) + // A few systems store the most-significant _word_ first on little-endian + dst[0] = src[3]; + dst[1] = src[2]; + dst[2] = src[1]; + dst[3] = src[0]; + dst[4] = src[7]; + dst[5] = src[6]; + dst[6] = src[5]; + dst[7] = src[4]; +#else + dst[0] = src[7]; + dst[1] = src[6]; + dst[2] = src[5]; + dst[3] = src[4]; + dst[4] = src[3]; + dst[5] = src[2]; + dst[6] = src[1]; + dst[7] = src[0]; +#endif +} + +static void rdb_swap_float_bytes(uchar *const dst, const uchar *const src) { + dst[0] = src[3]; + dst[1] = src[2]; + dst[2] = src[1]; + dst[3] = src[0]; +} +#else +#define rdb_swap_double_bytes nullptr +#define rdb_swap_float_bytes nullptr +#endif + +static int rdb_unpack_floating_point( + uchar *const dst, Rdb_string_reader *const reader, const size_t &size, + const int &exp_digit, const uchar *const zero_pattern, + const uchar *const zero_val, void (*swap_func)(uchar *, const uchar *)) { + const uchar *const from = (const uchar *)reader->read(size); + if (from == nullptr) + return UNPACK_FAILURE; /* Mem-comparable image doesn't have enough bytes */ + + /* Check to see if the value is zero */ + if (memcmp(from, zero_pattern, size) == 0) { + memcpy(dst, zero_val, size); + return UNPACK_SUCCESS; + } + +#if defined(WORDS_BIGENDIAN) + // On big-endian, output can go directly into result + uchar *const tmp = dst; +#else + // Otherwise use a temporary buffer to make byte-swapping easier later + uchar tmp[8]; +#endif + + memcpy(tmp, from, size); + + if (tmp[0] & 0x80) { + // If the high bit is set the original value was positive so + // remove the high bit and subtract one from the exponent. 
+ ushort exp_part = ((ushort)tmp[0] << 8) | (ushort)tmp[1]; + exp_part &= 0x7FFF; // clear high bit; + exp_part -= (ushort)1 << (16 - 1 - exp_digit); // subtract from exponent + tmp[0] = (uchar)(exp_part >> 8); + tmp[1] = (uchar)exp_part; + } else { + // Otherwise the original value was negative and all bytes have been + // negated. + for (size_t ii = 0; ii < size; ii++) + tmp[ii] ^= 0xFF; + } + +#if !defined(WORDS_BIGENDIAN) + // On little-endian, swap the bytes around + swap_func(dst, tmp); +#else + static_assert(swap_func == nullptr, "Assuming that no swapping is needed."); +#endif + + return UNPACK_SUCCESS; +} + +#if !defined(DBL_EXP_DIG) +#define DBL_EXP_DIG (sizeof(double) * 8 - DBL_MANT_DIG) +#endif + +/* + Function of type rdb_index_field_unpack_t + + Unpack a double by doing the reverse action of change_double_for_sort + (sql/filesort.cc). Note that this only works on IEEE values. + Note also that this code assumes that NaN and +/-Infinity are never + allowed in the database. +*/ +static int rdb_unpack_double( + Rdb_field_packing *const fpi MY_ATTRIBUTE((__unused__)), + Field *const field MY_ATTRIBUTE((__unused__)), uchar *const field_ptr, + Rdb_string_reader *const reader, + Rdb_string_reader *const unp_reader MY_ATTRIBUTE((__unused__))) { + static double zero_val = 0.0; + static const uchar zero_pattern[8] = {128, 0, 0, 0, 0, 0, 0, 0}; + + return rdb_unpack_floating_point( + field_ptr, reader, sizeof(double), DBL_EXP_DIG, zero_pattern, + (const uchar *)&zero_val, rdb_swap_double_bytes); +} + +#if !defined(FLT_EXP_DIG) +#define FLT_EXP_DIG (sizeof(float) * 8 - FLT_MANT_DIG) +#endif + +/* + Function of type rdb_index_field_unpack_t + + Unpack a float by doing the reverse action of Field_float::make_sort_key + (sql/field.cc). Note that this only works on IEEE values. + Note also that this code assumes that NaN and +/-Infinity are never + allowed in the database. 
+*/
+static int rdb_unpack_float(
+    Rdb_field_packing *const, Field *const field MY_ATTRIBUTE((__unused__)),
+    uchar *const field_ptr, Rdb_string_reader *const reader,
+    Rdb_string_reader *const unp_reader MY_ATTRIBUTE((__unused__))) {
+  static float zero_val = 0.0;
+  static const uchar zero_pattern[4] = {128, 0, 0, 0};
+
+  return rdb_unpack_floating_point(
+      field_ptr, reader, sizeof(float), FLT_EXP_DIG, zero_pattern,
+      (const uchar *)&zero_val, rdb_swap_float_bytes);
+}
+
+/*
+  Function of type rdb_index_field_unpack_t used to
+  Unpack by doing the reverse action to Field_newdate::make_sort_key.
+*/
+
+int rdb_unpack_newdate(Rdb_field_packing *const fpi, Field *const field,
+                       uchar *const field_ptr, Rdb_string_reader *const reader,
+                       Rdb_string_reader *const unp_reader
+                       MY_ATTRIBUTE((__unused__))) {
+  const char *from;
+  DBUG_ASSERT(fpi->m_max_image_len == 3);
+
+  if (!(from = reader->read(3)))
+    return UNPACK_FAILURE; /* Mem-comparable image doesn't have enough bytes */
+
+  field_ptr[0] = from[2];
+  field_ptr[1] = from[1];
+  field_ptr[2] = from[0];
+  return UNPACK_SUCCESS;
+}
+
+/*
+  Function of type rdb_index_field_unpack_t, used to
+  Unpack the string by copying it over.
+  This is for BINARY(n) where the value occupies the whole length.
+*/
+
+static int rdb_unpack_binary_str(
+    Rdb_field_packing *const fpi, Field *const field, uchar *const to,
+    Rdb_string_reader *const reader,
+    Rdb_string_reader *const unp_reader MY_ATTRIBUTE((__unused__))) {
+  const char *from;
+  if (!(from = reader->read(fpi->m_max_image_len)))
+    return UNPACK_FAILURE; /* Mem-comparable image doesn't have enough bytes */
+
+  memcpy(to, from, fpi->m_max_image_len);
+  return UNPACK_SUCCESS;
+}
+
+/*
+  Function of type rdb_index_field_unpack_t.
+  For UTF-8, we need to convert 2-byte wide-character entities back into
+  UTF8 sequences.
+*/
+
+static int rdb_unpack_utf8_str(Rdb_field_packing *const fpi, Field *const field,
+                               uchar *dst, Rdb_string_reader *const reader,
+                               Rdb_string_reader *const unp_reader
+                               MY_ATTRIBUTE((__unused__))) {
+  my_core::CHARSET_INFO *const cset = (my_core::CHARSET_INFO *)field->charset();
+  const uchar *src;
+  if (!(src = (const uchar *)reader->read(fpi->m_max_image_len)))
+    return UNPACK_FAILURE; /* Mem-comparable image doesn't have enough bytes */
+
+  const uchar *const src_end = src + fpi->m_max_image_len;
+  uchar *const dst_end = dst + field->pack_length();
+
+  while (src < src_end) {
+    my_wc_t wc = (src[0] << 8) | src[1];
+    src += 2;
+    int res = cset->cset->wc_mb(cset, wc, dst, dst_end);
+    DBUG_ASSERT(res > 0 && res <= 3);
+    if (res < 0)
+      return UNPACK_FAILURE;
+    dst += res;
+  }
+
+  cset->cset->fill(cset, reinterpret_cast<char *>(dst), dst_end - dst,
+                   cset->pad_char);
+  return UNPACK_SUCCESS;
+}
+
+/*
+  Function of type rdb_index_field_pack_t
+*/
+
+static void rdb_pack_with_varchar_encoding(
+    Rdb_field_packing *const fpi, Field *const field, uchar *buf, uchar **dst,
+    Rdb_pack_field_context *const pack_ctx MY_ATTRIBUTE((__unused__))) {
+  /*
+    Use a flag byte every Nth byte. Set it to (255 - #pad) where #pad is 0
+    when the var length field filled all N-1 previous bytes and #pad is
+    otherwise the number of padding bytes used.
+
+    If N=8 and the field is:
+    * 3 bytes (1, 2, 3) this is encoded as: 1, 2, 3, 0, 0, 0, 0, 251
+    * 4 bytes (1, 2, 3, 0) this is encoded as: 1, 2, 3, 0, 0, 0, 0, 252
+    And the 4 byte string compares as greater than the 3 byte string
+  */
+  const CHARSET_INFO *const charset = field->charset();
+  Field_varstring *const field_var = (Field_varstring *)field;
+
+  const size_t value_length = (field_var->length_bytes == 1)
+                                  ? (uint)*field->ptr
+                                  : uint2korr(field->ptr);
+  size_t xfrm_len = charset->coll->strnxfrm(
+      charset, buf, fpi->m_max_image_len, field_var->char_length(),
+      field_var->ptr + field_var->length_bytes, value_length, 0);
+
+  /* Got a mem-comparable image in 'buf'. Now, produce varlength encoding */
+
+  size_t encoded_size = 0;
+  uchar *ptr = *dst;
+  while (1) {
+    const size_t copy_len = std::min((size_t)RDB_ESCAPE_LENGTH - 1, xfrm_len);
+    const size_t padding_bytes = RDB_ESCAPE_LENGTH - 1 - copy_len;
+    memcpy(ptr, buf, copy_len);
+    ptr += copy_len;
+    buf += copy_len;
+    // pad with zeros if necessary;
+    for (size_t idx = 0; idx < padding_bytes; idx++)
+      *(ptr++) = 0;
+    *(ptr++) = 255 - (uchar)padding_bytes;
+
+    xfrm_len -= copy_len;
+    encoded_size += RDB_ESCAPE_LENGTH;
+    if (padding_bytes != 0)
+      break;
+  }
+  *dst += encoded_size;
+}
+
+/*
+  Compare the string in [buf..buf_end) with a string that is an infinite
+  sequence of strings in space_xfrm
+*/
+
+static int
+rdb_compare_string_with_spaces(const uchar *buf, const uchar *const buf_end,
+                               const std::vector<uchar> *const space_xfrm) {
+  int cmp = 0;
+  while (buf < buf_end) {
+    size_t bytes = std::min((size_t)(buf_end - buf), space_xfrm->size());
+    if ((cmp = memcmp(buf, space_xfrm->data(), bytes)) != 0)
+      break;
+    buf += bytes;
+  }
+  return cmp;
+}
+
+static const int RDB_TRIMMED_CHARS_OFFSET = 8;
+/*
+  Pack the data with Variable-Length Space-Padded Encoding.
+
+  The encoding is there to meet two goals:
+
+  Goal#1. Comparison. The SQL standard says
+
+    " If the collation for the comparison has the PAD SPACE characteristic,
+    for the purposes of the comparison, the shorter value is effectively
+    extended to the length of the longer by concatenation of <space>s on the
+    right.
+
+  At the moment, all MySQL collations except one have the PAD SPACE
+  characteristic. The exception is the "binary" collation that is used by
+  [VAR]BINARY columns.
(Note that binary collations for specific charsets, + like utf8_bin or latin1_bin are not the same as "binary" collation, they have + the PAD SPACE characteristic). + + Goal#2 is to preserve the number of trailing spaces in the original value. + + This is achieved by using the following encoding: + The key part: + - Stores mem-comparable image of the column + - It is stored in chunks of fpi->m_segment_size bytes (*) + = If the remainder of the chunk is not occupied, it is padded with mem- + comparable image of the space character (cs->pad_char to be precise). + - The last byte of the chunk shows how the rest of column's mem-comparable + image would compare to mem-comparable image of the column extended with + spaces. There are three possible values. + - VARCHAR_CMP_LESS_THAN_SPACES, + - VARCHAR_CMP_EQUAL_TO_SPACES + - VARCHAR_CMP_GREATER_THAN_SPACES + + VARCHAR_CMP_EQUAL_TO_SPACES means that this chunk is the last one (the rest + is spaces, or something that sorts as spaces, so there is no reason to store + it). + + Example: if fpi->m_segment_size=5, and the collation is latin1_bin: + + 'abcd\0' => [ 'abcd' ]['\0 ' ] + 'abcd' => [ 'abcd' ] + 'abcd ' => [ 'abcd' ] + 'abcdZZZZ' => [ 'abcd' ][ 'ZZZZ' ] + + As mentioned above, the last chunk is padded with mem-comparable images of + cs->pad_char. It can be 1-byte long (latin1), 2 (utf8_bin), 3 (utf8mb4), etc. + + fpi->m_segment_size depends on the used collation. It is chosen to be such + that no mem-comparable image of space will ever stretch across the segments + (see get_segment_size_from_collation). + + == The value part (aka unpack_info) == + The value part stores the number of space characters that one needs to add + when unpacking the string. + - If the number is positive, it means add this many spaces at the end + - If the number is negative, it means padding has added extra spaces which + must be removed. 
+
+  Storage considerations
+  - depending on column's max size, the number may occupy 1 or 2 bytes
+  - the number of spaces that need to be removed is not more than
+    RDB_TRIMMED_CHARS_OFFSET=8, so we offset the number by that value and
+    then store it as unsigned.
+
+  @seealso
+  rdb_unpack_binary_or_utf8_varchar_space_pad
+  rdb_unpack_simple_varchar_space_pad
+  rdb_dummy_make_unpack_info
+  rdb_skip_variable_space_pad
+*/
+
+static void
+rdb_pack_with_varchar_space_pad(Rdb_field_packing *const fpi,
+                                Field *const field, uchar *buf, uchar **dst,
+                                Rdb_pack_field_context *const pack_ctx) {
+  Rdb_string_writer *const unpack_info = pack_ctx->writer;
+  const CHARSET_INFO *const charset = field->charset();
+  const auto field_var = static_cast<const Field_varstring *>(field);
+
+  const size_t value_length = (field_var->length_bytes == 1)
+                                  ? (uint)*field->ptr
+                                  : uint2korr(field->ptr);
+
+  const size_t trimmed_len = charset->cset->lengthsp(
+      charset, (const char *)field_var->ptr + field_var->length_bytes,
+      value_length);
+  const size_t xfrm_len = charset->coll->strnxfrm(
+      charset, buf, fpi->m_max_image_len, field_var->char_length(),
+      field_var->ptr + field_var->length_bytes, trimmed_len, 0);
+
+  /* Got a mem-comparable image in 'buf'. Now, produce varlength encoding */
+  uchar *const buf_end = buf + xfrm_len;
+
+  size_t encoded_size = 0;
+  uchar *ptr = *dst;
+  size_t padding_bytes;
+  while (true) {
+    const size_t copy_len =
+        std::min<size_t>(fpi->m_segment_size - 1, buf_end - buf);
+    padding_bytes = fpi->m_segment_size - 1 - copy_len;
+    memcpy(ptr, buf, copy_len);
+    ptr += copy_len;
+    buf += copy_len;
+
+    if (padding_bytes) {
+      memcpy(ptr, fpi->space_xfrm->data(), padding_bytes);
+      ptr += padding_bytes;
+      *ptr = VARCHAR_CMP_EQUAL_TO_SPACES; // last segment
+    } else {
+      // Compare the string suffix with a hypothetical infinite string of
+      // spaces. It could be that the first difference is beyond the end of
+      // current chunk.
+ const int cmp = + rdb_compare_string_with_spaces(buf, buf_end, fpi->space_xfrm); + + if (cmp < 0) + *ptr = VARCHAR_CMP_LESS_THAN_SPACES; + else if (cmp > 0) + *ptr = VARCHAR_CMP_GREATER_THAN_SPACES; + else { + // It turns out all the rest are spaces. + *ptr = VARCHAR_CMP_EQUAL_TO_SPACES; + } + } + encoded_size += fpi->m_segment_size; + + if (*(ptr++) == VARCHAR_CMP_EQUAL_TO_SPACES) + break; + } + + // m_unpack_info_stores_value means unpack_info stores the whole original + // value. There is no need to store the number of trimmed/padded endspaces + // in that case. + if (unpack_info && !fpi->m_unpack_info_stores_value) { + // (value_length - trimmed_len) is the number of trimmed space *characters* + // then, padding_bytes is the number of *bytes* added as padding + // then, we add 8, because we don't store negative values. + DBUG_ASSERT(padding_bytes % fpi->space_xfrm_len == 0); + DBUG_ASSERT((value_length - trimmed_len) % fpi->space_mb_len == 0); + const size_t removed_chars = + RDB_TRIMMED_CHARS_OFFSET + + (value_length - trimmed_len) / fpi->space_mb_len - + padding_bytes / fpi->space_xfrm_len; + + if (fpi->m_unpack_info_uses_two_bytes) { + unpack_info->write_uint16(removed_chars); + } else { + DBUG_ASSERT(removed_chars < 0x100); + unpack_info->write_uint8(removed_chars); + } + } + + *dst += encoded_size; +} + +/* + Function of type rdb_index_field_unpack_t +*/ + +static int rdb_unpack_binary_or_utf8_varchar( + Rdb_field_packing *const fpi, Field *const field, uchar *dst, + Rdb_string_reader *const reader, + Rdb_string_reader *const unp_reader MY_ATTRIBUTE((__unused__))) { + const uchar *ptr; + size_t len = 0; + bool finished = false; + uchar *d0 = dst; + Field_varstring *const field_var = (Field_varstring *)field; + dst += field_var->length_bytes; + // How much we can unpack + size_t dst_len = field_var->pack_length() - field_var->length_bytes; + uchar *const dst_end = dst + dst_len; + + /* Decode the length-emitted encoding here */ + while ((ptr = (const 
uchar *)reader->read(RDB_ESCAPE_LENGTH))) { + /* See rdb_pack_with_varchar_encoding. */ + uchar pad = 255 - ptr[RDB_ESCAPE_LENGTH - 1]; // number of padding bytes + uchar used_bytes = RDB_ESCAPE_LENGTH - 1 - pad; + + if (used_bytes > RDB_ESCAPE_LENGTH - 1) { + return UNPACK_FAILURE; /* cannot store that much, invalid data */ + } + + if (dst_len < used_bytes) { + /* Encoded index tuple is longer than the size in the record buffer? */ + return UNPACK_FAILURE; + } + + /* + Now, we need to decode used_bytes of data and append them to the value. + */ + if (fpi->m_varchar_charset->number == COLLATION_UTF8_BIN) { + if (used_bytes & 1) { + /* + UTF-8 characters are encoded into two-byte entities. There is no way + we can have an odd number of bytes after encoding. + */ + return UNPACK_FAILURE; + } + + const uchar *src = ptr; + const uchar *src_end = ptr + used_bytes; + while (src < src_end) { + my_wc_t wc = (src[0] << 8) | src[1]; + src += 2; + const CHARSET_INFO *cset = fpi->m_varchar_charset; + int res = cset->cset->wc_mb(cset, wc, dst, dst_end); + DBUG_ASSERT(res > 0 && res <= 3); + if (res < 0) + return UNPACK_FAILURE; + dst += res; + len += res; + dst_len -= res; + } + } else { + memcpy(dst, ptr, used_bytes); + dst += used_bytes; + dst_len -= used_bytes; + len += used_bytes; + } + + if (used_bytes < RDB_ESCAPE_LENGTH - 1) { + finished = true; + break; + } + } + + if (!finished) + return UNPACK_FAILURE; + + /* Save the length */ + if (field_var->length_bytes == 1) { + d0[0] = (uchar)len; + } else { + DBUG_ASSERT(field_var->length_bytes == 2); + int2store(d0, len); + } + return UNPACK_SUCCESS; +} + +/* + @seealso + rdb_pack_with_varchar_space_pad - packing function + rdb_unpack_simple_varchar_space_pad - unpacking function for 'simple' + charsets. 
+  rdb_skip_variable_space_pad - skip function
+*/
+static int rdb_unpack_binary_or_utf8_varchar_space_pad(
+    Rdb_field_packing *const fpi, Field *const field, uchar *dst,
+    Rdb_string_reader *const reader, Rdb_string_reader *const unp_reader) {
+  const uchar *ptr;
+  size_t len = 0;
+  bool finished = false;
+  Field_varstring *const field_var = static_cast<Field_varstring *>(field);
+  uchar *d0 = dst;
+  uchar *dst_end = dst + field_var->pack_length();
+  dst += field_var->length_bytes;
+
+  uint space_padding_bytes = 0;
+  uint extra_spaces;
+  if ((fpi->m_unpack_info_uses_two_bytes
+           ? unp_reader->read_uint16(&extra_spaces)
+           : unp_reader->read_uint8(&extra_spaces))) {
+    return UNPACK_FAILURE;
+  }
+
+  if (extra_spaces <= RDB_TRIMMED_CHARS_OFFSET) {
+    space_padding_bytes =
+        -(static_cast<int>(extra_spaces) - RDB_TRIMMED_CHARS_OFFSET);
+    extra_spaces = 0;
+  } else
+    extra_spaces -= RDB_TRIMMED_CHARS_OFFSET;
+
+  space_padding_bytes *= fpi->space_xfrm_len;
+
+  /* Decode the length-emitted encoding here */
+  while ((ptr = (const uchar *)reader->read(fpi->m_segment_size))) {
+    const char last_byte = ptr[fpi->m_segment_size - 1];
+    size_t used_bytes;
+    if (last_byte == VARCHAR_CMP_EQUAL_TO_SPACES) // this is the last segment
+    {
+      if (space_padding_bytes > (fpi->m_segment_size - 1))
+        return UNPACK_FAILURE; // Cannot happen, corrupted data
+      used_bytes = (fpi->m_segment_size - 1) - space_padding_bytes;
+      finished = true;
+    } else {
+      if (last_byte != VARCHAR_CMP_LESS_THAN_SPACES &&
+          last_byte != VARCHAR_CMP_GREATER_THAN_SPACES) {
+        return UNPACK_FAILURE; // Invalid value
+      }
+      used_bytes = fpi->m_segment_size - 1;
+    }
+
+    // Now, need to decode used_bytes of data and append them to the value.
+    if (fpi->m_varchar_charset->number == COLLATION_UTF8_BIN) {
+      if (used_bytes & 1) {
+        /*
+          UTF-8 characters are encoded into two-byte entities. There is no way
+          we can have an odd number of bytes after encoding.
+ */ + return UNPACK_FAILURE; + } + + const uchar *src = ptr; + const uchar *const src_end = ptr + used_bytes; + while (src < src_end) { + my_wc_t wc = (src[0] << 8) | src[1]; + src += 2; + const CHARSET_INFO *cset = fpi->m_varchar_charset; + int res = cset->cset->wc_mb(cset, wc, dst, dst_end); + DBUG_ASSERT(res <= 3); + if (res <= 0) + return UNPACK_FAILURE; + dst += res; + len += res; + } + } else { + if (dst + used_bytes > dst_end) + return UNPACK_FAILURE; + memcpy(dst, ptr, used_bytes); + dst += used_bytes; + len += used_bytes; + } + + if (finished) { + if (extra_spaces) { + // Both binary and UTF-8 charset store space as ' ', + // so the following is ok: + if (dst + extra_spaces > dst_end) + return UNPACK_FAILURE; + memset(dst, fpi->m_varchar_charset->pad_char, extra_spaces); + len += extra_spaces; + } + break; + } + } + + if (!finished) + return UNPACK_FAILURE; + + /* Save the length */ + if (field_var->length_bytes == 1) { + d0[0] = (uchar)len; + } else { + DBUG_ASSERT(field_var->length_bytes == 2); + int2store(d0, len); + } + return UNPACK_SUCCESS; +} + +///////////////////////////////////////////////////////////////////////// + +/* + Function of type rdb_make_unpack_info_t +*/ + +static void rdb_make_unpack_unknown( + const Rdb_collation_codec *codec MY_ATTRIBUTE((__unused__)), + const Field *const field, Rdb_pack_field_context *const pack_ctx) { + pack_ctx->writer->write(field->ptr, field->pack_length()); +} + +/* + This point of this function is only to indicate that unpack_info is + available. + + The actual unpack_info data is produced by the function that packs the key, + that is, rdb_pack_with_varchar_space_pad. 
+*/
+
+static void rdb_dummy_make_unpack_info(
+    const Rdb_collation_codec *codec MY_ATTRIBUTE((__unused__)),
+    const Field *field MY_ATTRIBUTE((__unused__)),
+    Rdb_pack_field_context *pack_ctx MY_ATTRIBUTE((__unused__))) {}
+
+/*
+  Function of type rdb_index_field_unpack_t
+*/
+
+static int rdb_unpack_unknown(Rdb_field_packing *const fpi, Field *const field,
+                              uchar *const dst, Rdb_string_reader *const reader,
+                              Rdb_string_reader *const unp_reader) {
+  const uchar *ptr;
+  const uint len = fpi->m_unpack_data_len;
+  // We don't use anything from the key, so skip over it.
+  if (rdb_skip_max_length(fpi, field, reader)) {
+    return UNPACK_FAILURE;
+  }
+
+  DBUG_ASSERT_IMP(len > 0, unp_reader != nullptr);
+
+  if ((ptr = (const uchar *)unp_reader->read(len))) {
+    memcpy(dst, ptr, len);
+    return UNPACK_SUCCESS;
+  }
+  return UNPACK_FAILURE;
+}
+
+/*
+  Function of type rdb_make_unpack_info_t
+*/
+
+static void rdb_make_unpack_unknown_varchar(
+    const Rdb_collation_codec *const codec MY_ATTRIBUTE((__unused__)),
+    const Field *const field, Rdb_pack_field_context *const pack_ctx) {
+  const auto f = static_cast<const Field_varstring *>(field);
+  uint len = f->length_bytes == 1 ? (uint)*f->ptr : uint2korr(f->ptr);
+  len += f->length_bytes;
+  pack_ctx->writer->write(field->ptr, len);
+}
+
+/*
+  Function of type rdb_index_field_unpack_t
+
+  @detail
+  Unpack a key part in an "unknown" collation from its
+  (mem_comparable_form, unpack_info) form.
+
+  "Unknown" means we have no clue about how mem_comparable_form is made from
+  the original string, so we keep the whole original string in the unpack_info.
+ + @seealso + rdb_make_unpack_unknown, rdb_unpack_unknown +*/ + +static int rdb_unpack_unknown_varchar(Rdb_field_packing *const fpi, + Field *const field, uchar *dst, + Rdb_string_reader *const reader, + Rdb_string_reader *const unp_reader) { + const uchar *ptr; + uchar *const d0 = dst; + const auto f = static_cast(field); + dst += f->length_bytes; + const uint len_bytes = f->length_bytes; + // We don't use anything from the key, so skip over it. + if (fpi->m_skip_func(fpi, field, reader)) { + return UNPACK_FAILURE; + } + + DBUG_ASSERT(len_bytes > 0); + DBUG_ASSERT(unp_reader != nullptr); + + if ((ptr = (const uchar *)unp_reader->read(len_bytes))) { + memcpy(d0, ptr, len_bytes); + const uint len = len_bytes == 1 ? (uint)*ptr : uint2korr(ptr); + if ((ptr = (const uchar *)unp_reader->read(len))) { + memcpy(dst, ptr, len); + return UNPACK_SUCCESS; + } + } + return UNPACK_FAILURE; +} + +/* + Write unpack_data for a "simple" collation +*/ +static void rdb_write_unpack_simple(Rdb_bit_writer *const writer, + const Rdb_collation_codec *const codec, + const uchar *const src, + const size_t src_len) { + for (uint i = 0; i < src_len; i++) { + writer->write(codec->m_enc_size[src[i]], codec->m_enc_idx[src[i]]); + } +} + +static uint rdb_read_unpack_simple(Rdb_bit_reader *const reader, + const Rdb_collation_codec *const codec, + const uchar *const src, + const size_t &src_len, uchar *const dst) { + for (uint i = 0; i < src_len; i++) { + if (codec->m_dec_size[src[i]] > 0) { + uint *ret; + DBUG_ASSERT(reader != nullptr); + + if ((ret = reader->read(codec->m_dec_size[src[i]])) == nullptr) { + return UNPACK_FAILURE; + } + dst[i] = codec->m_dec_idx[*ret][src[i]]; + } else { + dst[i] = codec->m_dec_idx[0][src[i]]; + } + } + + return UNPACK_SUCCESS; +} + +/* + Function of type rdb_make_unpack_info_t + + @detail + Make unpack_data for VARCHAR(n) in a "simple" charset. 
+*/ + +static void +rdb_make_unpack_simple_varchar(const Rdb_collation_codec *const codec, + const Field *const field, + Rdb_pack_field_context *const pack_ctx) { + const auto f = static_cast(field); + uchar *const src = f->ptr + f->length_bytes; + const size_t src_len = + f->length_bytes == 1 ? (uint)*f->ptr : uint2korr(f->ptr); + Rdb_bit_writer bit_writer(pack_ctx->writer); + // The std::min compares characters with bytes, but for simple collations, + // mbmaxlen = 1. + rdb_write_unpack_simple(&bit_writer, codec, src, + std::min((size_t)f->char_length(), src_len)); +} + +/* + Function of type rdb_index_field_unpack_t + + @seealso + rdb_pack_with_varchar_space_pad - packing function + rdb_unpack_binary_or_utf8_varchar_space_pad - a similar unpacking function +*/ + +int rdb_unpack_simple_varchar_space_pad(Rdb_field_packing *const fpi, + Field *const field, uchar *dst, + Rdb_string_reader *const reader, + Rdb_string_reader *const unp_reader) { + const uchar *ptr; + size_t len = 0; + bool finished = false; + uchar *d0 = dst; + const Field_varstring *const field_var = + static_cast(field); + // For simple collations, char_length is also number of bytes. + DBUG_ASSERT((size_t)fpi->m_max_image_len >= field_var->char_length()); + uchar *dst_end = dst + field_var->pack_length(); + dst += field_var->length_bytes; + Rdb_bit_reader bit_reader(unp_reader); + + uint space_padding_bytes = 0; + uint extra_spaces; + DBUG_ASSERT(unp_reader != nullptr); + + if ((fpi->m_unpack_info_uses_two_bytes + ? 
unp_reader->read_uint16(&extra_spaces) + : unp_reader->read_uint8(&extra_spaces))) { + return UNPACK_FAILURE; + } + + if (extra_spaces <= 8) { + space_padding_bytes = -(static_cast(extra_spaces) - 8); + extra_spaces = 0; + } else + extra_spaces -= 8; + + space_padding_bytes *= fpi->space_xfrm_len; + + /* Decode the length-emitted encoding here */ + while ((ptr = (const uchar *)reader->read(fpi->m_segment_size))) { + const char last_byte = + ptr[fpi->m_segment_size - 1]; // number of padding bytes + size_t used_bytes; + if (last_byte == VARCHAR_CMP_EQUAL_TO_SPACES) { + // this is the last one + if (space_padding_bytes > (fpi->m_segment_size - 1)) + return UNPACK_FAILURE; // Cannot happen, corrupted data + used_bytes = (fpi->m_segment_size - 1) - space_padding_bytes; + finished = true; + } else { + if (last_byte != VARCHAR_CMP_LESS_THAN_SPACES && + last_byte != VARCHAR_CMP_GREATER_THAN_SPACES) { + return UNPACK_FAILURE; + } + used_bytes = fpi->m_segment_size - 1; + } + + if (dst + used_bytes > dst_end) { + // The value on disk is longer than the field definition allows? + return UNPACK_FAILURE; + } + + uint ret; + if ((ret = rdb_read_unpack_simple(&bit_reader, fpi->m_charset_codec, ptr, + used_bytes, dst)) != UNPACK_SUCCESS) { + return ret; + } + + dst += used_bytes; + len += used_bytes; + + if (finished) { + if (extra_spaces) { + if (dst + extra_spaces > dst_end) + return UNPACK_FAILURE; + // pad_char has a 1-byte form in all charsets that + // are handled by rdb_init_collation_mapping. + memset(dst, field_var->charset()->pad_char, extra_spaces); + len += extra_spaces; + } + break; + } + } + + if (!finished) + return UNPACK_FAILURE; + + /* Save the length */ + if (field_var->length_bytes == 1) { + d0[0] = (uchar)len; + } else { + DBUG_ASSERT(field_var->length_bytes == 2); + int2store(d0, len); + } + return UNPACK_SUCCESS; +} + +/* + Function of type rdb_make_unpack_info_t + + @detail + Make unpack_data for CHAR(n) value in a "simple" charset. 
+ It is CHAR(N), so SQL layer has padded the value with spaces up to N chars. + + @seealso + The VARCHAR variant is in rdb_make_unpack_simple_varchar +*/ + +static void rdb_make_unpack_simple(const Rdb_collation_codec *const codec, + const Field *const field, + Rdb_pack_field_context *const pack_ctx) { + const uchar *const src = field->ptr; + Rdb_bit_writer bit_writer(pack_ctx->writer); + rdb_write_unpack_simple(&bit_writer, codec, src, field->pack_length()); +} + +/* + Function of type rdb_index_field_unpack_t +*/ + +static int rdb_unpack_simple(Rdb_field_packing *const fpi, + Field *const field MY_ATTRIBUTE((__unused__)), + uchar *const dst, Rdb_string_reader *const reader, + Rdb_string_reader *const unp_reader) { + const uchar *ptr; + const uint len = fpi->m_max_image_len; + Rdb_bit_reader bit_reader(unp_reader); + + if (!(ptr = (const uchar *)reader->read(len))) { + return UNPACK_FAILURE; + } + + return rdb_read_unpack_simple(unp_reader ? &bit_reader : nullptr, + fpi->m_charset_codec, ptr, len, dst); +} + +// See Rdb_charset_space_info::spaces_xfrm +const int RDB_SPACE_XFRM_SIZE = 32; + +// A class holding information about how space character is represented in a +// charset. +class Rdb_charset_space_info { +public: + Rdb_charset_space_info(const Rdb_charset_space_info &) = delete; + Rdb_charset_space_info &operator=(const Rdb_charset_space_info &) = delete; + Rdb_charset_space_info() = default; + + // A few strxfrm'ed space characters, at least RDB_SPACE_XFRM_SIZE bytes + std::vector spaces_xfrm; + + // length(strxfrm(' ')) + size_t space_xfrm_len; + + // length of the space character itself + // Typically space is just 0x20 (length=1) but in ucs2 it is 0x00 0x20 + // (length=2) + size_t space_mb_len; +}; + +static std::array, MY_ALL_CHARSETS_SIZE> + rdb_mem_comparable_space; + +/* + @brief + For a given charset, get + - strxfrm(' '), a sample that is at least RDB_SPACE_XFRM_SIZE bytes long. 
+ - length of strxfrm(charset, ' ') + - length of the space character in the charset + + @param cs IN Charset to get the space for + @param ptr OUT A few space characters + @param len OUT Return length of the space (in bytes) + + @detail + It is tempting to pre-generate mem-comparable form of space character for + every charset on server startup. + One can't do that: some charsets are not initialized until somebody + attempts to use them (e.g. create or open a table that has a field that + uses the charset). +*/ + +static void rdb_get_mem_comparable_space(const CHARSET_INFO *const cs, + const std::vector **xfrm, + size_t *const xfrm_len, + size_t *const mb_len) { + DBUG_ASSERT(cs->number < MY_ALL_CHARSETS_SIZE); + if (!rdb_mem_comparable_space[cs->number].get()) { + RDB_MUTEX_LOCK_CHECK(rdb_mem_cmp_space_mutex); + if (!rdb_mem_comparable_space[cs->number].get()) { + // Upper bound of how many bytes can be occupied by multi-byte form of a + // character in any charset. + const int MAX_MULTI_BYTE_CHAR_SIZE = 4; + DBUG_ASSERT(cs->mbmaxlen <= MAX_MULTI_BYTE_CHAR_SIZE); + + // multi-byte form of the ' ' (space) character + uchar space_mb[MAX_MULTI_BYTE_CHAR_SIZE]; + + const size_t space_mb_len = cs->cset->wc_mb( + cs, (my_wc_t)cs->pad_char, space_mb, space_mb + sizeof(space_mb)); + + uchar space[20]; // mem-comparable image of the space character + + const size_t space_len = cs->coll->strnxfrm(cs, space, sizeof(space), 1, + space_mb, space_mb_len, 0); + Rdb_charset_space_info *const info = new Rdb_charset_space_info; + info->space_xfrm_len = space_len; + info->space_mb_len = space_mb_len; + while (info->spaces_xfrm.size() < RDB_SPACE_XFRM_SIZE) { + info->spaces_xfrm.insert(info->spaces_xfrm.end(), space, + space + space_len); + } + rdb_mem_comparable_space[cs->number].reset(info); + } + RDB_MUTEX_UNLOCK_CHECK(rdb_mem_cmp_space_mutex); + } + + *xfrm = &rdb_mem_comparable_space[cs->number]->spaces_xfrm; + *xfrm_len = rdb_mem_comparable_space[cs->number]->space_xfrm_len; + 
*mb_len = rdb_mem_comparable_space[cs->number]->space_mb_len; +} + +mysql_mutex_t rdb_mem_cmp_space_mutex; + +std::array + rdb_collation_data; +mysql_mutex_t rdb_collation_data_mutex; + +static bool rdb_is_collation_supported(const my_core::CHARSET_INFO *const cs) { + return cs->strxfrm_multiply==1 && cs->mbmaxlen == 1 && + !(cs->state & (MY_CS_BINSORT | MY_CS_NOPAD)); +} + +static const Rdb_collation_codec * +rdb_init_collation_mapping(const my_core::CHARSET_INFO *const cs) { + DBUG_ASSERT(cs && cs->state & MY_CS_AVAILABLE); + const Rdb_collation_codec *codec = rdb_collation_data[cs->number]; + + if (codec == nullptr && rdb_is_collation_supported(cs)) { + RDB_MUTEX_LOCK_CHECK(rdb_collation_data_mutex); + + codec = rdb_collation_data[cs->number]; + if (codec == nullptr) { + Rdb_collation_codec *cur = nullptr; + + // Compute reverse mapping for simple collations. + if (rdb_is_collation_supported(cs)) { + cur = new Rdb_collation_codec; + std::map> rev_map; + size_t max_conflict_size = 0; + for (int src = 0; src < 256; src++) { + uchar dst = cs->sort_order[src]; + rev_map[dst].push_back(src); + max_conflict_size = std::max(max_conflict_size, rev_map[dst].size()); + } + cur->m_dec_idx.resize(max_conflict_size); + + for (auto const &p : rev_map) { + uchar dst = p.first; + for (uint idx = 0; idx < p.second.size(); idx++) { + uchar src = p.second[idx]; + uchar bits = + my_bit_log2(my_round_up_to_next_power(p.second.size())); + cur->m_enc_idx[src] = idx; + cur->m_enc_size[src] = bits; + cur->m_dec_size[dst] = bits; + cur->m_dec_idx[idx][dst] = src; + } + } + + cur->m_make_unpack_info_func = { + {rdb_make_unpack_simple_varchar, rdb_make_unpack_simple}}; + cur->m_unpack_func = { + {rdb_unpack_simple_varchar_space_pad, rdb_unpack_simple}}; + } else { + // Out of luck for now. 
+ } + + if (cur != nullptr) { + codec = cur; + cur->m_cs = cs; + rdb_collation_data[cs->number] = cur; + } + } + + RDB_MUTEX_UNLOCK_CHECK(rdb_collation_data_mutex); + } + + return codec; +} + +static int get_segment_size_from_collation(const CHARSET_INFO *const cs) { + int ret; + if (cs->number == COLLATION_UTF8MB4_BIN || cs->number == COLLATION_UTF16_BIN || + cs->number == COLLATION_UTF16LE_BIN || cs->number == COLLATION_UTF32_BIN) { + /* + In these collations, a character produces one weight, which is 3 bytes. + Segment has 3 characters, add one byte for VARCHAR_CMP_* marker, and we + get 3*3+1=10 + */ + ret = 10; + } else { + /* + All other collations. There are two classes: + - Unicode-based, except for collations mentioned in the if-condition. + For these all weights are 2 bytes long, a character may produce 0..8 + weights. + in any case, 8 bytes of payload in the segment guarantee that the last + space character won't span across segments. + + - Collations not based on unicode. These have length(strxfrm(' '))=1, + there is nothing to worry about. + + In both cases, take 8 bytes payload + 1 byte for VARCHAR_CMP* marker. + */ + ret = 9; + } + DBUG_ASSERT(ret < RDB_SPACE_XFRM_SIZE); + return ret; +} + +/* + @brief + Setup packing of index field into its mem-comparable form + + @detail + - It is possible to produce mem-comparable form for any datatype. + - Some datatypes also allow to unpack the original value from its + mem-comparable form. + - Some of these require extra information to be stored in "unpack_info". 
+ unpack_info is not a part of mem-comparable form, it is only used to + restore the original value + + @param + field IN field to be packed/un-packed + + @return + TRUE - Field can be read with index-only reads + FALSE - Otherwise +*/ + +bool Rdb_field_packing::setup(const Rdb_key_def *const key_descr, + const Field *const field, const uint &keynr_arg, + const uint &key_part_arg, + const uint16 &key_length) { + int res = false; + enum_field_types type = field ? field->real_type() : MYSQL_TYPE_LONGLONG; + + m_keynr = keynr_arg; + m_key_part = key_part_arg; + + m_maybe_null = field ? field->real_maybe_null() : false; + m_unpack_func = nullptr; + m_make_unpack_info_func = nullptr; + m_unpack_data_len = 0; + space_xfrm = nullptr; // safety + + /* Calculate image length. By default, is is pack_length() */ + m_max_image_len = + field ? field->pack_length() : ROCKSDB_SIZEOF_HIDDEN_PK_COLUMN; + m_skip_func = rdb_skip_max_length; + m_pack_func = rdb_pack_with_make_sort_key; + + switch (type) { + case MYSQL_TYPE_LONGLONG: + case MYSQL_TYPE_LONG: + case MYSQL_TYPE_INT24: + case MYSQL_TYPE_SHORT: + case MYSQL_TYPE_TINY: + m_unpack_func = rdb_unpack_integer; + return true; + + case MYSQL_TYPE_DOUBLE: + m_unpack_func = rdb_unpack_double; + return true; + + case MYSQL_TYPE_FLOAT: + m_unpack_func = rdb_unpack_float; + return true; + + case MYSQL_TYPE_NEWDECIMAL: + /* + Decimal is packed with Field_new_decimal::make_sort_key, which just + does memcpy. + Unpacking decimal values was supported only after fix for issue#253, + because of that ha_rocksdb::get_storage_type() handles decimal values + in a special way. + */ + case MYSQL_TYPE_DATETIME2: + case MYSQL_TYPE_TIMESTAMP2: + /* These are packed with Field_temporal_with_date_and_timef::make_sort_key */ + case MYSQL_TYPE_TIME2: /* TIME is packed with Field_timef::make_sort_key */ + case MYSQL_TYPE_YEAR: /* YEAR is packed with Field_tiny::make_sort_key */ + /* Everything that comes here is packed with just a memcpy(). 
*/ + m_unpack_func = rdb_unpack_binary_str; + return true; + + case MYSQL_TYPE_NEWDATE: + /* + This is packed by Field_newdate::make_sort_key. It assumes the data is + 3 bytes, and packing is done by swapping the byte order (for both big- + and little-endian) + */ + m_unpack_func = rdb_unpack_newdate; + return true; + case MYSQL_TYPE_TINY_BLOB: + case MYSQL_TYPE_MEDIUM_BLOB: + case MYSQL_TYPE_LONG_BLOB: + case MYSQL_TYPE_BLOB: { + if (key_descr) { + // The my_charset_bin collation is special in that it will consider + // shorter strings sorting as less than longer strings. + // + // See Field_blob::make_sort_key for details. + m_max_image_len = + key_length + (field->charset()->number == COLLATION_BINARY + ? reinterpret_cast(field) + ->pack_length_no_ptr() + : 0); + // Return false because indexes on text/blob will always require + // a prefix. With a prefix, the optimizer will not be able to do an + // index-only scan since there may be content occuring after the prefix + // length. + return false; + } + } + default: + break; + } + + m_unpack_info_stores_value = false; + /* Handle [VAR](CHAR|BINARY) */ + + if (type == MYSQL_TYPE_VARCHAR || type == MYSQL_TYPE_STRING) { + /* + For CHAR-based columns, check how strxfrm image will take. + field->field_length = field->char_length() * cs->mbmaxlen. 
+ */ + const CHARSET_INFO *cs = field->charset(); + m_max_image_len = cs->coll->strnxfrmlen(cs, field->field_length); + } + const bool is_varchar = (type == MYSQL_TYPE_VARCHAR); + const CHARSET_INFO *cs = field->charset(); + // max_image_len before chunking is taken into account + const int max_image_len_before_chunks = m_max_image_len; + + if (is_varchar) { + // The default for varchar is variable-length, without space-padding for + // comparisons + m_varchar_charset = cs; + m_skip_func = rdb_skip_variable_length; + m_pack_func = rdb_pack_with_varchar_encoding; + m_max_image_len = + (m_max_image_len / (RDB_ESCAPE_LENGTH - 1) + 1) * RDB_ESCAPE_LENGTH; + + const auto field_var = static_cast(field); + m_unpack_info_uses_two_bytes = (field_var->field_length + 8 >= 0x100); + } + + if (type == MYSQL_TYPE_VARCHAR || type == MYSQL_TYPE_STRING) { + // See http://dev.mysql.com/doc/refman/5.7/en/string-types.html for + // information about character-based datatypes are compared. + bool use_unknown_collation = false; + DBUG_EXECUTE_IF("myrocks_enable_unknown_collation_index_only_scans", + use_unknown_collation = true;); + + if (cs->number == COLLATION_BINARY) { + // - SQL layer pads BINARY(N) so that it always is N bytes long. + // - For VARBINARY(N), values may have different lengths, so we're using + // variable-length encoding. This is also the only charset where the + // values are not space-padded for comparison. + m_unpack_func = is_varchar ? rdb_unpack_binary_or_utf8_varchar + : rdb_unpack_binary_str; + res = true; + } else if (cs->number == COLLATION_LATIN1_BIN || cs->number == COLLATION_UTF8_BIN) { + // For _bin collations, mem-comparable form of the string is the string + // itself. 
+ + if (is_varchar) { + // VARCHARs - are compared as if they were space-padded - but are + // not actually space-padded (reading the value back produces the + // original value, without the padding) + m_unpack_func = rdb_unpack_binary_or_utf8_varchar_space_pad; + m_skip_func = rdb_skip_variable_space_pad; + m_pack_func = rdb_pack_with_varchar_space_pad; + m_make_unpack_info_func = rdb_dummy_make_unpack_info; + m_segment_size = get_segment_size_from_collation(cs); + m_max_image_len = + (max_image_len_before_chunks / (m_segment_size - 1) + 1) * + m_segment_size; + rdb_get_mem_comparable_space(cs, &space_xfrm, &space_xfrm_len, + &space_mb_len); + } else { + // SQL layer pads CHAR(N) values to their maximum length. + // We just store that and restore it back. + m_unpack_func = (cs->number == COLLATION_LATIN1_BIN) ? rdb_unpack_binary_str + : rdb_unpack_utf8_str; + } + res = true; + } else { + // This is [VAR]CHAR(n) and the collation is not $(charset_name)_bin + + res = true; // index-only scans are possible + m_unpack_data_len = is_varchar ? 0 : field->field_length; + const uint idx = is_varchar ? 0 : 1; + const Rdb_collation_codec *codec = nullptr; + + if (is_varchar) { + // VARCHAR requires space-padding for doing comparisons + // + // The check for cs->levels_for_order is to catch + // latin2_czech_cs and cp1250_czech_cs - multi-level collations + // that Variable-Length Space Padded Encoding can't handle. + // It is not expected to work for any other multi-level collations, + // either. + // Currently we handle these collations as NO_PAD, even if they have + // PAD_SPACE attribute. 
+ if (cs->levels_for_order == 1) { + m_pack_func = rdb_pack_with_varchar_space_pad; + m_skip_func = rdb_skip_variable_space_pad; + m_segment_size = get_segment_size_from_collation(cs); + m_max_image_len = + (max_image_len_before_chunks / (m_segment_size - 1) + 1) * + m_segment_size; + rdb_get_mem_comparable_space(cs, &space_xfrm, &space_xfrm_len, + &space_mb_len); + } else { + // NO_LINT_DEBUG + sql_print_warning("RocksDB: you're trying to create an index " + "with a multi-level collation %s", + cs->name); + // NO_LINT_DEBUG + sql_print_warning("MyRocks will handle this collation internally " + " as if it had a NO_PAD attribute."); + m_pack_func = rdb_pack_with_varchar_encoding; + m_skip_func = rdb_skip_variable_length; + } + } + + if ((codec = rdb_init_collation_mapping(cs)) != nullptr) { + // The collation allows to store extra information in the unpack_info + // which can be used to restore the original value from the + // mem-comparable form. + m_make_unpack_info_func = codec->m_make_unpack_info_func[idx]; + m_unpack_func = codec->m_unpack_func[idx]; + m_charset_codec = codec; + } else if (use_unknown_collation) { + // We have no clue about how this collation produces mem-comparable + // form. Our way of restoring the original value is to keep a copy of + // the original value in unpack_info. + m_unpack_info_stores_value = true; + m_make_unpack_info_func = is_varchar ? rdb_make_unpack_unknown_varchar + : rdb_make_unpack_unknown; + m_unpack_func = + is_varchar ? rdb_unpack_unknown_varchar : rdb_unpack_unknown; + } else { + // Same as above: we don't know how to restore the value from its + // mem-comparable form. + // Here, we just indicate to the SQL layer we can't do it. + DBUG_ASSERT(m_unpack_func == nullptr); + m_unpack_info_stores_value = false; + res = false; // Indicate that index-only reads are not possible + } + } + + // Make an adjustment: unpacking partially covered columns is not + // possible. 
field->table is populated when called through + // Rdb_key_def::setup, but not during ha_rocksdb::index_flags. + if (field->table) { + // Get the original Field object and compare lengths. If this key part is + // a prefix of a column, then we can't do index-only scans. + if (field->table->field[field->field_index]->field_length != key_length) { + m_unpack_func = nullptr; + m_make_unpack_info_func = nullptr; + m_unpack_info_stores_value = true; + res = false; + } + } else { + if (field->field_length != key_length) { + m_unpack_func = nullptr; + m_make_unpack_info_func = nullptr; + m_unpack_info_stores_value = true; + res = false; + } + } + } + return res; +} + +Field *Rdb_field_packing::get_field_in_table(const TABLE *const tbl) const { + return tbl->key_info[m_keynr].key_part[m_key_part].field; +} + +void Rdb_field_packing::fill_hidden_pk_val(uchar **dst, + const longlong &hidden_pk_id) const { + DBUG_ASSERT(m_max_image_len == 8); + + String to; + rdb_netstr_append_uint64(&to, hidden_pk_id); + memcpy(*dst, to.ptr(), m_max_image_len); + + *dst += m_max_image_len; +} + +/////////////////////////////////////////////////////////////////////////////////////////// +// Rdb_ddl_manager +/////////////////////////////////////////////////////////////////////////////////////////// + +Rdb_tbl_def::~Rdb_tbl_def() { + auto ddl_manager = rdb_get_ddl_manager(); + /* Don't free key definitions */ + if (m_key_descr_arr) { + for (uint i = 0; i < m_key_count; i++) { + if (ddl_manager && m_key_descr_arr[i]) { + ddl_manager->erase_index_num(m_key_descr_arr[i]->get_gl_index_id()); + } + + m_key_descr_arr[i] = nullptr; + } + + delete[] m_key_descr_arr; + m_key_descr_arr = nullptr; + } +} + +/* + Put table definition DDL entry. Actual write is done at + Rdb_dict_manager::commit. + + We write + dbname.tablename -> version + {key_entry, key_entry, key_entry, ... 
} + + Where key entries are a tuple of + ( cf_id, index_nr ) +*/ + +bool Rdb_tbl_def::put_dict(Rdb_dict_manager *const dict, + rocksdb::WriteBatch *const batch, uchar *const key, + const size_t &keylen) { + StringBuffer<8 * Rdb_key_def::PACKED_SIZE> indexes; + indexes.alloc(Rdb_key_def::VERSION_SIZE + + m_key_count * Rdb_key_def::PACKED_SIZE * 2); + rdb_netstr_append_uint16(&indexes, Rdb_key_def::DDL_ENTRY_INDEX_VERSION); + + for (uint i = 0; i < m_key_count; i++) { + const Rdb_key_def &kd = *m_key_descr_arr[i]; + + uchar flags = + (kd.m_is_reverse_cf ? Rdb_key_def::REVERSE_CF_FLAG : 0) | + (kd.m_is_auto_cf ? Rdb_key_def::AUTO_CF_FLAG : 0) | + (kd.m_is_per_partition_cf ? Rdb_key_def::PER_PARTITION_CF_FLAG : 0); + + const uint cf_id = kd.get_cf()->GetID(); + /* + If cf_id already exists, cf_flags must be the same. + To prevent race condition, reading/modifying/committing CF flags + need to be protected by mutex (dict_manager->lock()). + When RocksDB supports transaction with pessimistic concurrency + control, we can switch to use it and removing mutex. + */ + uint existing_cf_flags; + const std::string cf_name = kd.get_cf()->GetName(); + + if (dict->get_cf_flags(cf_id, &existing_cf_flags)) { + // For the purposes of comparison we'll clear the partitioning bit. The + // intent here is to make sure that both partitioned and non-partitioned + // tables can refer to the same CF. + existing_cf_flags &= ~Rdb_key_def::CF_FLAGS_TO_IGNORE; + flags &= ~Rdb_key_def::CF_FLAGS_TO_IGNORE; + + if (existing_cf_flags != flags) { + my_printf_error(ER_UNKNOWN_ERROR, + "Column family ('%s') flag (%d) is different from an " + "existing flag (%d). 
Assign a new CF flag, or do not " + "change existing CF flag.", MYF(0), cf_name.c_str(), + flags, existing_cf_flags); + return true; + } + } else { + dict->add_cf_flags(batch, cf_id, flags); + } + + rdb_netstr_append_uint32(&indexes, cf_id); + rdb_netstr_append_uint32(&indexes, kd.m_index_number); + dict->add_or_update_index_cf_mapping(batch, kd.m_index_type, + kd.m_kv_format_version, + kd.m_index_number, cf_id); + } + + const rocksdb::Slice skey((char *)key, keylen); + const rocksdb::Slice svalue(indexes.c_ptr(), indexes.length()); + + dict->put_key(batch, skey, svalue); + return false; +} + +void Rdb_tbl_def::check_if_is_mysql_system_table() { + static const char *const system_dbs[] = { + "mysql", "performance_schema", "information_schema", + }; + + m_is_mysql_system_table = false; + for (uint ii = 0; ii < array_elements(system_dbs); ii++) { + if (strcmp(m_dbname.c_str(), system_dbs[ii]) == 0) { + m_is_mysql_system_table = true; + break; + } + } +} + +void Rdb_tbl_def::set_name(const std::string &name) { + int err MY_ATTRIBUTE((__unused__)); + + m_dbname_tablename = name; + err = rdb_split_normalized_tablename(name, &m_dbname, &m_tablename, + &m_partition); + DBUG_ASSERT(err == 0); + + check_if_is_mysql_system_table(); +} + +/* + Static function of type my_hash_get_key that gets invoked by + the m_ddl_hash object of type my_core::HASH. + It manufactures a key (db+table name in our case) from a record + (Rdb_tbl_def in our case). +*/ +const uchar * +Rdb_ddl_manager::get_hash_key(Rdb_tbl_def *const rec, size_t *const length, + my_bool not_used MY_ATTRIBUTE((__unused__))) { + const std::string &dbname_tablename = rec->full_tablename(); + *length = dbname_tablename.size(); + return reinterpret_cast(dbname_tablename.c_str()); +} + +/* + Static function of type void (*my_hash_free_element_func_t)(void*) that gets + invoked by the m_ddl_hash object of type my_core::HASH. + It deletes a record (Rdb_tbl_def in our case). 
+*/ +void Rdb_ddl_manager::free_hash_elem(void *const data) { + Rdb_tbl_def *elem = reinterpret_cast(data); + delete elem; +} + +void Rdb_ddl_manager::erase_index_num(const GL_INDEX_ID &gl_index_id) { + m_index_num_to_keydef.erase(gl_index_id); +} + +void Rdb_ddl_manager::add_uncommitted_keydefs( + const std::unordered_set> &indexes) { + mysql_rwlock_wrlock(&m_rwlock); + for (const auto &index : indexes) { + m_index_num_to_uncommitted_keydef[index->get_gl_index_id()] = index; + } + mysql_rwlock_unlock(&m_rwlock); +} + +void Rdb_ddl_manager::remove_uncommitted_keydefs( + const std::unordered_set> &indexes) { + mysql_rwlock_wrlock(&m_rwlock); + for (const auto &index : indexes) { + m_index_num_to_uncommitted_keydef.erase(index->get_gl_index_id()); + } + mysql_rwlock_unlock(&m_rwlock); +} + +namespace // anonymous namespace = not visible outside this source file +{ +struct Rdb_validate_tbls : public Rdb_tables_scanner { + using tbl_info_t = std::pair; + using tbl_list_t = std::map>; + + tbl_list_t m_list; + + int add_table(Rdb_tbl_def *tdef) override; + + bool compare_to_actual_tables(const std::string &datadir, bool *has_errors); + + bool scan_for_frms(const std::string &datadir, const std::string &dbname, + bool *has_errors); + + bool check_frm_file(const std::string &fullpath, const std::string &dbname, + const std::string &tablename, bool *has_errors); +}; +} // anonymous namespace + +/* + Get a list of tables that we expect to have .frm files for. This will use the + information just read from the RocksDB data dictionary. +*/ +int Rdb_validate_tbls::add_table(Rdb_tbl_def *tdef) { + DBUG_ASSERT(tdef != nullptr); + + /* Add the database/table into the list */ + bool is_partition = tdef->base_partition().size() != 0; + m_list[tdef->base_dbname()].insert( + tbl_info_t(tdef->base_tablename(), is_partition)); + + return HA_EXIT_SUCCESS; +} + +/* + Access the .frm file for this dbname/tablename and see if it is a RocksDB + table (or partition table). 
+*/ +bool Rdb_validate_tbls::check_frm_file(const std::string &fullpath, + const std::string &dbname, + const std::string &tablename, + bool *has_errors) { + /* Check this .frm file to see what engine it uses */ + String fullfilename(fullpath.c_str(), &my_charset_bin); + fullfilename.append(FN_DIRSEP); + fullfilename.append(tablename.c_str()); + fullfilename.append(".frm"); + + /* + This function will return the legacy_db_type of the table. Currently + it does not reference the first parameter (THD* thd), but if it ever + did in the future we would need to make a version that does it without + the connection handle as we don't have one here. + */ + char eng_type_buf[NAME_CHAR_LEN+1]; + LEX_STRING eng_type_str = {eng_type_buf, 0}; + //enum legacy_db_type eng_type; + frm_type_enum type = dd_frm_type(nullptr, fullfilename.c_ptr(), &eng_type_str); + if (type == FRMTYPE_ERROR) { + sql_print_warning("RocksDB: Failed to open/read .frm file: %s", + fullfilename.ptr()); + return false; + } + + if (type == FRMTYPE_TABLE) { + /* For a RocksDB table do we have a reference in the data dictionary? */ + if (!strncmp(eng_type_str.str, "ROCKSDB", eng_type_str.length)) { + /* + Attempt to remove the table entry from the list of tables. If this + fails then we know we had a .frm file that wasn't registered in RocksDB. + */ + tbl_info_t element(tablename, false); + if (m_list.count(dbname) == 0 || m_list[dbname].erase(element) == 0) { + sql_print_warning("RocksDB: Schema mismatch - " + "A .frm file exists for table %s.%s, " + "but that table is not registered in RocksDB", + dbname.c_str(), tablename.c_str()); + *has_errors = true; + } + } else if (!strncmp(eng_type_str.str, "partition", eng_type_str.length)) { + /* + For partition tables, see if it is in the m_list as a partition, + but don't generate an error if it isn't there - we don't know that the + .frm is for RocksDB. 
+ */ + if (m_list.count(dbname) > 0) { + m_list[dbname].erase(tbl_info_t(tablename, true)); + } + } + } + + return true; +} + +/* Scan the database subdirectory for .frm files */ +bool Rdb_validate_tbls::scan_for_frms(const std::string &datadir, + const std::string &dbname, + bool *has_errors) { + bool result = true; + std::string fullpath = datadir + dbname; + struct st_my_dir *dir_info = my_dir(fullpath.c_str(), MYF(MY_DONT_SORT)); + + /* Access the directory */ + if (dir_info == nullptr) { + sql_print_warning("RocksDB: Could not open database directory: %s", + fullpath.c_str()); + return false; + } + + /* Scan through the files in the directory */ + struct fileinfo *file_info = dir_info->dir_entry; + for (uint ii = 0; ii < dir_info->number_of_files; ii++, file_info++) { + /* Find .frm files that are not temp files (those that start with '#') */ + const char *ext = strrchr(file_info->name, '.'); + if (ext != nullptr && !is_prefix(file_info->name, tmp_file_prefix) && + strcmp(ext, ".frm") == 0) { + std::string tablename = + std::string(file_info->name, ext - file_info->name); + + /* Check to see if the .frm file is from RocksDB */ + if (!check_frm_file(fullpath, dbname, tablename, has_errors)) { + result = false; + break; + } + } + } + + /* Remove any databases who have no more tables listed */ + if (m_list.count(dbname) == 1 && m_list[dbname].size() == 0) { + m_list.erase(dbname); + } + + /* Release the directory entry */ + my_dirend(dir_info); + + return result; +} + +/* + Scan the datadir for all databases (subdirectories) and get a list of .frm + files they contain +*/ +bool Rdb_validate_tbls::compare_to_actual_tables(const std::string &datadir, + bool *has_errors) { + bool result = true; + struct st_my_dir *dir_info; + struct fileinfo *file_info; + + dir_info = my_dir(datadir.c_str(), MYF(MY_DONT_SORT | MY_WANT_STAT)); + if (dir_info == nullptr) { + sql_print_warning("RocksDB: could not open datadir: %s", datadir.c_str()); + return false; + } + + file_info = 
dir_info->dir_entry; + for (uint ii = 0; ii < dir_info->number_of_files; ii++, file_info++) { + /* Ignore files/dirs starting with '.' */ + if (file_info->name[0] == '.') + continue; + + /* Ignore all non-directory files */ + if (!MY_S_ISDIR(file_info->mystat->st_mode)) + continue; + + /* Scan all the .frm files in the directory */ + if (!scan_for_frms(datadir, file_info->name, has_errors)) { + result = false; + break; + } + } + + /* Release the directory info */ + my_dirend(dir_info); + + return result; +} + +/* + Validate that all the tables in the RocksDB database dictionary match the .frm + files in the datdir +*/ +bool Rdb_ddl_manager::validate_schemas(void) { + bool has_errors = false; + const std::string datadir = std::string(mysql_real_data_home); + Rdb_validate_tbls table_list; + + /* Get the list of tables from the database dictionary */ + if (scan_for_tables(&table_list) != 0) { + return false; + } + + /* Compare that to the list of actual .frm files */ + if (!table_list.compare_to_actual_tables(datadir, &has_errors)) { + return false; + } + + /* + Any tables left in the tables list are ones that are registered in RocksDB + but don't have .frm files. 
+ */ + for (const auto &db : table_list.m_list) { + for (const auto &table : db.second) { + sql_print_warning("RocksDB: Schema mismatch - " + "Table %s.%s is registered in RocksDB " + "but does not have a .frm file", + db.first.c_str(), table.first.c_str()); + has_errors = true; + } + } + + return !has_errors; +} + +bool Rdb_ddl_manager::init(Rdb_dict_manager *const dict_arg, + Rdb_cf_manager *const cf_manager, + const uint32_t &validate_tables) { + const ulong TABLE_HASH_SIZE = 32; + m_dict = dict_arg; + mysql_rwlock_init(0, &m_rwlock); + (void)my_hash_init(&m_ddl_hash, + /*system_charset_info*/ &my_charset_bin, TABLE_HASH_SIZE, + 0, 0, (my_hash_get_key)Rdb_ddl_manager::get_hash_key, + Rdb_ddl_manager::free_hash_elem, 0); + + /* Read the data dictionary and populate the hash */ + uchar ddl_entry[Rdb_key_def::INDEX_NUMBER_SIZE]; + rdb_netbuf_store_index(ddl_entry, Rdb_key_def::DDL_ENTRY_INDEX_START_NUMBER); + const rocksdb::Slice ddl_entry_slice((char *)ddl_entry, + Rdb_key_def::INDEX_NUMBER_SIZE); + + /* Reading data dictionary should always skip bloom filter */ + rocksdb::Iterator *it = m_dict->new_iterator(); + int i = 0; + + uint max_index_id_in_dict = 0; + m_dict->get_max_index_id(&max_index_id_in_dict); + + for (it->Seek(ddl_entry_slice); it->Valid(); it->Next()) { + const uchar *ptr; + const uchar *ptr_end; + const rocksdb::Slice key = it->key(); + const rocksdb::Slice val = it->value(); + + if (key.size() >= Rdb_key_def::INDEX_NUMBER_SIZE && + memcmp(key.data(), ddl_entry, Rdb_key_def::INDEX_NUMBER_SIZE)) + break; + + if (key.size() <= Rdb_key_def::INDEX_NUMBER_SIZE) { + sql_print_error("RocksDB: Table_store: key has length %d (corruption?)", + (int)key.size()); + return true; + } + + Rdb_tbl_def *const tdef = + new Rdb_tbl_def(key, Rdb_key_def::INDEX_NUMBER_SIZE); + + // Now, read the DDLs. 
+ const int real_val_size = val.size() - Rdb_key_def::VERSION_SIZE; + if (real_val_size % Rdb_key_def::PACKED_SIZE * 2) { + sql_print_error("RocksDB: Table_store: invalid keylist for table %s", + tdef->full_tablename().c_str()); + return true; + } + tdef->m_key_count = real_val_size / (Rdb_key_def::PACKED_SIZE * 2); + tdef->m_key_descr_arr = new std::shared_ptr[tdef->m_key_count]; + + ptr = reinterpret_cast(val.data()); + const int version = rdb_netbuf_read_uint16(&ptr); + if (version != Rdb_key_def::DDL_ENTRY_INDEX_VERSION) { + sql_print_error("RocksDB: DDL ENTRY Version was not expected." + "Expected: %d, Actual: %d", + Rdb_key_def::DDL_ENTRY_INDEX_VERSION, version); + return true; + } + ptr_end = ptr + real_val_size; + for (uint keyno = 0; ptr < ptr_end; keyno++) { + GL_INDEX_ID gl_index_id; + rdb_netbuf_read_gl_index(&ptr, &gl_index_id); + uint16 m_index_dict_version = 0; + uchar m_index_type = 0; + uint16 kv_version = 0; + uint flags = 0; + if (!m_dict->get_index_info(gl_index_id, &m_index_dict_version, + &m_index_type, &kv_version)) { + sql_print_error("RocksDB: Could not get index information " + "for Index Number (%u,%u), table %s", + gl_index_id.cf_id, gl_index_id.index_id, + tdef->full_tablename().c_str()); + return true; + } + if (max_index_id_in_dict < gl_index_id.index_id) { + sql_print_error("RocksDB: Found max index id %u from data dictionary " + "but also found larger index id %u from dictionary. 
" + "This should never happen and possibly a bug.", + max_index_id_in_dict, gl_index_id.index_id); + return true; + } + if (!m_dict->get_cf_flags(gl_index_id.cf_id, &flags)) { + sql_print_error("RocksDB: Could not get Column Family Flags " + "for CF Number %d, table %s", + gl_index_id.cf_id, tdef->full_tablename().c_str()); + return true; + } + + rocksdb::ColumnFamilyHandle *const cfh = + cf_manager->get_cf(gl_index_id.cf_id); + DBUG_ASSERT(cfh != nullptr); + + /* + We can't fully initialize Rdb_key_def object here, because full + initialization requires that there is an open TABLE* where we could + look at Field* objects and set max_length and other attributes + */ + tdef->m_key_descr_arr[keyno] = std::make_shared( + gl_index_id.index_id, keyno, cfh, m_index_dict_version, m_index_type, + kv_version, flags & Rdb_key_def::REVERSE_CF_FLAG, + flags & Rdb_key_def::AUTO_CF_FLAG, + flags & Rdb_key_def::PER_PARTITION_CF_FLAG, "", + m_dict->get_stats(gl_index_id)); + } + put(tdef); + i++; + } + + /* + If validate_tables is greater than 0 run the validation. Only fail the + initialzation if the setting is 1. If the setting is 2 we continue. 
+ */ + if (validate_tables > 0 && !validate_schemas()) { + if (validate_tables == 1) { + sql_print_error("RocksDB: Problems validating data dictionary " + "against .frm files, exiting"); + return true; + } + } + + // index ids used by applications should not conflict with + // data dictionary index ids + if (max_index_id_in_dict < Rdb_key_def::END_DICT_INDEX_ID) { + max_index_id_in_dict = Rdb_key_def::END_DICT_INDEX_ID; + } + + m_sequence.init(max_index_id_in_dict + 1); + + if (!it->status().ok()) { + const std::string s = it->status().ToString(); + sql_print_error("RocksDB: Table_store: load error: %s", s.c_str()); + return true; + } + delete it; + sql_print_information("RocksDB: Table_store: loaded DDL data for %d tables", + i); + return false; +} + +Rdb_tbl_def *Rdb_ddl_manager::find(const std::string &table_name, + const bool &lock) { + if (lock) { + mysql_rwlock_rdlock(&m_rwlock); + } + + Rdb_tbl_def *const rec = reinterpret_cast(my_hash_search( + &m_ddl_hash, reinterpret_cast(table_name.c_str()), + table_name.size())); + + if (lock) { + mysql_rwlock_unlock(&m_rwlock); + } + + return rec; +} + +// this is a safe version of the find() function below. It acquires a read +// lock on m_rwlock to make sure the Rdb_key_def is not discarded while we +// are finding it. Copying it into 'ret' increments the count making sure +// that the object will not be discarded until we are finished with it. 
+std::shared_ptr +Rdb_ddl_manager::safe_find(GL_INDEX_ID gl_index_id) { + std::shared_ptr ret(nullptr); + + mysql_rwlock_rdlock(&m_rwlock); + + auto it = m_index_num_to_keydef.find(gl_index_id); + if (it != m_index_num_to_keydef.end()) { + const auto table_def = find(it->second.first, false); + if (table_def && it->second.second < table_def->m_key_count) { + const auto &kd = table_def->m_key_descr_arr[it->second.second]; + if (kd->max_storage_fmt_length() != 0) { + ret = kd; + } + } + } else { + auto it = m_index_num_to_uncommitted_keydef.find(gl_index_id); + if (it != m_index_num_to_uncommitted_keydef.end()) { + const auto &kd = it->second; + if (kd->max_storage_fmt_length() != 0) { + ret = kd; + } + } + } + + mysql_rwlock_unlock(&m_rwlock); + + return ret; +} + +// this method assumes at least read-only lock on m_rwlock +const std::shared_ptr & +Rdb_ddl_manager::find(GL_INDEX_ID gl_index_id) { + auto it = m_index_num_to_keydef.find(gl_index_id); + if (it != m_index_num_to_keydef.end()) { + auto table_def = find(it->second.first, false); + if (table_def) { + if (it->second.second < table_def->m_key_count) { + return table_def->m_key_descr_arr[it->second.second]; + } + } + } else { + auto it = m_index_num_to_uncommitted_keydef.find(gl_index_id); + if (it != m_index_num_to_uncommitted_keydef.end()) { + return it->second; + } + } + + static std::shared_ptr empty = nullptr; + + return empty; +} + +void Rdb_ddl_manager::set_stats( + const std::unordered_map &stats) { + mysql_rwlock_wrlock(&m_rwlock); + for (auto src : stats) { + const auto &keydef = find(src.second.m_gl_index_id); + if (keydef) { + keydef->m_stats = src.second; + m_stats2store[keydef->m_stats.m_gl_index_id] = keydef->m_stats; + } + } + mysql_rwlock_unlock(&m_rwlock); +} + +void Rdb_ddl_manager::adjust_stats( + const std::vector &new_data, + const std::vector &deleted_data) { + mysql_rwlock_wrlock(&m_rwlock); + int i = 0; + for (const auto &data : {new_data, deleted_data}) { + for (const auto &src : 
data) { + const auto &keydef = find(src.m_gl_index_id); + if (keydef) { + keydef->m_stats.m_distinct_keys_per_prefix.resize( + keydef->get_key_parts()); + keydef->m_stats.merge(src, i == 0, keydef->max_storage_fmt_length()); + m_stats2store[keydef->m_stats.m_gl_index_id] = keydef->m_stats; + } + } + i++; + } + const bool should_save_stats = !m_stats2store.empty(); + mysql_rwlock_unlock(&m_rwlock); + if (should_save_stats) { + // Queue an async persist_stats(false) call to the background thread. + rdb_queue_save_stats_request(); + } +} + +void Rdb_ddl_manager::persist_stats(const bool &sync) { + mysql_rwlock_wrlock(&m_rwlock); + const auto local_stats2store = std::move(m_stats2store); + m_stats2store.clear(); + mysql_rwlock_unlock(&m_rwlock); + + // Persist stats + const std::unique_ptr wb = m_dict->begin(); + std::vector stats; + std::transform(local_stats2store.begin(), local_stats2store.end(), + std::back_inserter(stats), + [](const std::pair &s) { + return s.second; + }); + m_dict->add_stats(wb.get(), stats); + m_dict->commit(wb.get(), sync); +} + +/* + Put table definition of `tbl` into the mapping, and also write it to the + on-disk data dictionary. +*/ + +int Rdb_ddl_manager::put_and_write(Rdb_tbl_def *const tbl, + rocksdb::WriteBatch *const batch) { + uchar buf[FN_LEN * 2 + Rdb_key_def::INDEX_NUMBER_SIZE]; + uint pos = 0; + + rdb_netbuf_store_index(buf, Rdb_key_def::DDL_ENTRY_INDEX_START_NUMBER); + pos += Rdb_key_def::INDEX_NUMBER_SIZE; + + const std::string &dbname_tablename = tbl->full_tablename(); + memcpy(buf + pos, dbname_tablename.c_str(), dbname_tablename.size()); + pos += dbname_tablename.size(); + + int res; + if ((res = tbl->put_dict(m_dict, batch, buf, pos))) { + return res; + } + if ((res = put(tbl))) { + return res; + } + return HA_EXIT_SUCCESS; +} + +/* Return 0 - ok, other value - error */ +/* TODO: + This function modifies m_ddl_hash and m_index_num_to_keydef. 
+ However, these changes need to be reversed if dict_manager.commit fails + See the discussion here: https://reviews.facebook.net/D35925#inline-259167 + Tracked by https://github.com/facebook/mysql-5.6/issues/33 +*/ +int Rdb_ddl_manager::put(Rdb_tbl_def *const tbl, const bool &lock) { + Rdb_tbl_def *rec; + my_bool result; + const std::string &dbname_tablename = tbl->full_tablename(); + + if (lock) + mysql_rwlock_wrlock(&m_rwlock); + + // We have to do this find because 'tbl' is not yet in the list. We need + // to find the one we are replacing ('rec') + rec = find(dbname_tablename, false); + if (rec) { + // this will free the old record. + my_hash_delete(&m_ddl_hash, reinterpret_cast(rec)); + } + result = my_hash_insert(&m_ddl_hash, reinterpret_cast(tbl)); + + for (uint keyno = 0; keyno < tbl->m_key_count; keyno++) { + m_index_num_to_keydef[tbl->m_key_descr_arr[keyno]->get_gl_index_id()] = + std::make_pair(dbname_tablename, keyno); + } + + if (lock) + mysql_rwlock_unlock(&m_rwlock); + return result; +} + +void Rdb_ddl_manager::remove(Rdb_tbl_def *const tbl, + rocksdb::WriteBatch *const batch, + const bool &lock) { + if (lock) + mysql_rwlock_wrlock(&m_rwlock); + + uchar buf[FN_LEN * 2 + Rdb_key_def::INDEX_NUMBER_SIZE]; + uint pos = 0; + + rdb_netbuf_store_index(buf, Rdb_key_def::DDL_ENTRY_INDEX_START_NUMBER); + pos += Rdb_key_def::INDEX_NUMBER_SIZE; + + const std::string &dbname_tablename = tbl->full_tablename(); + memcpy(buf + pos, dbname_tablename.c_str(), dbname_tablename.size()); + pos += dbname_tablename.size(); + + const rocksdb::Slice tkey((char *)buf, pos); + m_dict->delete_key(batch, tkey); + + /* The following will also delete the object: */ + my_hash_delete(&m_ddl_hash, reinterpret_cast(tbl)); + + if (lock) + mysql_rwlock_unlock(&m_rwlock); +} + +bool Rdb_ddl_manager::rename(const std::string &from, const std::string &to, + rocksdb::WriteBatch *const batch) { + Rdb_tbl_def *rec; + Rdb_tbl_def *new_rec; + bool res = true; + uchar new_buf[FN_LEN * 2 + 
Rdb_key_def::INDEX_NUMBER_SIZE]; + uint new_pos = 0; + + mysql_rwlock_wrlock(&m_rwlock); + if (!(rec = find(from, false))) { + mysql_rwlock_unlock(&m_rwlock); + return true; + } + + new_rec = new Rdb_tbl_def(to); + + new_rec->m_key_count = rec->m_key_count; + new_rec->m_auto_incr_val = + rec->m_auto_incr_val.load(std::memory_order_relaxed); + new_rec->m_key_descr_arr = rec->m_key_descr_arr; + // so that it's not free'd when deleting the old rec + rec->m_key_descr_arr = nullptr; + + // Create a new key + rdb_netbuf_store_index(new_buf, Rdb_key_def::DDL_ENTRY_INDEX_START_NUMBER); + new_pos += Rdb_key_def::INDEX_NUMBER_SIZE; + + const std::string &dbname_tablename = new_rec->full_tablename(); + memcpy(new_buf + new_pos, dbname_tablename.c_str(), dbname_tablename.size()); + new_pos += dbname_tablename.size(); + + // Create a key to add + if (!new_rec->put_dict(m_dict, batch, new_buf, new_pos)) { + remove(rec, batch, false); + put(new_rec, false); + res = false; // ok + } + + mysql_rwlock_unlock(&m_rwlock); + return res; +} + +void Rdb_ddl_manager::cleanup() { + my_hash_free(&m_ddl_hash); + mysql_rwlock_destroy(&m_rwlock); + m_sequence.cleanup(); +} + +int Rdb_ddl_manager::scan_for_tables(Rdb_tables_scanner *const tables_scanner) { + int i, ret; + Rdb_tbl_def *rec; + + DBUG_ASSERT(tables_scanner != nullptr); + + mysql_rwlock_rdlock(&m_rwlock); + + ret = 0; + i = 0; + + while (( + rec = reinterpret_cast(my_hash_element(&m_ddl_hash, i)))) { + ret = tables_scanner->add_table(rec); + if (ret) + break; + i++; + } + + mysql_rwlock_unlock(&m_rwlock); + return ret; +} + +/* + Rdb_binlog_manager class implementation +*/ + +bool Rdb_binlog_manager::init(Rdb_dict_manager *const dict_arg) { + DBUG_ASSERT(dict_arg != nullptr); + m_dict = dict_arg; + + rdb_netbuf_store_index(m_key_buf, Rdb_key_def::BINLOG_INFO_INDEX_NUMBER); + m_key_slice = rocksdb::Slice(reinterpret_cast(m_key_buf), + Rdb_key_def::INDEX_NUMBER_SIZE); + return false; +} + +void Rdb_binlog_manager::cleanup() {} + +/** 
+ Set binlog name, pos and optionally gtid into WriteBatch. + This function should be called as part of transaction commit, + since binlog info is set only at transaction commit. + Actual write into RocksDB is not done here, so checking if + write succeeded or not is not possible here. + @param binlog_name Binlog name + @param binlog_pos Binlog pos + @param binlog_gtid Binlog max GTID + @param batch WriteBatch +*/ +void Rdb_binlog_manager::update(const char *const binlog_name, + const my_off_t binlog_pos, + const char *const binlog_max_gtid, + rocksdb::WriteBatchBase *const batch) { + if (binlog_name && binlog_pos) { + // max binlog length (512) + binlog pos (4) + binlog gtid (57) < 1024 + const size_t RDB_MAX_BINLOG_INFO_LEN = 1024; + uchar value_buf[RDB_MAX_BINLOG_INFO_LEN]; + m_dict->put_key( + batch, m_key_slice, + pack_value(value_buf, binlog_name, binlog_pos, binlog_max_gtid)); + } +} + +/** + Read binlog committed entry stored in RocksDB, then unpack + @param[OUT] binlog_name Binlog name + @param[OUT] binlog_pos Binlog pos + @param[OUT] binlog_gtid Binlog GTID + @return + true is binlog info was found (valid behavior) + false otherwise +*/ +bool Rdb_binlog_manager::read(char *const binlog_name, + my_off_t *const binlog_pos, + char *const binlog_gtid) const { + bool ret = false; + if (binlog_name) { + std::string value; + rocksdb::Status status = m_dict->get_value(m_key_slice, &value); + if (status.ok()) { + if (!unpack_value((const uchar *)value.c_str(), binlog_name, binlog_pos, + binlog_gtid)) + ret = true; + } + } + return ret; +} + +/** + Pack binlog_name, binlog_pos, binlog_gtid into preallocated + buffer, then converting and returning a RocksDB Slice + @param buf Preallocated buffer to set binlog info. 
+ @param binlog_name Binlog name + @param binlog_pos Binlog pos + @param binlog_gtid Binlog GTID + @return rocksdb::Slice converted from buf and its length +*/ +rocksdb::Slice +Rdb_binlog_manager::pack_value(uchar *const buf, const char *const binlog_name, + const my_off_t &binlog_pos, + const char *const binlog_gtid) const { + uint pack_len = 0; + + // store version + rdb_netbuf_store_uint16(buf, Rdb_key_def::BINLOG_INFO_INDEX_NUMBER_VERSION); + pack_len += Rdb_key_def::VERSION_SIZE; + + // store binlog file name length + DBUG_ASSERT(strlen(binlog_name) <= FN_REFLEN); + const uint16_t binlog_name_len = (uint16_t)strlen(binlog_name); + rdb_netbuf_store_uint16(buf + pack_len, binlog_name_len); + pack_len += sizeof(uint16); + + // store binlog file name + memcpy(buf + pack_len, binlog_name, binlog_name_len); + pack_len += binlog_name_len; + + // store binlog pos + rdb_netbuf_store_uint32(buf + pack_len, binlog_pos); + pack_len += sizeof(uint32); + + // store binlog gtid length. + // If gtid was not set, store 0 instead + const uint16_t binlog_gtid_len = binlog_gtid ? 
(uint16_t)strlen(binlog_gtid) : 0; + rdb_netbuf_store_uint16(buf + pack_len, binlog_gtid_len); + pack_len += sizeof(uint16); + + if (binlog_gtid_len > 0) { + // store binlog gtid + memcpy(buf + pack_len, binlog_gtid, binlog_gtid_len); + pack_len += binlog_gtid_len; + } + + return rocksdb::Slice((char *)buf, pack_len); +} + +/** + Unpack value then split into binlog_name, binlog_pos (and binlog_gtid) + @param[IN] value Binlog state info fetched from RocksDB + @param[OUT] binlog_name Binlog name + @param[OUT] binlog_pos Binlog pos + @param[OUT] binlog_gtid Binlog GTID + @return true on error +*/ +bool Rdb_binlog_manager::unpack_value(const uchar *const value, + char *const binlog_name, + my_off_t *const binlog_pos, + char *const binlog_gtid) const { + uint pack_len = 0; + + DBUG_ASSERT(binlog_pos != nullptr); + + // read version + const uint16_t version = rdb_netbuf_to_uint16(value); + pack_len += Rdb_key_def::VERSION_SIZE; + if (version != Rdb_key_def::BINLOG_INFO_INDEX_NUMBER_VERSION) + return true; + + // read binlog file name length + const uint16_t binlog_name_len = rdb_netbuf_to_uint16(value + pack_len); + pack_len += sizeof(uint16); + if (binlog_name_len) { + // read and set binlog name + memcpy(binlog_name, value + pack_len, binlog_name_len); + binlog_name[binlog_name_len] = '\0'; + pack_len += binlog_name_len; + + // read and set binlog pos + *binlog_pos = rdb_netbuf_to_uint32(value + pack_len); + pack_len += sizeof(uint32); + + // read gtid length + const uint16_t binlog_gtid_len = rdb_netbuf_to_uint16(value + pack_len); + pack_len += sizeof(uint16); + if (binlog_gtid && binlog_gtid_len > 0) { + // read and set gtid + memcpy(binlog_gtid, value + pack_len, binlog_gtid_len); + binlog_gtid[binlog_gtid_len] = '\0'; + pack_len += binlog_gtid_len; + } + } + return false; +} + +/** + Inserts a row into mysql.slave_gtid_info table. Doing this inside + storage engine is more efficient than inserting/updating through MySQL. + + @param[IN] id Primary key of the table. 
+ @param[IN] db Database name. This is column 2 of the table. + @param[IN] gtid Gtid in human readable form. This is column 3 of the table. + @param[IN] write_batch Handle to storage engine writer. +*/ +void Rdb_binlog_manager::update_slave_gtid_info( + const uint &id, const char *const db, const char *const gtid, + rocksdb::WriteBatchBase *const write_batch) { + if (id && db && gtid) { + // Make sure that if the slave_gtid_info table exists we have a + // pointer to it via m_slave_gtid_info_tbl. + if (!m_slave_gtid_info_tbl.load()) { + m_slave_gtid_info_tbl.store( + rdb_get_ddl_manager()->find("mysql.slave_gtid_info")); + } + if (!m_slave_gtid_info_tbl.load()) { + // slave_gtid_info table is not present. Simply return. + return; + } + DBUG_ASSERT(m_slave_gtid_info_tbl.load()->m_key_count == 1); + + const std::shared_ptr &kd = + m_slave_gtid_info_tbl.load()->m_key_descr_arr[0]; + String value; + + // Build key + uchar key_buf[Rdb_key_def::INDEX_NUMBER_SIZE + 4] = {0}; + uchar *buf = key_buf; + rdb_netbuf_store_index(buf, kd->get_index_number()); + buf += Rdb_key_def::INDEX_NUMBER_SIZE; + rdb_netbuf_store_uint32(buf, id); + buf += 4; + const rocksdb::Slice key_slice = + rocksdb::Slice((const char *)key_buf, buf - key_buf); + + // Build value + uchar value_buf[128] = {0}; + DBUG_ASSERT(gtid); + const uint db_len = strlen(db); + const uint gtid_len = strlen(gtid); + buf = value_buf; + // 1 byte used for flags. Empty here. + buf++; + + // Write column 1. + DBUG_ASSERT(strlen(db) <= 64); + rdb_netbuf_store_byte(buf, db_len); + buf++; + memcpy(buf, db, db_len); + buf += db_len; + + // Write column 2. 
+ DBUG_ASSERT(gtid_len <= 56); + rdb_netbuf_store_byte(buf, gtid_len); + buf++; + memcpy(buf, gtid, gtid_len); + buf += gtid_len; + const rocksdb::Slice value_slice = + rocksdb::Slice((const char *)value_buf, buf - value_buf); + + write_batch->Put(kd->get_cf(), key_slice, value_slice); + } +} + +bool Rdb_dict_manager::init(rocksdb::DB *const rdb_dict, + Rdb_cf_manager *const cf_manager) { + mysql_mutex_init(0, &m_mutex, MY_MUTEX_INIT_FAST); + m_db = rdb_dict; + bool is_automatic; + m_system_cfh = cf_manager->get_or_create_cf(m_db, DEFAULT_SYSTEM_CF_NAME, "", + nullptr, &is_automatic); + rdb_netbuf_store_index(m_key_buf_max_index_id, Rdb_key_def::MAX_INDEX_ID); + m_key_slice_max_index_id = + rocksdb::Slice(reinterpret_cast(m_key_buf_max_index_id), + Rdb_key_def::INDEX_NUMBER_SIZE); + resume_drop_indexes(); + rollback_ongoing_index_creation(); + + return (m_system_cfh == nullptr); +} + +std::unique_ptr Rdb_dict_manager::begin() const { + return std::unique_ptr(new rocksdb::WriteBatch); +} + +void Rdb_dict_manager::put_key(rocksdb::WriteBatchBase *const batch, + const rocksdb::Slice &key, + const rocksdb::Slice &value) const { + batch->Put(m_system_cfh, key, value); +} + +rocksdb::Status Rdb_dict_manager::get_value(const rocksdb::Slice &key, + std::string *const value) const { + rocksdb::ReadOptions options; + options.total_order_seek = true; + return m_db->Get(options, m_system_cfh, key, value); +} + +void Rdb_dict_manager::delete_key(rocksdb::WriteBatchBase *batch, + const rocksdb::Slice &key) const { + batch->Delete(m_system_cfh, key); +} + +rocksdb::Iterator *Rdb_dict_manager::new_iterator() const { + /* Reading data dictionary should always skip bloom filter */ + rocksdb::ReadOptions read_options; + read_options.total_order_seek = true; + return m_db->NewIterator(read_options, m_system_cfh); +} + +int Rdb_dict_manager::commit(rocksdb::WriteBatch *const batch, + const bool &sync) const { + if (!batch) + return HA_EXIT_FAILURE; + int res = 0; + 
rocksdb::WriteOptions options; + options.sync = sync; + rocksdb::Status s = m_db->Write(options, batch); + res = !s.ok(); // we return true when something failed + if (res) { + rdb_handle_io_error(s, RDB_IO_ERROR_DICT_COMMIT); + } + batch->Clear(); + return res; +} + +void Rdb_dict_manager::dump_index_id(uchar *const netbuf, + Rdb_key_def::DATA_DICT_TYPE dict_type, + const GL_INDEX_ID &gl_index_id) { + rdb_netbuf_store_uint32(netbuf, dict_type); + rdb_netbuf_store_uint32(netbuf + Rdb_key_def::INDEX_NUMBER_SIZE, + gl_index_id.cf_id); + rdb_netbuf_store_uint32(netbuf + 2 * Rdb_key_def::INDEX_NUMBER_SIZE, + gl_index_id.index_id); +} + +void Rdb_dict_manager::delete_with_prefix( + rocksdb::WriteBatch *const batch, Rdb_key_def::DATA_DICT_TYPE dict_type, + const GL_INDEX_ID &gl_index_id) const { + uchar key_buf[Rdb_key_def::INDEX_NUMBER_SIZE * 3] = {0}; + dump_index_id(key_buf, dict_type, gl_index_id); + rocksdb::Slice key = rocksdb::Slice((char *)key_buf, sizeof(key_buf)); + + delete_key(batch, key); +} + +void Rdb_dict_manager::add_or_update_index_cf_mapping( + rocksdb::WriteBatch *batch, const uchar m_index_type, + const uint16_t kv_version, const uint32_t index_id, + const uint32_t cf_id) const { + uchar key_buf[Rdb_key_def::INDEX_NUMBER_SIZE * 3] = {0}; + uchar value_buf[256] = {0}; + GL_INDEX_ID gl_index_id = {cf_id, index_id}; + dump_index_id(key_buf, Rdb_key_def::INDEX_INFO, gl_index_id); + const rocksdb::Slice key = rocksdb::Slice((char *)key_buf, sizeof(key_buf)); + + uchar *ptr = value_buf; + rdb_netbuf_store_uint16(ptr, Rdb_key_def::INDEX_INFO_VERSION_LATEST); + ptr += 2; + rdb_netbuf_store_byte(ptr, m_index_type); + ptr += 1; + rdb_netbuf_store_uint16(ptr, kv_version); + ptr += 2; + + const rocksdb::Slice value = + rocksdb::Slice((char *)value_buf, ptr - value_buf); + batch->Put(m_system_cfh, key, value); +} + +void Rdb_dict_manager::add_cf_flags(rocksdb::WriteBatch *const batch, + const uint32_t &cf_id, + const uint32_t &cf_flags) const { + uchar 
key_buf[Rdb_key_def::INDEX_NUMBER_SIZE * 2] = {0}; + uchar value_buf[Rdb_key_def::VERSION_SIZE + Rdb_key_def::INDEX_NUMBER_SIZE] = + {0}; + rdb_netbuf_store_uint32(key_buf, Rdb_key_def::CF_DEFINITION); + rdb_netbuf_store_uint32(key_buf + Rdb_key_def::INDEX_NUMBER_SIZE, cf_id); + const rocksdb::Slice key = rocksdb::Slice((char *)key_buf, sizeof(key_buf)); + + rdb_netbuf_store_uint16(value_buf, Rdb_key_def::CF_DEFINITION_VERSION); + rdb_netbuf_store_uint32(value_buf + Rdb_key_def::VERSION_SIZE, cf_flags); + const rocksdb::Slice value = + rocksdb::Slice((char *)value_buf, sizeof(value_buf)); + batch->Put(m_system_cfh, key, value); +} + +void Rdb_dict_manager::delete_index_info(rocksdb::WriteBatch *batch, + const GL_INDEX_ID &gl_index_id) const { + delete_with_prefix(batch, Rdb_key_def::INDEX_INFO, gl_index_id); + delete_with_prefix(batch, Rdb_key_def::INDEX_STATISTICS, gl_index_id); +} + +bool Rdb_dict_manager::get_index_info(const GL_INDEX_ID &gl_index_id, + uint16_t *m_index_dict_version, + uchar *m_index_type, + uint16_t *kv_version) const { + bool found = false; + bool error = false; + std::string value; + uchar key_buf[Rdb_key_def::INDEX_NUMBER_SIZE * 3] = {0}; + dump_index_id(key_buf, Rdb_key_def::INDEX_INFO, gl_index_id); + const rocksdb::Slice &key = rocksdb::Slice((char *)key_buf, sizeof(key_buf)); + + const rocksdb::Status &status = get_value(key, &value); + if (status.ok()) { + const uchar *const val = (const uchar *)value.c_str(); + const uchar *ptr = val; + *m_index_dict_version = rdb_netbuf_to_uint16(val); + *kv_version = 0; + *m_index_type = 0; + ptr += 2; + switch (*m_index_dict_version) { + + case Rdb_key_def::INDEX_INFO_VERSION_VERIFY_KV_FORMAT: + case Rdb_key_def::INDEX_INFO_VERSION_GLOBAL_ID: + *m_index_type = rdb_netbuf_to_byte(ptr); + ptr += 1; + *kv_version = rdb_netbuf_to_uint16(ptr); + found = true; + break; + + default: + error = true; + break; + } + + switch (*m_index_type) { + case Rdb_key_def::INDEX_TYPE_PRIMARY: + case 
Rdb_key_def::INDEX_TYPE_HIDDEN_PRIMARY: { + error = *kv_version > Rdb_key_def::PRIMARY_FORMAT_VERSION_LATEST; + break; + } + case Rdb_key_def::INDEX_TYPE_SECONDARY: + error = *kv_version > Rdb_key_def::SECONDARY_FORMAT_VERSION_LATEST; + break; + default: + error = true; + break; + } + } + + if (error) { + // NO_LINT_DEBUG + sql_print_error("RocksDB: Found invalid key version number (%u, %u, %u) " + "from data dictionary. This should never happen " + "and it may be a bug.", + *m_index_dict_version, *m_index_type, *kv_version); + abort_with_stack_traces(); + } + + return found; +} + +bool Rdb_dict_manager::get_cf_flags(const uint32_t &cf_id, + uint32_t *const cf_flags) const { + bool found = false; + std::string value; + uchar key_buf[Rdb_key_def::INDEX_NUMBER_SIZE * 2] = {0}; + rdb_netbuf_store_uint32(key_buf, Rdb_key_def::CF_DEFINITION); + rdb_netbuf_store_uint32(key_buf + Rdb_key_def::INDEX_NUMBER_SIZE, cf_id); + const rocksdb::Slice key = rocksdb::Slice((char *)key_buf, sizeof(key_buf)); + + const rocksdb::Status status = get_value(key, &value); + if (status.ok()) { + const uchar *val = (const uchar *)value.c_str(); + uint16_t version = rdb_netbuf_to_uint16(val); + if (version == Rdb_key_def::CF_DEFINITION_VERSION) { + *cf_flags = rdb_netbuf_to_uint32(val + Rdb_key_def::VERSION_SIZE); + found = true; + } + } + return found; +} + +/* + Returning index ids that were marked as deleted (via DROP TABLE) but + still not removed by drop_index_thread yet, or indexes that are marked as + ongoing creation. 
+ */ +void Rdb_dict_manager::get_ongoing_index_operation( + std::unordered_set *gl_index_ids, + Rdb_key_def::DATA_DICT_TYPE dd_type) const { + DBUG_ASSERT(dd_type == Rdb_key_def::DDL_DROP_INDEX_ONGOING || + dd_type == Rdb_key_def::DDL_CREATE_INDEX_ONGOING); + + uchar index_buf[Rdb_key_def::INDEX_NUMBER_SIZE]; + rdb_netbuf_store_uint32(index_buf, dd_type); + const rocksdb::Slice index_slice(reinterpret_cast(index_buf), + Rdb_key_def::INDEX_NUMBER_SIZE); + + rocksdb::Iterator *it = new_iterator(); + for (it->Seek(index_slice); it->Valid(); it->Next()) { + rocksdb::Slice key = it->key(); + const uchar *const ptr = (const uchar *)key.data(); + + /* + Ongoing drop/create index operations require key to be of the form: + dd_type + cf_id + index_id (== INDEX_NUMBER_SIZE * 3) + + This may need to be changed in the future if we want to process a new + ddl_type with different format. + */ + if (key.size() != Rdb_key_def::INDEX_NUMBER_SIZE * 3 || + rdb_netbuf_to_uint32(ptr) != dd_type) { + break; + } + + // We don't check version right now since currently we always store only + // Rdb_key_def::DDL_DROP_INDEX_ONGOING_VERSION = 1 as a value. + // If increasing version number, we need to add version check logic here. + GL_INDEX_ID gl_index_id; + gl_index_id.cf_id = + rdb_netbuf_to_uint32(ptr + Rdb_key_def::INDEX_NUMBER_SIZE); + gl_index_id.index_id = + rdb_netbuf_to_uint32(ptr + 2 * Rdb_key_def::INDEX_NUMBER_SIZE); + gl_index_ids->insert(gl_index_id); + } + delete it; +} + +/* + Returning true if index_id is create/delete ongoing (undergoing creation or + marked as deleted via DROP TABLE but drop_index_thread has not wiped yet) + or not. 
+ */ +bool Rdb_dict_manager::is_index_operation_ongoing( + const GL_INDEX_ID &gl_index_id, Rdb_key_def::DATA_DICT_TYPE dd_type) const { + DBUG_ASSERT(dd_type == Rdb_key_def::DDL_DROP_INDEX_ONGOING || + dd_type == Rdb_key_def::DDL_CREATE_INDEX_ONGOING); + + bool found = false; + std::string value; + uchar key_buf[Rdb_key_def::INDEX_NUMBER_SIZE * 3] = {0}; + dump_index_id(key_buf, dd_type, gl_index_id); + const rocksdb::Slice key = rocksdb::Slice((char *)key_buf, sizeof(key_buf)); + + const rocksdb::Status status = get_value(key, &value); + if (status.ok()) { + found = true; + } + return found; +} + +/* + Adding index_id to data dictionary so that the index id is removed + by drop_index_thread, or to track online index creation. + */ +void Rdb_dict_manager::start_ongoing_index_operation( + rocksdb::WriteBatch *const batch, const GL_INDEX_ID &gl_index_id, + Rdb_key_def::DATA_DICT_TYPE dd_type) const { + DBUG_ASSERT(dd_type == Rdb_key_def::DDL_DROP_INDEX_ONGOING || + dd_type == Rdb_key_def::DDL_CREATE_INDEX_ONGOING); + + uchar key_buf[Rdb_key_def::INDEX_NUMBER_SIZE * 3] = {0}; + uchar value_buf[Rdb_key_def::VERSION_SIZE] = {0}; + dump_index_id(key_buf, dd_type, gl_index_id); + + // version as needed + if (dd_type == Rdb_key_def::DDL_DROP_INDEX_ONGOING) { + rdb_netbuf_store_uint16(value_buf, + Rdb_key_def::DDL_DROP_INDEX_ONGOING_VERSION); + } else { + rdb_netbuf_store_uint16(value_buf, + Rdb_key_def::DDL_CREATE_INDEX_ONGOING_VERSION); + } + + const rocksdb::Slice key = rocksdb::Slice((char *)key_buf, sizeof(key_buf)); + const rocksdb::Slice value = + rocksdb::Slice((char *)value_buf, sizeof(value_buf)); + batch->Put(m_system_cfh, key, value); +} + +/* + Removing index_id from data dictionary to confirm drop_index_thread + completed dropping entire key/values of the index_id + */ +void Rdb_dict_manager::end_ongoing_index_operation( + rocksdb::WriteBatch *const batch, const GL_INDEX_ID &gl_index_id, + Rdb_key_def::DATA_DICT_TYPE dd_type) const { + DBUG_ASSERT(dd_type == 
Rdb_key_def::DDL_DROP_INDEX_ONGOING || + dd_type == Rdb_key_def::DDL_CREATE_INDEX_ONGOING); + + delete_with_prefix(batch, dd_type, gl_index_id); +} + +/* + Returning true if there is no target index ids to be removed + by drop_index_thread + */ +bool Rdb_dict_manager::is_drop_index_empty() const { + std::unordered_set gl_index_ids; + get_ongoing_drop_indexes(&gl_index_ids); + return gl_index_ids.empty(); +} + +/* + This function is supposed to be called by DROP TABLE. Logging messages + that dropping indexes started, and adding data dictionary so that + all associated indexes to be removed + */ +void Rdb_dict_manager::add_drop_table( + std::shared_ptr *const key_descr, const uint32 &n_keys, + rocksdb::WriteBatch *const batch) const { + std::unordered_set dropped_index_ids; + for (uint32 i = 0; i < n_keys; i++) { + dropped_index_ids.insert(key_descr[i]->get_gl_index_id()); + } + + add_drop_index(dropped_index_ids, batch); +} + +/* + Called during inplace index drop operations. Logging messages + that dropping indexes started, and adding data dictionary so that + all associated indexes to be removed + */ +void Rdb_dict_manager::add_drop_index( + const std::unordered_set &gl_index_ids, + rocksdb::WriteBatch *const batch) const { + for (const auto &gl_index_id : gl_index_ids) { + log_start_drop_index(gl_index_id, "Begin"); + start_drop_index(batch, gl_index_id); + } +} + +/* + Called during inplace index creation operations. Logging messages + that adding indexes started, and updates data dictionary with all associated + indexes to be added. 
+ */ +void Rdb_dict_manager::add_create_index( + const std::unordered_set &gl_index_ids, + rocksdb::WriteBatch *const batch) const { + for (const auto &gl_index_id : gl_index_ids) { + // NO_LINT_DEBUG + sql_print_information("RocksDB: Begin index creation (%u,%u)", + gl_index_id.cf_id, gl_index_id.index_id); + start_create_index(batch, gl_index_id); + } +} + +/* + This function is supposed to be called by drop_index_thread, when it + finished dropping any index, or at the completion of online index creation. + */ +void Rdb_dict_manager::finish_indexes_operation( + const std::unordered_set &gl_index_ids, + Rdb_key_def::DATA_DICT_TYPE dd_type) const { + DBUG_ASSERT(dd_type == Rdb_key_def::DDL_DROP_INDEX_ONGOING || + dd_type == Rdb_key_def::DDL_CREATE_INDEX_ONGOING); + + const std::unique_ptr wb = begin(); + rocksdb::WriteBatch *const batch = wb.get(); + + std::unordered_set incomplete_create_indexes; + get_ongoing_create_indexes(&incomplete_create_indexes); + + for (const auto &gl_index_id : gl_index_ids) { + if (is_index_operation_ongoing(gl_index_id, dd_type)) { + // NO_LINT_DEBUG + sql_print_information("RocksDB: Finished %s (%u,%u)", + dd_type == Rdb_key_def::DDL_DROP_INDEX_ONGOING + ? "filtering dropped index" + : "index creation", + gl_index_id.cf_id, gl_index_id.index_id); + + end_ongoing_index_operation(batch, gl_index_id, dd_type); + + /* + Remove the corresponding incomplete create indexes from data + dictionary as well + */ + if (dd_type == Rdb_key_def::DDL_DROP_INDEX_ONGOING) { + if (incomplete_create_indexes.count(gl_index_id)) { + end_ongoing_index_operation(batch, gl_index_id, + Rdb_key_def::DDL_CREATE_INDEX_ONGOING); + } + } + } + + if (dd_type == Rdb_key_def::DDL_DROP_INDEX_ONGOING) { + delete_index_info(batch, gl_index_id); + } + } + commit(batch); +} + +/* + This function is supposed to be called when initializing + Rdb_dict_manager (at startup). If there is any index ids that are + drop ongoing, printing out messages for diagnostics purposes. 
+ */ +void Rdb_dict_manager::resume_drop_indexes() const { + std::unordered_set gl_index_ids; + get_ongoing_drop_indexes(&gl_index_ids); + + uint max_index_id_in_dict = 0; + get_max_index_id(&max_index_id_in_dict); + + for (const auto &gl_index_id : gl_index_ids) { + log_start_drop_index(gl_index_id, "Resume"); + if (max_index_id_in_dict < gl_index_id.index_id) { + sql_print_error("RocksDB: Found max index id %u from data dictionary " + "but also found dropped index id (%u,%u) from drop_index " + "dictionary. This should never happen and is possibly a " + "bug.", + max_index_id_in_dict, gl_index_id.cf_id, + gl_index_id.index_id); + abort_with_stack_traces(); + } + } +} + +void Rdb_dict_manager::rollback_ongoing_index_creation() const { + const std::unique_ptr wb = begin(); + rocksdb::WriteBatch *const batch = wb.get(); + + std::unordered_set gl_index_ids; + get_ongoing_create_indexes(&gl_index_ids); + + for (const auto &gl_index_id : gl_index_ids) { + // NO_LINT_DEBUG + sql_print_information("RocksDB: Removing incomplete create index (%u,%u)", + gl_index_id.cf_id, gl_index_id.index_id); + + start_drop_index(batch, gl_index_id); + } + + commit(batch); +} + +void Rdb_dict_manager::log_start_drop_table( + const std::shared_ptr *const key_descr, const uint32 &n_keys, + const char *const log_action) const { + for (uint32 i = 0; i < n_keys; i++) { + log_start_drop_index(key_descr[i]->get_gl_index_id(), log_action); + } +} + +void Rdb_dict_manager::log_start_drop_index(GL_INDEX_ID gl_index_id, + const char *log_action) const { + uint16 m_index_dict_version = 0; + uchar m_index_type = 0; + uint16 kv_version = 0; + + if (!get_index_info(gl_index_id, &m_index_dict_version, &m_index_type, + &kv_version)) { + /* + If we don't find the index info, it could be that it's because it was a + partially created index that isn't in the data dictionary yet that needs + to be rolled back. 
+ */ + std::unordered_set incomplete_create_indexes; + get_ongoing_create_indexes(&incomplete_create_indexes); + + if (!incomplete_create_indexes.count(gl_index_id)) { + /* If it's not a partially created index, something is very wrong. */ + sql_print_error("RocksDB: Failed to get column family info " + "from index id (%u,%u). MyRocks data dictionary may " + "get corrupted.", + gl_index_id.cf_id, gl_index_id.index_id); + abort_with_stack_traces(); + } + } + sql_print_information("RocksDB: %s filtering dropped index (%u,%u)", + log_action, gl_index_id.cf_id, gl_index_id.index_id); +} + +bool Rdb_dict_manager::get_max_index_id(uint32_t *const index_id) const { + bool found = false; + std::string value; + + const rocksdb::Status status = get_value(m_key_slice_max_index_id, &value); + if (status.ok()) { + const uchar *const val = (const uchar *)value.c_str(); + const uint16_t &version = rdb_netbuf_to_uint16(val); + if (version == Rdb_key_def::MAX_INDEX_ID_VERSION) { + *index_id = rdb_netbuf_to_uint32(val + Rdb_key_def::VERSION_SIZE); + found = true; + } + } + return found; +} + +bool Rdb_dict_manager::update_max_index_id(rocksdb::WriteBatch *const batch, + const uint32_t &index_id) const { + DBUG_ASSERT(batch != nullptr); + + uint32_t old_index_id = -1; + if (get_max_index_id(&old_index_id)) { + if (old_index_id > index_id) { + sql_print_error("RocksDB: Found max index id %u from data dictionary " + "but trying to update to older value %u. 
This should " + "never happen and possibly a bug.", + old_index_id, index_id); + return true; + } + } + + uchar value_buf[Rdb_key_def::VERSION_SIZE + Rdb_key_def::INDEX_NUMBER_SIZE] = + {0}; + rdb_netbuf_store_uint16(value_buf, Rdb_key_def::MAX_INDEX_ID_VERSION); + rdb_netbuf_store_uint32(value_buf + Rdb_key_def::VERSION_SIZE, index_id); + const rocksdb::Slice value = + rocksdb::Slice((char *)value_buf, sizeof(value_buf)); + batch->Put(m_system_cfh, m_key_slice_max_index_id, value); + return false; +} + +void Rdb_dict_manager::add_stats( + rocksdb::WriteBatch *const batch, + const std::vector &stats) const { + DBUG_ASSERT(batch != nullptr); + + for (const auto &it : stats) { + uchar key_buf[Rdb_key_def::INDEX_NUMBER_SIZE * 3] = {0}; + dump_index_id(key_buf, Rdb_key_def::INDEX_STATISTICS, it.m_gl_index_id); + + // IndexStats::materialize takes complete care of serialization including + // storing the version + const auto value = + Rdb_index_stats::materialize(std::vector{it}, 1.); + + batch->Put(m_system_cfh, rocksdb::Slice((char *)key_buf, sizeof(key_buf)), + value); + } +} + +Rdb_index_stats Rdb_dict_manager::get_stats(GL_INDEX_ID gl_index_id) const { + uchar key_buf[Rdb_key_def::INDEX_NUMBER_SIZE * 3] = {0}; + dump_index_id(key_buf, Rdb_key_def::INDEX_STATISTICS, gl_index_id); + + std::string value; + const rocksdb::Status status = get_value( + rocksdb::Slice(reinterpret_cast(key_buf), sizeof(key_buf)), + &value); + if (status.ok()) { + std::vector v; + // unmaterialize checks if the version matches + if (Rdb_index_stats::unmaterialize(value, &v) == 0 && v.size() == 1) { + return v[0]; + } + } + + return Rdb_index_stats(); +} + +uint Rdb_seq_generator::get_and_update_next_number( + Rdb_dict_manager *const dict) { + DBUG_ASSERT(dict != nullptr); + + uint res; + RDB_MUTEX_LOCK_CHECK(m_mutex); + + res = m_next_number++; + + const std::unique_ptr wb = dict->begin(); + rocksdb::WriteBatch *const batch = wb.get(); + + DBUG_ASSERT(batch != nullptr); + 
dict->update_max_index_id(batch, res); + dict->commit(batch); + + RDB_MUTEX_UNLOCK_CHECK(m_mutex); + + return res; +} + +} // namespace myrocks diff --git a/storage/rocksdb/rdb_datadic.h b/storage/rocksdb/rdb_datadic.h new file mode 100644 index 0000000000000..2ffd3f6e8db1b --- /dev/null +++ b/storage/rocksdb/rdb_datadic.h @@ -0,0 +1,1069 @@ +/* + Copyright (c) 2012,2013 Monty Program Ab + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; version 2 of the License. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ +#pragma once + +/* C++ standard header files */ +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* C standard header files */ +#ifndef _WIN32 +#include +#endif + +/* MyRocks header files */ +#include "./ha_rocksdb.h" +#include "./properties_collector.h" +#include "./rdb_buff.h" +#include "./rdb_utils.h" + +namespace myrocks { + +class Rdb_dict_manager; +class Rdb_key_def; +class Rdb_field_packing; +class Rdb_cf_manager; +class Rdb_ddl_manager; + +/* + @brief + Field packing context. + The idea is to ensure that a call to rdb_index_field_pack_t function + is followed by a call to rdb_make_unpack_info_t. + + @detail + For some datatypes, unpack_info is produced as a side effect of + rdb_index_field_pack_t function call. + For other datatypes, packing is just calling make_sort_key(), while + rdb_make_unpack_info_t is a custom function. 
+ In order to accommodate both cases, we require both calls to be made and + unpack_info is passed as context data between the two. +*/ +class Rdb_pack_field_context { +public: + Rdb_pack_field_context(const Rdb_pack_field_context &) = delete; + Rdb_pack_field_context &operator=(const Rdb_pack_field_context &) = delete; + + explicit Rdb_pack_field_context(Rdb_string_writer *const writer_arg) + : writer(writer_arg) {} + + // NULL means we're not producing unpack_info. + Rdb_string_writer *writer; +}; + +struct Rdb_collation_codec; + +/* + C-style "virtual table" allowing different handling of packing logic based + on the field type. See Rdb_field_packing::setup() implementation. + */ +using rdb_make_unpack_info_t = void (*)(const Rdb_collation_codec *codec, + const Field *field, + Rdb_pack_field_context *pack_ctx); +using rdb_index_field_unpack_t = int (*)(Rdb_field_packing *fpi, Field *field, + uchar *field_ptr, + Rdb_string_reader *reader, + Rdb_string_reader *unpack_reader); +using rdb_index_field_skip_t = int (*)(const Rdb_field_packing *fpi, + const Field *field, + Rdb_string_reader *reader); +using rdb_index_field_pack_t = void (*)(Rdb_field_packing *fpi, Field *field, + uchar *buf, uchar **dst, + Rdb_pack_field_context *pack_ctx); + +const uint RDB_INVALID_KEY_LEN = uint(-1); + +/* How much one checksum occupies when stored in the record */ +const size_t RDB_CHECKSUM_SIZE = sizeof(uint32_t); + +/* + How much the checksum data occupies in record, in total. + It is storing two checksums plus 1 tag-byte. +*/ +const size_t RDB_CHECKSUM_CHUNK_SIZE = 2 * RDB_CHECKSUM_SIZE + 1; + +/* + Checksum data starts from CHECKSUM_DATA_TAG which is followed by two CRC32 + checksums. +*/ +const char RDB_CHECKSUM_DATA_TAG = 0x01; + +/* + Unpack data is variable length. It is a 1 tag-byte plus a + two byte length field. The length field includes the header as well. 
+*/ +const char RDB_UNPACK_DATA_TAG = 0x02; +const size_t RDB_UNPACK_DATA_LEN_SIZE = sizeof(uint16_t); +const size_t RDB_UNPACK_HEADER_SIZE = + sizeof(RDB_UNPACK_DATA_TAG) + RDB_UNPACK_DATA_LEN_SIZE; + +// Possible return values for rdb_index_field_unpack_t functions. +enum { + UNPACK_SUCCESS = 0, + UNPACK_FAILURE = 1, +}; + +/* + An object of this class represents information about an index in an SQL + table. It provides services to encode and decode index tuples. + + Note: a table (as in, on-disk table) has a single Rdb_key_def object which + is shared across multiple TABLE* objects and may be used simultaneously from + different threads. + + There are several data encodings: + + === SQL LAYER === + SQL layer uses two encodings: + + - "Table->record format". This is the format that is used for the data in + the record buffers, table->record[i] + + - KeyTupleFormat (see opt_range.cc) - this is used in parameters to index + lookup functions, like handler::index_read_map(). + + === Inside RocksDB === + Primary Key is stored as a mapping: + + index_tuple -> StoredRecord + + StoredRecord is in Table->record format, except for blobs, which are stored + in-place. See ha_rocksdb::convert_record_to_storage_format for details. + + Secondary indexes are stored as one of two variants: + + index_tuple -> unpack_info + index_tuple -> empty_string + + index_tuple here is the form of key that can be compared with memcmp(), aka + "mem-comparable form". + + unpack_info is extra data that allows to restore the original value from its + mem-comparable form. It is present only if the index supports index-only + reads. 
+*/ + +class Rdb_key_def { +public: + /* Convert a key from KeyTupleFormat to mem-comparable form */ + uint pack_index_tuple(TABLE *const tbl, uchar *const pack_buffer, + uchar *const packed_tuple, const uchar *const key_tuple, + const key_part_map &keypart_map) const; + + uchar *pack_field(Field *const field, + Rdb_field_packing *pack_info, + uchar * tuple, + uchar *const packed_tuple, + uchar *const pack_buffer, + Rdb_string_writer *const unpack_info, + uint *const n_null_fields) const; + /* Convert a key from Table->record format to mem-comparable form */ + uint pack_record(const TABLE *const tbl, uchar *const pack_buffer, + const uchar *const record, uchar *const packed_tuple, + Rdb_string_writer *const unpack_info, + const bool &should_store_row_debug_checksums, + const longlong &hidden_pk_id = 0, uint n_key_parts = 0, + uint *const n_null_fields = nullptr) const; + /* Pack the hidden primary key into mem-comparable form. */ + uint pack_hidden_pk(const longlong &hidden_pk_id, + uchar *const packed_tuple) const; + int unpack_field(Rdb_field_packing *const fpi, + Field *const field, + Rdb_string_reader* reader, + const uchar *const default_value, + Rdb_string_reader* unp_reader) const; + int unpack_record(TABLE *const table, uchar *const buf, + const rocksdb::Slice *const packed_key, + const rocksdb::Slice *const unpack_info, + const bool &verify_row_debug_checksums) const; + + static bool unpack_info_has_checksum(const rocksdb::Slice &unpack_info); + int compare_keys(const rocksdb::Slice *key1, const rocksdb::Slice *key2, + std::size_t *const column_index) const; + + size_t key_length(const TABLE *const table, const rocksdb::Slice &key) const; + + /* Get the key that is the "infimum" for this index */ + inline void get_infimum_key(uchar *const key, uint *const size) const { + rdb_netbuf_store_index(key, m_index_number); + *size = INDEX_NUMBER_SIZE; + } + + /* Get the key that is a "supremum" for this index */ + inline void get_supremum_key(uchar *const key, 
uint *const size) const { + rdb_netbuf_store_index(key, m_index_number + 1); + *size = INDEX_NUMBER_SIZE; + } + + /* Make a key that is right after the given key. */ + static int successor(uchar *const packed_tuple, const uint &len); + + /* + This can be used to compare prefixes. + if X is a prefix of Y, then we consider that X = Y. + */ + // b describes the lookup key, which can be a prefix of a. + int cmp_full_keys(const rocksdb::Slice &a, const rocksdb::Slice &b) const { + DBUG_ASSERT(covers_key(a)); + DBUG_ASSERT(covers_key(b)); + + return memcmp(a.data(), b.data(), std::min(a.size(), b.size())); + } + + /* Check if given mem-comparable key belongs to this index */ + bool covers_key(const rocksdb::Slice &slice) const { + if (slice.size() < INDEX_NUMBER_SIZE) + return false; + + if (memcmp(slice.data(), m_index_number_storage_form, INDEX_NUMBER_SIZE)) + return false; + + return true; + } + + /* + Return true if the passed mem-comparable key + - is from this index, and + - it matches the passed key prefix (the prefix is also in mem-comparable + form) + */ + bool value_matches_prefix(const rocksdb::Slice &value, + const rocksdb::Slice &prefix) const { + return covers_key(value) && !cmp_full_keys(value, prefix); + } + + uint32 get_keyno() const { return m_keyno; } + + uint32 get_index_number() const { return m_index_number; } + + GL_INDEX_ID get_gl_index_id() const { + const GL_INDEX_ID gl_index_id = {m_cf_handle->GetID(), m_index_number}; + return gl_index_id; + } + + int read_memcmp_key_part(const TABLE *table_arg, Rdb_string_reader *reader, + const uint part_num) const; + + /* Must only be called for secondary keys: */ + uint get_primary_key_tuple(const TABLE *const tbl, + const Rdb_key_def &pk_descr, + const rocksdb::Slice *const key, + uchar *const pk_buffer) const; + + uint get_memcmp_sk_parts(const TABLE *table, const rocksdb::Slice &key, + uchar *sk_buffer, uint *n_null_fields) const; + + /* Return max length of mem-comparable form */ + uint 
max_storage_fmt_length() const { return m_maxlength; } + + uint get_key_parts() const { return m_key_parts; } + + /* + Get a field object for key part #part_no + + @detail + SQL layer thinks unique secondary indexes and indexes in partitioned + tables are not "Extended" with Primary Key columns. + + Internally, we always extend all indexes with PK columns. This function + uses our definition of how the index is Extended. + */ + inline Field *get_table_field_for_part_no(TABLE *table, uint part_no) const; + + const std::string &get_name() const { return m_name; } + + const rocksdb::SliceTransform *get_extractor() const { + return m_prefix_extractor.get(); + } + + Rdb_key_def &operator=(const Rdb_key_def &) = delete; + Rdb_key_def(const Rdb_key_def &k); + Rdb_key_def(uint indexnr_arg, uint keyno_arg, + rocksdb::ColumnFamilyHandle *cf_handle_arg, + uint16_t index_dict_version_arg, uchar index_type_arg, + uint16_t kv_format_version_arg, bool is_reverse_cf_arg, + bool is_auto_cf_arg, bool is_per_partition_cf, const char *name, + Rdb_index_stats stats = Rdb_index_stats()); + ~Rdb_key_def(); + + enum { + INDEX_NUMBER_SIZE = 4, + VERSION_SIZE = 2, + CF_NUMBER_SIZE = 4, + CF_FLAG_SIZE = 4, + PACKED_SIZE = 4, // one int + }; + + // bit flags for combining bools when writing to disk + enum { + REVERSE_CF_FLAG = 1, + AUTO_CF_FLAG = 2, + PER_PARTITION_CF_FLAG = 4, + }; + + // Set of flags to ignore when comparing two CF-s and determining if + // they're same. + static const uint CF_FLAGS_TO_IGNORE = PER_PARTITION_CF_FLAG; + + // Data dictionary types + enum DATA_DICT_TYPE { + DDL_ENTRY_INDEX_START_NUMBER = 1, + INDEX_INFO = 2, + CF_DEFINITION = 3, + BINLOG_INFO_INDEX_NUMBER = 4, + DDL_DROP_INDEX_ONGOING = 5, + INDEX_STATISTICS = 6, + MAX_INDEX_ID = 7, + DDL_CREATE_INDEX_ONGOING = 8, + END_DICT_INDEX_ID = 255 + }; + + // Data dictionary schema version. 
Introduce newer versions + // if changing schema layout + enum { + DDL_ENTRY_INDEX_VERSION = 1, + CF_DEFINITION_VERSION = 1, + BINLOG_INFO_INDEX_NUMBER_VERSION = 1, + DDL_DROP_INDEX_ONGOING_VERSION = 1, + MAX_INDEX_ID_VERSION = 1, + DDL_CREATE_INDEX_ONGOING_VERSION = 1, + // Version for index stats is stored in IndexStats struct + }; + + // Index info version. Introduce newer versions when changing the + // INDEX_INFO layout. Update INDEX_INFO_VERSION_LATEST to point to the + // latest version number. + enum { + INDEX_INFO_VERSION_INITIAL = 1, // Obsolete + INDEX_INFO_VERSION_KV_FORMAT, + INDEX_INFO_VERSION_GLOBAL_ID, + // There is no change to data format in this version, but this version + // verifies KV format version, whereas previous versions do not. A version + // bump is needed to prevent older binaries from skipping the KV version + // check inadvertently. + INDEX_INFO_VERSION_VERIFY_KV_FORMAT, + // This normally point to the latest (currently it does). + INDEX_INFO_VERSION_LATEST = INDEX_INFO_VERSION_VERIFY_KV_FORMAT, + }; + + // MyRocks index types + enum { + INDEX_TYPE_PRIMARY = 1, + INDEX_TYPE_SECONDARY = 2, + INDEX_TYPE_HIDDEN_PRIMARY = 3, + }; + + // Key/Value format version for each index type + enum { + PRIMARY_FORMAT_VERSION_INITIAL = 10, + // This change includes: + // - For columns that can be unpacked with unpack_info, PK + // stores the unpack_info. + // - DECIMAL datatype is no longer stored in the row (because + // it can be decoded from its mem-comparable form) + // - VARCHAR-columns use endspace-padding. + PRIMARY_FORMAT_VERSION_UPDATE1 = 11, + PRIMARY_FORMAT_VERSION_LATEST = PRIMARY_FORMAT_VERSION_UPDATE1, + + SECONDARY_FORMAT_VERSION_INITIAL = 10, + // This change the SK format to include unpack_info. 
+ SECONDARY_FORMAT_VERSION_UPDATE1 = 11, + SECONDARY_FORMAT_VERSION_LATEST = SECONDARY_FORMAT_VERSION_UPDATE1, + }; + + void setup(const TABLE *const table, const Rdb_tbl_def *const tbl_def); + + rocksdb::ColumnFamilyHandle *get_cf() const { return m_cf_handle; } + + /* Check if keypart #kp can be unpacked from index tuple */ + inline bool can_unpack(const uint &kp) const; + /* Check if keypart #kp needs unpack info */ + inline bool has_unpack_info(const uint &kp) const; + + /* Check if given table has a primary key */ + static bool table_has_hidden_pk(const TABLE *const table); + + void report_checksum_mismatch(const bool &is_key, const char *const data, + const size_t data_size) const; + + /* Check if index is at least pk_min if it is a PK, + or at least sk_min if SK.*/ + bool index_format_min_check(const int &pk_min, const int &sk_min) const; + +private: +#ifndef DBUG_OFF + inline bool is_storage_available(const int &offset, const int &needed) const { + const int storage_length = static_cast(max_storage_fmt_length()); + return (storage_length - offset) >= needed; + } +#endif // DBUG_OFF + + /* Global number of this index (used as prefix in StorageFormat) */ + const uint32 m_index_number; + + uchar m_index_number_storage_form[INDEX_NUMBER_SIZE]; + + rocksdb::ColumnFamilyHandle *m_cf_handle; + +public: + uint16_t m_index_dict_version; + uchar m_index_type; + /* KV format version for the index id */ + uint16_t m_kv_format_version; + /* If true, the column family stores data in the reverse order */ + bool m_is_reverse_cf; + + bool m_is_auto_cf; + + /* If true, then column family is created per partition. */ + bool m_is_per_partition_cf; + + std::string m_name; + mutable Rdb_index_stats m_stats; + +private: + friend class Rdb_tbl_def; // for m_index_number above + + /* Number of key parts in the primary key*/ + uint m_pk_key_parts; + + /* + pk_part_no[X]=Y means that keypart #X of this key is key part #Y of the + primary key. 
Y==-1 means this column is not present in the primary key. + */ + uint *m_pk_part_no; + + /* Array of index-part descriptors. */ + Rdb_field_packing *m_pack_info; + + uint m_keyno; /* number of this index in the table */ + + /* + Number of key parts in the index (including "index extension"). This is how + many elements are in the m_pack_info array. + */ + uint m_key_parts; + + /* Prefix extractor for the column family of the key definiton */ + std::shared_ptr m_prefix_extractor; + + /* Maximum length of the mem-comparable form. */ + uint m_maxlength; + + /* mutex to protect setup */ + mysql_mutex_t m_mutex; +}; + +// "Simple" collations (those specified in strings/ctype-simple.c) are simple +// because their strnxfrm function maps one byte to one byte. However, the +// mapping is not injective, so the inverse function will take in an extra +// index parameter containing information to disambiguate what the original +// character was. +// +// The m_enc* members are for encoding. Generally, we want encoding to be: +// src -> (dst, idx) +// +// Since strnxfrm already gives us dst, we just need m_enc_idx[src] to give us +// idx. +// +// For the inverse, we have: +// (dst, idx) -> src +// +// We have m_dec_idx[idx][dst] = src to get our original character back. +// +struct Rdb_collation_codec { + const my_core::CHARSET_INFO *m_cs; + // The first element unpacks VARCHAR(n), the second one - CHAR(n). 
+ std::array m_make_unpack_info_func; + std::array m_unpack_func; + + std::array m_enc_idx; + std::array m_enc_size; + + std::array m_dec_size; + std::vector> m_dec_idx; +}; + +extern mysql_mutex_t rdb_collation_data_mutex; +extern mysql_mutex_t rdb_mem_cmp_space_mutex; +extern std::array + rdb_collation_data; + +class Rdb_field_packing { +public: + Rdb_field_packing(const Rdb_field_packing &) = delete; + Rdb_field_packing &operator=(const Rdb_field_packing &) = delete; + Rdb_field_packing() = default; + + /* Length of mem-comparable image of the field, in bytes */ + int m_max_image_len; + + /* Length of image in the unpack data */ + int m_unpack_data_len; + int m_unpack_data_offset; + + bool m_maybe_null; /* TRUE <=> NULL-byte is stored */ + + /* + Valid only for VARCHAR fields. + */ + const CHARSET_INFO *m_varchar_charset; + + // (Valid when Variable Length Space Padded Encoding is used): + uint m_segment_size; // size of segment used + + // number of bytes used to store number of trimmed (or added) + // spaces in the upack_info + bool m_unpack_info_uses_two_bytes; + + const std::vector *space_xfrm; + size_t space_xfrm_len; + size_t space_mb_len; + + const Rdb_collation_codec *m_charset_codec; + + /* + @return TRUE: this field makes use of unpack_info. + */ + bool uses_unpack_info() const { return (m_make_unpack_info_func != nullptr); } + + /* TRUE means unpack_info stores the original field value */ + bool m_unpack_info_stores_value; + + rdb_index_field_pack_t m_pack_func; + rdb_make_unpack_info_t m_make_unpack_info_func; + + /* + This function takes + - mem-comparable form + - unpack_info data + and restores the original value. + */ + rdb_index_field_unpack_t m_unpack_func; + + /* + This function skips over mem-comparable form. + */ + rdb_index_field_skip_t m_skip_func; + +private: + /* + Location of the field in the table (key number and key part number). + + Note that this describes not the field, but rather a position of field in + the index. 
Consider an example: + + col1 VARCHAR (100), + INDEX idx1 (col1)), + INDEX idx2 (col1(10)), + + Here, idx2 has a special Field object that is set to describe a 10-char + prefix of col1. + + We must also store the keynr. It is needed for implicit "extended keys". + Every key in MyRocks needs to include PK columns. Generally, SQL layer + includes PK columns as part of its "Extended Keys" feature, but sometimes + it does not (known examples are unique secondary indexes and partitioned + tables). + In that case, MyRocks's index descriptor has invisible suffix of PK + columns (and the point is that these columns are parts of PK, not parts + of the current index). + */ + uint m_keynr; + uint m_key_part; + +public: + bool setup(const Rdb_key_def *const key_descr, const Field *const field, + const uint &keynr_arg, const uint &key_part_arg, + const uint16 &key_length); + Field *get_field_in_table(const TABLE *const tbl) const; + void fill_hidden_pk_val(uchar **dst, const longlong &hidden_pk_id) const; +}; + +/* + Descriptor telling how to decode/encode a field to on-disk record storage + format. Not all information is in the structure yet, but eventually we + want to have as much as possible there to avoid virtual calls. + + For encoding/decoding of index tuples, see Rdb_key_def. + */ +class Rdb_field_encoder { +public: + Rdb_field_encoder(const Rdb_field_encoder &) = delete; + Rdb_field_encoder &operator=(const Rdb_field_encoder &) = delete; + /* + STORE_NONE is set when a column can be decoded solely from their + mem-comparable form. + STORE_SOME is set when a column can be decoded from their mem-comparable + form plus unpack_info. + STORE_ALL is set when a column cannot be decoded, so its original value + must be stored in the PK records. 
+ */ + enum STORAGE_TYPE { + STORE_NONE, + STORE_SOME, + STORE_ALL, + }; + STORAGE_TYPE m_storage_type; + + uint m_null_offset; + uint16 m_field_index; + + uchar m_null_mask; // 0 means the field cannot be null + + my_core::enum_field_types m_field_type; + + uint m_pack_length_in_rec; + + bool maybe_null() const { return m_null_mask != 0; } + + bool uses_variable_len_encoding() const { + return (m_field_type == MYSQL_TYPE_BLOB || + m_field_type == MYSQL_TYPE_VARCHAR); + } +}; + +inline Field *Rdb_key_def::get_table_field_for_part_no(TABLE *table, + uint part_no) const { + DBUG_ASSERT(part_no < get_key_parts()); + return m_pack_info[part_no].get_field_in_table(table); +} + +inline bool Rdb_key_def::can_unpack(const uint &kp) const { + DBUG_ASSERT(kp < m_key_parts); + return (m_pack_info[kp].m_unpack_func != nullptr); +} + +inline bool Rdb_key_def::has_unpack_info(const uint &kp) const { + DBUG_ASSERT(kp < m_key_parts); + return m_pack_info[kp].uses_unpack_info(); +} + +/* + A table definition. This is an entry in the mapping + + dbname.tablename -> {index_nr, index_nr, ... } + + There is only one Rdb_tbl_def object for a given table. + That's why we keep auto_increment value here, too. 
+*/ + +class Rdb_tbl_def { +private: + void check_if_is_mysql_system_table(); + + /* Stores 'dbname.tablename' */ + std::string m_dbname_tablename; + + /* Store the db name, table name, and partition name */ + std::string m_dbname; + std::string m_tablename; + std::string m_partition; + + void set_name(const std::string &name); + +public: + Rdb_tbl_def(const Rdb_tbl_def &) = delete; + Rdb_tbl_def &operator=(const Rdb_tbl_def &) = delete; + + explicit Rdb_tbl_def(const std::string &name) + : m_key_descr_arr(nullptr), m_hidden_pk_val(1), m_auto_incr_val(1) { + set_name(name); + } + + Rdb_tbl_def(const char *const name, const size_t &len) + : m_key_descr_arr(nullptr), m_hidden_pk_val(1), m_auto_incr_val(1) { + set_name(std::string(name, len)); + } + + explicit Rdb_tbl_def(const rocksdb::Slice &slice, const size_t &pos = 0) + : m_key_descr_arr(nullptr), m_hidden_pk_val(1), m_auto_incr_val(1) { + set_name(std::string(slice.data() + pos, slice.size() - pos)); + } + + ~Rdb_tbl_def(); + + /* Number of indexes */ + uint m_key_count; + + /* Array of index descriptors */ + std::shared_ptr *m_key_descr_arr; + + std::atomic m_hidden_pk_val; + std::atomic m_auto_incr_val; + + /* Is this a system table */ + bool m_is_mysql_system_table; + + bool put_dict(Rdb_dict_manager *const dict, rocksdb::WriteBatch *const batch, + uchar *const key, const size_t &keylen); + + const std::string &full_tablename() const { return m_dbname_tablename; } + const std::string &base_dbname() const { return m_dbname; } + const std::string &base_tablename() const { return m_tablename; } + const std::string &base_partition() const { return m_partition; } +}; + +/* + A thread-safe sequential number generator. Its performance is not a concern + hence it is ok to protect it by a mutex. 
+*/ + +class Rdb_seq_generator { + uint m_next_number = 0; + + mysql_mutex_t m_mutex; + +public: + Rdb_seq_generator(const Rdb_seq_generator &) = delete; + Rdb_seq_generator &operator=(const Rdb_seq_generator &) = delete; + Rdb_seq_generator() = default; + + void init(const uint &initial_number) { + mysql_mutex_init(0, &m_mutex, MY_MUTEX_INIT_FAST); + m_next_number = initial_number; + } + + uint get_and_update_next_number(Rdb_dict_manager *const dict); + + void cleanup() { mysql_mutex_destroy(&m_mutex); } +}; + +interface Rdb_tables_scanner { + virtual int add_table(Rdb_tbl_def * tdef) = 0; +}; + +/* + This contains a mapping of + + dbname.table_name -> array{Rdb_key_def}. + + objects are shared among all threads. +*/ + +class Rdb_ddl_manager { + Rdb_dict_manager *m_dict = nullptr; + my_core::HASH m_ddl_hash; // Contains Rdb_tbl_def elements + // Maps index id to + std::map> m_index_num_to_keydef; + + // Maps index id to key definitons not yet committed to data dictionary. + // This is mainly used to store key definitions during ALTER TABLE. 
+ std::map> + m_index_num_to_uncommitted_keydef; + mysql_rwlock_t m_rwlock; + + Rdb_seq_generator m_sequence; + // A queue of table stats to write into data dictionary + // It is produced by event listener (ie compaction and flush threads) + // and consumed by the rocksdb background thread + std::map m_stats2store; + + const std::shared_ptr &find(GL_INDEX_ID gl_index_id); + +public: + Rdb_ddl_manager(const Rdb_ddl_manager &) = delete; + Rdb_ddl_manager &operator=(const Rdb_ddl_manager &) = delete; + Rdb_ddl_manager() {} + + /* Load the data dictionary from on-disk storage */ + bool init(Rdb_dict_manager *const dict_arg, Rdb_cf_manager *const cf_manager, + const uint32_t &validate_tables); + + void cleanup(); + + Rdb_tbl_def *find(const std::string &table_name, const bool &lock = true); + std::shared_ptr safe_find(GL_INDEX_ID gl_index_id); + void set_stats(const std::unordered_map &stats); + void adjust_stats(const std::vector &new_data, + const std::vector &deleted_data = + std::vector()); + void persist_stats(const bool &sync = false); + + /* Modify the mapping and write it to on-disk storage */ + int put_and_write(Rdb_tbl_def *const key_descr, + rocksdb::WriteBatch *const batch); + void remove(Rdb_tbl_def *const rec, rocksdb::WriteBatch *const batch, + const bool &lock = true); + bool rename(const std::string &from, const std::string &to, + rocksdb::WriteBatch *const batch); + + uint get_and_update_next_number(Rdb_dict_manager *const dict) { + return m_sequence.get_and_update_next_number(dict); + } + + /* Walk the data dictionary */ + int scan_for_tables(Rdb_tables_scanner *tables_scanner); + + void erase_index_num(const GL_INDEX_ID &gl_index_id); + void add_uncommitted_keydefs( + const std::unordered_set> &indexes); + void remove_uncommitted_keydefs( + const std::unordered_set> &indexes); + +private: + /* Put the data into in-memory table (only) */ + int put(Rdb_tbl_def *const key_descr, const bool &lock = true); + + /* Helper functions to be passed to 
my_core::HASH object */ + static const uchar *get_hash_key(Rdb_tbl_def *const rec, size_t *const length, + my_bool not_used MY_ATTRIBUTE((unused))); + static void free_hash_elem(void *const data); + + bool validate_schemas(); +}; + +/* + Writing binlog information into RocksDB at commit(), + and retrieving binlog information at crash recovery. + commit() and recovery are always executed by at most single client + at the same time, so concurrency control is not needed. + + Binlog info is stored in RocksDB as the following. + key: BINLOG_INFO_INDEX_NUMBER + value: packed single row: + binlog_name_length (2 byte form) + binlog_name + binlog_position (4 byte form) + binlog_gtid_length (2 byte form) + binlog_gtid +*/ +class Rdb_binlog_manager { +public: + Rdb_binlog_manager(const Rdb_binlog_manager &) = delete; + Rdb_binlog_manager &operator=(const Rdb_binlog_manager &) = delete; + Rdb_binlog_manager() = default; + + bool init(Rdb_dict_manager *const dict); + void cleanup(); + void update(const char *const binlog_name, const my_off_t binlog_pos, + const char *const binlog_max_gtid, + rocksdb::WriteBatchBase *const batch); + bool read(char *const binlog_name, my_off_t *const binlog_pos, + char *const binlog_gtid) const; + void update_slave_gtid_info(const uint &id, const char *const db, + const char *const gtid, + rocksdb::WriteBatchBase *const write_batch); + +private: + Rdb_dict_manager *m_dict = nullptr; + uchar m_key_buf[Rdb_key_def::INDEX_NUMBER_SIZE] = {0}; + rocksdb::Slice m_key_slice; + + rocksdb::Slice pack_value(uchar *const buf, const char *const binlog_name, + const my_off_t &binlog_pos, + const char *const binlog_gtid) const; + bool unpack_value(const uchar *const value, char *const binlog_name, + my_off_t *const binlog_pos, char *const binlog_gtid) const; + + std::atomic m_slave_gtid_info_tbl; +}; + +/* + Rdb_dict_manager manages how MySQL on RocksDB (MyRocks) stores its + internal data dictionary. 
+ MyRocks stores data dictionary on dedicated system column family + named __system__. The system column family is used by MyRocks + internally only, and not used by applications. + + Currently MyRocks has the following data dictionary data models. + + 1. Table Name => internal index id mappings + key: Rdb_key_def::DDL_ENTRY_INDEX_START_NUMBER(0x1) + dbname.tablename + value: version, {cf_id, index_id}*n_indexes_of_the_table + version is 2 bytes. cf_id and index_id are 4 bytes. + + 2. internal cf_id, index id => index information + key: Rdb_key_def::INDEX_INFO(0x2) + cf_id + index_id + value: version, index_type, kv_format_version + index_type is 1 byte, version and kv_format_version are 2 bytes. + + 3. CF id => CF flags + key: Rdb_key_def::CF_DEFINITION(0x3) + cf_id + value: version, {is_reverse_cf, is_auto_cf, is_per_partition_cf} + cf_flags is 4 bytes in total. + + 4. Binlog entry (updated at commit) + key: Rdb_key_def::BINLOG_INFO_INDEX_NUMBER (0x4) + value: version, {binlog_name,binlog_pos,binlog_gtid} + + 5. Ongoing drop index entry + key: Rdb_key_def::DDL_DROP_INDEX_ONGOING(0x5) + cf_id + index_id + value: version + + 6. index stats + key: Rdb_key_def::INDEX_STATISTICS(0x6) + cf_id + index_id + value: version, {materialized PropertiesCollector::IndexStats} + + 7. maximum index id + key: Rdb_key_def::MAX_INDEX_ID(0x7) + value: index_id + index_id is 4 bytes + + 8. Ongoing create index entry + key: Rdb_key_def::DDL_CREATE_INDEX_ONGOING(0x8) + cf_id + index_id + value: version + + Data dictionary operations are atomic inside RocksDB. For example, + when creating a table with two indexes, it is necessary to call Put + three times. They have to be atomic. Rdb_dict_manager has a wrapper function + begin() and commit() to make it easier to do atomic operations. 
+ +*/ +class Rdb_dict_manager { +private: + mysql_mutex_t m_mutex; + rocksdb::DB *m_db = nullptr; + rocksdb::ColumnFamilyHandle *m_system_cfh = nullptr; + /* Utility to put INDEX_INFO and CF_DEFINITION */ + + uchar m_key_buf_max_index_id[Rdb_key_def::INDEX_NUMBER_SIZE] = {0}; + rocksdb::Slice m_key_slice_max_index_id; + + static void dump_index_id(uchar *const netbuf, + Rdb_key_def::DATA_DICT_TYPE dict_type, + const GL_INDEX_ID &gl_index_id); + void delete_with_prefix(rocksdb::WriteBatch *const batch, + Rdb_key_def::DATA_DICT_TYPE dict_type, + const GL_INDEX_ID &gl_index_id) const; + /* Functions for fast DROP TABLE/INDEX */ + void resume_drop_indexes() const; + void log_start_drop_table(const std::shared_ptr *const key_descr, + const uint32 &n_keys, + const char *const log_action) const; + void log_start_drop_index(GL_INDEX_ID gl_index_id, + const char *log_action) const; + +public: + Rdb_dict_manager(const Rdb_dict_manager &) = delete; + Rdb_dict_manager &operator=(const Rdb_dict_manager &) = delete; + Rdb_dict_manager() = default; + + bool init(rocksdb::DB *const rdb_dict, Rdb_cf_manager *const cf_manager); + + inline void cleanup() { mysql_mutex_destroy(&m_mutex); } + + inline void lock() { RDB_MUTEX_LOCK_CHECK(m_mutex); } + + inline void unlock() { RDB_MUTEX_UNLOCK_CHECK(m_mutex); } + + /* Raw RocksDB operations */ + std::unique_ptr begin() const; + int commit(rocksdb::WriteBatch *const batch, const bool &sync = true) const; + rocksdb::Status get_value(const rocksdb::Slice &key, + std::string *const value) const; + void put_key(rocksdb::WriteBatchBase *const batch, const rocksdb::Slice &key, + const rocksdb::Slice &value) const; + void delete_key(rocksdb::WriteBatchBase *batch, + const rocksdb::Slice &key) const; + rocksdb::Iterator *new_iterator() const; + + /* Internal Index id => CF */ + void add_or_update_index_cf_mapping(rocksdb::WriteBatch *batch, + const uchar index_type, + const uint16_t kv_version, + const uint index_id, + const uint cf_id) const; + 
void delete_index_info(rocksdb::WriteBatch *batch, + const GL_INDEX_ID &index_id) const; + bool get_index_info(const GL_INDEX_ID &gl_index_id, + uint16_t *index_dict_version, uchar *index_type, + uint16_t *kv_version) const; + + /* CF id => CF flags */ + void add_cf_flags(rocksdb::WriteBatch *const batch, const uint &cf_id, + const uint &cf_flags) const; + bool get_cf_flags(const uint &cf_id, uint *const cf_flags) const; + + /* Functions for fast CREATE/DROP TABLE/INDEX */ + void + get_ongoing_index_operation(std::unordered_set *gl_index_ids, + Rdb_key_def::DATA_DICT_TYPE dd_type) const; + bool is_index_operation_ongoing(const GL_INDEX_ID &gl_index_id, + Rdb_key_def::DATA_DICT_TYPE dd_type) const; + void start_ongoing_index_operation(rocksdb::WriteBatch *batch, + const GL_INDEX_ID &gl_index_id, + Rdb_key_def::DATA_DICT_TYPE dd_type) const; + void end_ongoing_index_operation(rocksdb::WriteBatch *const batch, + const GL_INDEX_ID &gl_index_id, + Rdb_key_def::DATA_DICT_TYPE dd_type) const; + bool is_drop_index_empty() const; + void add_drop_table(std::shared_ptr *const key_descr, + const uint32 &n_keys, + rocksdb::WriteBatch *const batch) const; + void add_drop_index(const std::unordered_set &gl_index_ids, + rocksdb::WriteBatch *const batch) const; + void add_create_index(const std::unordered_set &gl_index_ids, + rocksdb::WriteBatch *const batch) const; + void + finish_indexes_operation(const std::unordered_set &gl_index_ids, + Rdb_key_def::DATA_DICT_TYPE dd_type) const; + void rollback_ongoing_index_creation() const; + + inline void get_ongoing_drop_indexes( + std::unordered_set *gl_index_ids) const { + get_ongoing_index_operation(gl_index_ids, + Rdb_key_def::DDL_DROP_INDEX_ONGOING); + } + inline void get_ongoing_create_indexes( + std::unordered_set *gl_index_ids) const { + get_ongoing_index_operation(gl_index_ids, + Rdb_key_def::DDL_CREATE_INDEX_ONGOING); + } + inline void start_drop_index(rocksdb::WriteBatch *wb, + const GL_INDEX_ID &gl_index_id) const { + 
start_ongoing_index_operation(wb, gl_index_id, + Rdb_key_def::DDL_DROP_INDEX_ONGOING); + } + inline void start_create_index(rocksdb::WriteBatch *wb, + const GL_INDEX_ID &gl_index_id) const { + start_ongoing_index_operation(wb, gl_index_id, + Rdb_key_def::DDL_CREATE_INDEX_ONGOING); + } + inline void finish_drop_indexes( + const std::unordered_set &gl_index_ids) const { + finish_indexes_operation(gl_index_ids, Rdb_key_def::DDL_DROP_INDEX_ONGOING); + } + inline void finish_create_indexes( + const std::unordered_set &gl_index_ids) const { + finish_indexes_operation(gl_index_ids, + Rdb_key_def::DDL_CREATE_INDEX_ONGOING); + } + inline bool is_drop_index_ongoing(const GL_INDEX_ID &gl_index_id) const { + return is_index_operation_ongoing(gl_index_id, + Rdb_key_def::DDL_DROP_INDEX_ONGOING); + } + inline bool is_create_index_ongoing(const GL_INDEX_ID &gl_index_id) const { + return is_index_operation_ongoing(gl_index_id, + Rdb_key_def::DDL_CREATE_INDEX_ONGOING); + } + + bool get_max_index_id(uint32_t *const index_id) const; + bool update_max_index_id(rocksdb::WriteBatch *const batch, + const uint32_t &index_id) const; + void add_stats(rocksdb::WriteBatch *const batch, + const std::vector &stats) const; + Rdb_index_stats get_stats(GL_INDEX_ID gl_index_id) const; +}; + +} // namespace myrocks diff --git a/storage/rocksdb/rdb_i_s.cc b/storage/rocksdb/rdb_i_s.cc new file mode 100644 index 0000000000000..ca6fc914d4ae7 --- /dev/null +++ b/storage/rocksdb/rdb_i_s.cc @@ -0,0 +1,1582 @@ +/* + Copyright (c) 2012, Monty Program Ab + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; version 2 of the License. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. 
+ + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ + +#include + +/* C++ standard header files */ +#include +#include +#include + +/* MySQL header files */ +#include + +/* RocksDB header files */ +#include "rocksdb/compaction_filter.h" +#include "rocksdb/convenience.h" +#include "rocksdb/filter_policy.h" +#include "rocksdb/memtablerep.h" +#include "rocksdb/merge_operator.h" +#include "rocksdb/slice_transform.h" +#include "rocksdb/utilities/transaction_db.h" + +/* MyRocks header files */ +#include "./ha_rocksdb.h" +#include "./ha_rocksdb_proto.h" +#include "./rdb_cf_manager.h" +#include "./rdb_datadic.h" +#include "./rdb_utils.h" + +namespace myrocks { + +/** + Define the INFORMATION_SCHEMA (I_S) structures needed by MyRocks storage + engine. +*/ + +#define ROCKSDB_FIELD_INFO(_name_, _len_, _type_, _flag_) \ + { _name_, _len_, _type_, 0, _flag_, nullptr, 0 } + +#define ROCKSDB_FIELD_INFO_END \ + ROCKSDB_FIELD_INFO(nullptr, 0, MYSQL_TYPE_NULL, 0) + +/* + Support for INFORMATION_SCHEMA.ROCKSDB_CFSTATS dynamic table + */ +namespace RDB_CFSTATS_FIELD { +enum { CF_NAME = 0, STAT_TYPE, VALUE }; +} // namespace RDB_CFSTATS_FIELD + +static ST_FIELD_INFO rdb_i_s_cfstats_fields_info[] = { + ROCKSDB_FIELD_INFO("CF_NAME", NAME_LEN + 1, MYSQL_TYPE_STRING, 0), + ROCKSDB_FIELD_INFO("STAT_TYPE", NAME_LEN + 1, MYSQL_TYPE_STRING, 0), + ROCKSDB_FIELD_INFO("VALUE", sizeof(uint64_t), MYSQL_TYPE_LONGLONG, 0), + ROCKSDB_FIELD_INFO_END}; + +static int rdb_i_s_cfstats_fill_table( + my_core::THD *const thd, my_core::TABLE_LIST *const tables, + my_core::Item *const cond MY_ATTRIBUTE((__unused__))) { + DBUG_ENTER_FUNC(); + + bool ret; + uint64_t val; + + const std::vector> cf_properties = { + {rocksdb::DB::Properties::kNumImmutableMemTable, + "NUM_IMMUTABLE_MEM_TABLE"}, + {rocksdb::DB::Properties::kMemTableFlushPending, + 
"MEM_TABLE_FLUSH_PENDING"}, + {rocksdb::DB::Properties::kCompactionPending, "COMPACTION_PENDING"}, + {rocksdb::DB::Properties::kCurSizeActiveMemTable, + "CUR_SIZE_ACTIVE_MEM_TABLE"}, + {rocksdb::DB::Properties::kCurSizeAllMemTables, + "CUR_SIZE_ALL_MEM_TABLES"}, + {rocksdb::DB::Properties::kNumEntriesActiveMemTable, + "NUM_ENTRIES_ACTIVE_MEM_TABLE"}, + {rocksdb::DB::Properties::kNumEntriesImmMemTables, + "NUM_ENTRIES_IMM_MEM_TABLES"}, + {rocksdb::DB::Properties::kEstimateTableReadersMem, + "NON_BLOCK_CACHE_SST_MEM_USAGE"}, + {rocksdb::DB::Properties::kNumLiveVersions, "NUM_LIVE_VERSIONS"}}; + + rocksdb::DB *const rdb = rdb_get_rocksdb_db(); + const Rdb_cf_manager &cf_manager = rdb_get_cf_manager(); + DBUG_ASSERT(rdb != nullptr); + + for (const auto &cf_name : cf_manager.get_cf_names()) { + rocksdb::ColumnFamilyHandle *cfh; + bool is_automatic; + + /* + Only the cf name is important. Whether it was generated automatically + does not matter, so is_automatic is ignored. + */ + cfh = cf_manager.get_cf(cf_name.c_str(), "", nullptr, &is_automatic); + if (cfh == nullptr) + continue; + + for (const auto &property : cf_properties) { + if (!rdb->GetIntProperty(cfh, property.first, &val)) + continue; + + DBUG_ASSERT(tables != nullptr); + + tables->table->field[RDB_CFSTATS_FIELD::CF_NAME]->store( + cf_name.c_str(), cf_name.size(), system_charset_info); + tables->table->field[RDB_CFSTATS_FIELD::STAT_TYPE]->store( + property.second.c_str(), property.second.size(), system_charset_info); + tables->table->field[RDB_CFSTATS_FIELD::VALUE]->store(val, true); + + ret = my_core::schema_table_store_record(thd, tables->table); + + if (ret) + DBUG_RETURN(ret); + } + } + DBUG_RETURN(0); +} + +static int rdb_i_s_cfstats_init(void *p) { + DBUG_ENTER_FUNC(); + + DBUG_ASSERT(p != nullptr); + + my_core::ST_SCHEMA_TABLE *schema; + + schema = (my_core::ST_SCHEMA_TABLE *)p; + + schema->fields_info = rdb_i_s_cfstats_fields_info; + schema->fill_table = rdb_i_s_cfstats_fill_table; + + DBUG_RETURN(0); 
+} + +/* + Support for INFORMATION_SCHEMA.ROCKSDB_DBSTATS dynamic table + */ +namespace RDB_DBSTATS_FIELD { +enum { STAT_TYPE = 0, VALUE }; +} // namespace RDB_DBSTATS_FIELD + +static ST_FIELD_INFO rdb_i_s_dbstats_fields_info[] = { + ROCKSDB_FIELD_INFO("STAT_TYPE", NAME_LEN + 1, MYSQL_TYPE_STRING, 0), + ROCKSDB_FIELD_INFO("VALUE", sizeof(uint64_t), MYSQL_TYPE_LONGLONG, 0), + ROCKSDB_FIELD_INFO_END}; + +static int rdb_i_s_dbstats_fill_table( + my_core::THD *const thd, my_core::TABLE_LIST *const tables, + my_core::Item *const cond MY_ATTRIBUTE((__unused__))) { + DBUG_ENTER_FUNC(); + + bool ret; + uint64_t val; + + const std::vector> db_properties = { + {rocksdb::DB::Properties::kBackgroundErrors, "DB_BACKGROUND_ERRORS"}, + {rocksdb::DB::Properties::kNumSnapshots, "DB_NUM_SNAPSHOTS"}, + {rocksdb::DB::Properties::kOldestSnapshotTime, + "DB_OLDEST_SNAPSHOT_TIME"}}; + + rocksdb::DB *const rdb = rdb_get_rocksdb_db(); + const rocksdb::BlockBasedTableOptions &table_options = + rdb_get_table_options(); + + for (const auto &property : db_properties) { + if (!rdb->GetIntProperty(property.first, &val)) + continue; + + DBUG_ASSERT(tables != nullptr); + + tables->table->field[RDB_DBSTATS_FIELD::STAT_TYPE]->store( + property.second.c_str(), property.second.size(), system_charset_info); + tables->table->field[RDB_DBSTATS_FIELD::VALUE]->store(val, true); + + ret = my_core::schema_table_store_record(thd, tables->table); + + if (ret) + DBUG_RETURN(ret); + } + + /* + Currently, this can only show the usage of a block cache allocated + directly by the handlerton. If the column family config specifies a block + cache (i.e. the column family option has a parameter such as + block_based_table_factory={block_cache=1G}), then the block cache is + allocated within the rocksdb::GetColumnFamilyOptionsFromString(). + + There is no interface to retrieve this block cache, nor fetch the usage + information from the column family. + */ + val = (table_options.block_cache ? 
table_options.block_cache->GetUsage() : 0); + tables->table->field[RDB_DBSTATS_FIELD::STAT_TYPE]->store( + STRING_WITH_LEN("DB_BLOCK_CACHE_USAGE"), system_charset_info); + tables->table->field[RDB_DBSTATS_FIELD::VALUE]->store(val, true); + + ret = my_core::schema_table_store_record(thd, tables->table); + + DBUG_RETURN(ret); +} + +static int rdb_i_s_dbstats_init(void *const p) { + DBUG_ENTER_FUNC(); + + DBUG_ASSERT(p != nullptr); + + my_core::ST_SCHEMA_TABLE *schema; + + schema = (my_core::ST_SCHEMA_TABLE *)p; + + schema->fields_info = rdb_i_s_dbstats_fields_info; + schema->fill_table = rdb_i_s_dbstats_fill_table; + + DBUG_RETURN(0); +} + +/* + Support for INFORMATION_SCHEMA.ROCKSDB_PERF_CONTEXT dynamic table + */ +namespace RDB_PERF_CONTEXT_FIELD { +enum { TABLE_SCHEMA = 0, TABLE_NAME, PARTITION_NAME, STAT_TYPE, VALUE }; +} // namespace RDB_PERF_CONTEXT_FIELD + +static ST_FIELD_INFO rdb_i_s_perf_context_fields_info[] = { + ROCKSDB_FIELD_INFO("TABLE_SCHEMA", NAME_LEN + 1, MYSQL_TYPE_STRING, 0), + ROCKSDB_FIELD_INFO("TABLE_NAME", NAME_LEN + 1, MYSQL_TYPE_STRING, 0), + ROCKSDB_FIELD_INFO("PARTITION_NAME", NAME_LEN + 1, MYSQL_TYPE_STRING, + MY_I_S_MAYBE_NULL), + ROCKSDB_FIELD_INFO("STAT_TYPE", NAME_LEN + 1, MYSQL_TYPE_STRING, 0), + ROCKSDB_FIELD_INFO("VALUE", sizeof(uint64_t), MYSQL_TYPE_LONGLONG, 0), + ROCKSDB_FIELD_INFO_END}; + +static int rdb_i_s_perf_context_fill_table( + my_core::THD *const thd, my_core::TABLE_LIST *const tables, + my_core::Item *const cond MY_ATTRIBUTE((__unused__))) { + DBUG_ENTER_FUNC(); + + DBUG_ASSERT(thd != nullptr); + DBUG_ASSERT(tables != nullptr); + + int ret = 0; + Field **field = tables->table->field; + + const std::vector tablenames = rdb_get_open_table_names(); + for (const auto &it : tablenames) { + std::string str, dbname, tablename, partname; + Rdb_perf_counters counters; + + if (rdb_normalize_tablename(it, &str)) { + DBUG_RETURN(HA_ERR_INTERNAL_ERROR); + } + + if (rdb_split_normalized_tablename(str, &dbname, &tablename, 
&partname)) { + continue; + } + + if (rdb_get_table_perf_counters(it.c_str(), &counters)) { + continue; + } + + DBUG_ASSERT(field != nullptr); + + field[RDB_PERF_CONTEXT_FIELD::TABLE_SCHEMA]->store( + dbname.c_str(), dbname.size(), system_charset_info); + field[RDB_PERF_CONTEXT_FIELD::TABLE_NAME]->store( + tablename.c_str(), tablename.size(), system_charset_info); + if (partname.size() == 0) { + field[RDB_PERF_CONTEXT_FIELD::PARTITION_NAME]->set_null(); + } else { + field[RDB_PERF_CONTEXT_FIELD::PARTITION_NAME]->set_notnull(); + field[RDB_PERF_CONTEXT_FIELD::PARTITION_NAME]->store( + partname.c_str(), partname.size(), system_charset_info); + } + + for (int i = 0; i < PC_MAX_IDX; i++) { + field[RDB_PERF_CONTEXT_FIELD::STAT_TYPE]->store( + rdb_pc_stat_types[i].c_str(), rdb_pc_stat_types[i].size(), + system_charset_info); + field[RDB_PERF_CONTEXT_FIELD::VALUE]->store(counters.m_value[i], true); + + ret = my_core::schema_table_store_record(thd, tables->table); + if (ret) + DBUG_RETURN(ret); + } + } + + DBUG_RETURN(0); +} + +static int rdb_i_s_perf_context_init(void *const p) { + DBUG_ENTER_FUNC(); + + DBUG_ASSERT(p != nullptr); + + my_core::ST_SCHEMA_TABLE *schema; + + schema = (my_core::ST_SCHEMA_TABLE *)p; + + schema->fields_info = rdb_i_s_perf_context_fields_info; + schema->fill_table = rdb_i_s_perf_context_fill_table; + + DBUG_RETURN(0); +} + +/* + Support for INFORMATION_SCHEMA.ROCKSDB_PERF_CONTEXT_GLOBAL dynamic table + */ +namespace RDB_PERF_CONTEXT_GLOBAL_FIELD { +enum { STAT_TYPE = 0, VALUE }; +} // namespace RDB_PERF_CONTEXT_GLOBAL_FIELD + +static ST_FIELD_INFO rdb_i_s_perf_context_global_fields_info[] = { + ROCKSDB_FIELD_INFO("STAT_TYPE", NAME_LEN + 1, MYSQL_TYPE_STRING, 0), + ROCKSDB_FIELD_INFO("VALUE", sizeof(uint64_t), MYSQL_TYPE_LONGLONG, 0), + ROCKSDB_FIELD_INFO_END}; + +static int rdb_i_s_perf_context_global_fill_table( + my_core::THD *const thd, my_core::TABLE_LIST *const tables, + my_core::Item *const cond MY_ATTRIBUTE((__unused__))) { + 
DBUG_ENTER_FUNC(); + + DBUG_ASSERT(thd != nullptr); + DBUG_ASSERT(tables != nullptr); + + int ret = 0; + + // Get a copy of the global perf counters. + Rdb_perf_counters global_counters; + rdb_get_global_perf_counters(&global_counters); + + for (int i = 0; i < PC_MAX_IDX; i++) { + DBUG_ASSERT(tables->table != nullptr); + DBUG_ASSERT(tables->table->field != nullptr); + + tables->table->field[RDB_PERF_CONTEXT_GLOBAL_FIELD::STAT_TYPE]->store( + rdb_pc_stat_types[i].c_str(), rdb_pc_stat_types[i].size(), + system_charset_info); + tables->table->field[RDB_PERF_CONTEXT_GLOBAL_FIELD::VALUE]->store( + global_counters.m_value[i], true); + + ret = my_core::schema_table_store_record(thd, tables->table); + if (ret) + DBUG_RETURN(ret); + } + + DBUG_RETURN(0); +} + +static int rdb_i_s_perf_context_global_init(void *const p) { + DBUG_ENTER_FUNC(); + + DBUG_ASSERT(p != nullptr); + + my_core::ST_SCHEMA_TABLE *schema; + + schema = (my_core::ST_SCHEMA_TABLE *)p; + + schema->fields_info = rdb_i_s_perf_context_global_fields_info; + schema->fill_table = rdb_i_s_perf_context_global_fill_table; + + DBUG_RETURN(0); +} + +/* + Support for INFORMATION_SCHEMA.ROCKSDB_CFOPTIONS dynamic table + */ +namespace RDB_CFOPTIONS_FIELD { +enum { CF_NAME = 0, OPTION_TYPE, VALUE }; +} // namespace RDB_CFOPTIONS_FIELD + +static ST_FIELD_INFO rdb_i_s_cfoptions_fields_info[] = { + ROCKSDB_FIELD_INFO("CF_NAME", NAME_LEN + 1, MYSQL_TYPE_STRING, 0), + ROCKSDB_FIELD_INFO("OPTION_TYPE", NAME_LEN + 1, MYSQL_TYPE_STRING, 0), + ROCKSDB_FIELD_INFO("VALUE", NAME_LEN + 1, MYSQL_TYPE_STRING, 0), + ROCKSDB_FIELD_INFO_END}; + +static int rdb_i_s_cfoptions_fill_table( + my_core::THD *const thd, my_core::TABLE_LIST *const tables, + my_core::Item *const cond MY_ATTRIBUTE((__unused__))) { + DBUG_ENTER_FUNC(); + + DBUG_ASSERT(thd != nullptr); + DBUG_ASSERT(tables != nullptr); + + bool ret; + + Rdb_cf_manager &cf_manager = rdb_get_cf_manager(); + + for (const auto &cf_name : cf_manager.get_cf_names()) { + std::string val; + 
rocksdb::ColumnFamilyOptions opts; + cf_manager.get_cf_options(cf_name, &opts); + + std::vector> cf_option_types = { + {"COMPARATOR", opts.comparator == nullptr + ? "NULL" + : std::string(opts.comparator->Name())}, + {"MERGE_OPERATOR", opts.merge_operator == nullptr + ? "NULL" + : std::string(opts.merge_operator->Name())}, + {"COMPACTION_FILTER", + opts.compaction_filter == nullptr + ? "NULL" + : std::string(opts.compaction_filter->Name())}, + {"COMPACTION_FILTER_FACTORY", + opts.compaction_filter_factory == nullptr + ? "NULL" + : std::string(opts.compaction_filter_factory->Name())}, + {"WRITE_BUFFER_SIZE", std::to_string(opts.write_buffer_size)}, + {"MAX_WRITE_BUFFER_NUMBER", + std::to_string(opts.max_write_buffer_number)}, + {"MIN_WRITE_BUFFER_NUMBER_TO_MERGE", + std::to_string(opts.min_write_buffer_number_to_merge)}, + {"NUM_LEVELS", std::to_string(opts.num_levels)}, + {"LEVEL0_FILE_NUM_COMPACTION_TRIGGER", + std::to_string(opts.level0_file_num_compaction_trigger)}, + {"LEVEL0_SLOWDOWN_WRITES_TRIGGER", + std::to_string(opts.level0_slowdown_writes_trigger)}, + {"LEVEL0_STOP_WRITES_TRIGGER", + std::to_string(opts.level0_stop_writes_trigger)}, + {"MAX_MEM_COMPACTION_LEVEL", + std::to_string(opts.max_mem_compaction_level)}, + {"TARGET_FILE_SIZE_BASE", std::to_string(opts.target_file_size_base)}, + {"TARGET_FILE_SIZE_MULTIPLIER", + std::to_string(opts.target_file_size_multiplier)}, + {"MAX_BYTES_FOR_LEVEL_BASE", + std::to_string(opts.max_bytes_for_level_base)}, + {"LEVEL_COMPACTION_DYNAMIC_LEVEL_BYTES", + opts.level_compaction_dynamic_level_bytes ? 
"ON" : "OFF"}, + {"MAX_BYTES_FOR_LEVEL_MULTIPLIER", + std::to_string(opts.max_bytes_for_level_multiplier)}, + {"SOFT_RATE_LIMIT", std::to_string(opts.soft_rate_limit)}, + {"HARD_RATE_LIMIT", std::to_string(opts.hard_rate_limit)}, + {"RATE_LIMIT_DELAY_MAX_MILLISECONDS", + std::to_string(opts.rate_limit_delay_max_milliseconds)}, + {"ARENA_BLOCK_SIZE", std::to_string(opts.arena_block_size)}, + {"DISABLE_AUTO_COMPACTIONS", + opts.disable_auto_compactions ? "ON" : "OFF"}, + {"PURGE_REDUNDANT_KVS_WHILE_FLUSH", + opts.purge_redundant_kvs_while_flush ? "ON" : "OFF"}, + {"MAX_SEQUENTIAL_SKIP_IN_ITERATIONS", + std::to_string(opts.max_sequential_skip_in_iterations)}, + {"MEMTABLE_FACTORY", opts.memtable_factory == nullptr + ? "NULL" + : opts.memtable_factory->Name()}, + {"INPLACE_UPDATE_SUPPORT", opts.inplace_update_support ? "ON" : "OFF"}, + {"INPLACE_UPDATE_NUM_LOCKS", + opts.inplace_update_num_locks ? "ON" : "OFF"}, + {"MEMTABLE_PREFIX_BLOOM_BITS_RATIO", + std::to_string(opts.memtable_prefix_bloom_size_ratio)}, + {"MEMTABLE_PREFIX_BLOOM_HUGE_PAGE_TLB_SIZE", + std::to_string(opts.memtable_huge_page_size)}, + {"BLOOM_LOCALITY", std::to_string(opts.bloom_locality)}, + {"MAX_SUCCESSIVE_MERGES", std::to_string(opts.max_successive_merges)}, + {"OPTIMIZE_FILTERS_FOR_HITS", + (opts.optimize_filters_for_hits ? "ON" : "OFF")}, + }; + + // get MAX_BYTES_FOR_LEVEL_MULTIPLIER_ADDITIONAL option value + val = opts.max_bytes_for_level_multiplier_additional.empty() ? "NULL" : ""; + for (const auto &level : opts.max_bytes_for_level_multiplier_additional) { + val.append(std::to_string(level) + ":"); + } + val.pop_back(); + cf_option_types.push_back( + {"MAX_BYTES_FOR_LEVEL_MULTIPLIER_ADDITIONAL", val}); + + // get COMPRESSION_TYPE option value + GetStringFromCompressionType(&val, opts.compression); + if (val.empty()) { + val = "NULL"; + } + cf_option_types.push_back({"COMPRESSION_TYPE", val}); + + // get COMPRESSION_PER_LEVEL option value + val = opts.compression_per_level.empty() ? 
"NULL" : ""; + for (const auto &compression_type : opts.compression_per_level) { + std::string res; + GetStringFromCompressionType(&res, compression_type); + if (!res.empty()) { + val.append(res + ":"); + } + } + val.pop_back(); + cf_option_types.push_back({"COMPRESSION_PER_LEVEL", val}); + + // get compression_opts value + val = std::to_string(opts.compression_opts.window_bits) + ":"; + val.append(std::to_string(opts.compression_opts.level) + ":"); + val.append(std::to_string(opts.compression_opts.strategy)); + cf_option_types.push_back({"COMPRESSION_OPTS", val}); + + // bottommost_compression + if (opts.bottommost_compression) { + std::string res; + GetStringFromCompressionType(&res, opts.bottommost_compression); + if (!res.empty()) { + cf_option_types.push_back({"BOTTOMMOST_COMPRESSION", res}); + } + } + + // get PREFIX_EXTRACTOR option + cf_option_types.push_back( + {"PREFIX_EXTRACTOR", opts.prefix_extractor == nullptr + ? "NULL" + : std::string(opts.prefix_extractor->Name())}); + + // get COMPACTION_STYLE option + switch (opts.compaction_style) { + case rocksdb::kCompactionStyleLevel: + val = "kCompactionStyleLevel"; + break; + case rocksdb::kCompactionStyleUniversal: + val = "kCompactionStyleUniversal"; + break; + case rocksdb::kCompactionStyleFIFO: + val = "kCompactionStyleFIFO"; + break; + case rocksdb::kCompactionStyleNone: + val = "kCompactionStyleNone"; + break; + default: + val = "NULL"; + } + cf_option_types.push_back({"COMPACTION_STYLE", val}); + + // get COMPACTION_OPTIONS_UNIVERSAL related options + const rocksdb::CompactionOptionsUniversal compac_opts = + opts.compaction_options_universal; + val = "{SIZE_RATIO="; + val.append(std::to_string(compac_opts.size_ratio)); + val.append("; MIN_MERGE_WIDTH="); + val.append(std::to_string(compac_opts.min_merge_width)); + val.append("; MAX_MERGE_WIDTH="); + val.append(std::to_string(compac_opts.max_merge_width)); + val.append("; MAX_SIZE_AMPLIFICATION_PERCENT="); + 
val.append(std::to_string(compac_opts.max_size_amplification_percent)); + val.append("; COMPRESSION_SIZE_PERCENT="); + val.append(std::to_string(compac_opts.compression_size_percent)); + val.append("; STOP_STYLE="); + switch (compac_opts.stop_style) { + case rocksdb::kCompactionStopStyleSimilarSize: + val.append("kCompactionStopStyleSimilarSize}"); + break; + case rocksdb::kCompactionStopStyleTotalSize: + val.append("kCompactionStopStyleTotalSize}"); + break; + default: + val.append("}"); + } + cf_option_types.push_back({"COMPACTION_OPTIONS_UNIVERSAL", val}); + + // get COMPACTION_OPTION_FIFO option + cf_option_types.push_back( + {"COMPACTION_OPTION_FIFO::MAX_TABLE_FILES_SIZE", + std::to_string(opts.compaction_options_fifo.max_table_files_size)}); + + // get block-based table related options + const rocksdb::BlockBasedTableOptions &table_options = + rdb_get_table_options(); + + // get BLOCK_BASED_TABLE_FACTORY::CACHE_INDEX_AND_FILTER_BLOCKS option + cf_option_types.push_back( + {"BLOCK_BASED_TABLE_FACTORY::CACHE_INDEX_AND_FILTER_BLOCKS", + table_options.cache_index_and_filter_blocks ? "1" : "0"}); + + // get BLOCK_BASED_TABLE_FACTORY::INDEX_TYPE option value + switch (table_options.index_type) { + case rocksdb::BlockBasedTableOptions::kBinarySearch: + val = "kBinarySearch"; + break; + case rocksdb::BlockBasedTableOptions::kHashSearch: + val = "kHashSearch"; + break; + default: + val = "NULL"; + } + cf_option_types.push_back({"BLOCK_BASED_TABLE_FACTORY::INDEX_TYPE", val}); + + // get BLOCK_BASED_TABLE_FACTORY::HASH_INDEX_ALLOW_COLLISION option value + cf_option_types.push_back( + {"BLOCK_BASED_TABLE_FACTORY::HASH_INDEX_ALLOW_COLLISION", + table_options.hash_index_allow_collision ? 
"ON" : "OFF"}); + + // get BLOCK_BASED_TABLE_FACTORY::CHECKSUM option value + switch (table_options.checksum) { + case rocksdb::kNoChecksum: + val = "kNoChecksum"; + break; + case rocksdb::kCRC32c: + val = "kCRC32c"; + break; + case rocksdb::kxxHash: + val = "kxxHash"; + break; + default: + val = "NULL"; + } + cf_option_types.push_back({"BLOCK_BASED_TABLE_FACTORY::CHECKSUM", val}); + + // get BLOCK_BASED_TABLE_FACTORY::NO_BLOCK_CACHE option value + cf_option_types.push_back({"BLOCK_BASED_TABLE_FACTORY::NO_BLOCK_CACHE", + table_options.no_block_cache ? "ON" : "OFF"}); + + // get BLOCK_BASED_TABLE_FACTORY::FILTER_POLICY option + cf_option_types.push_back( + {"BLOCK_BASED_TABLE_FACTORY::FILTER_POLICY", + table_options.filter_policy == nullptr + ? "NULL" + : std::string(table_options.filter_policy->Name())}); + + // get BLOCK_BASED_TABLE_FACTORY::WHOLE_KEY_FILTERING option + cf_option_types.push_back({"BLOCK_BASED_TABLE_FACTORY::WHOLE_KEY_FILTERING", + table_options.whole_key_filtering ? "1" : "0"}); + + // get BLOCK_BASED_TABLE_FACTORY::BLOCK_CACHE option + cf_option_types.push_back( + {"BLOCK_BASED_TABLE_FACTORY::BLOCK_CACHE", + table_options.block_cache == nullptr + ? "NULL" + : std::to_string(table_options.block_cache->GetUsage())}); + + // get BLOCK_BASED_TABLE_FACTORY::BLOCK_CACHE_COMPRESSED option + cf_option_types.push_back( + {"BLOCK_BASED_TABLE_FACTORY::BLOCK_CACHE_COMPRESSED", + table_options.block_cache_compressed == nullptr + ? 
"NULL" + : std::to_string( + table_options.block_cache_compressed->GetUsage())}); + + // get BLOCK_BASED_TABLE_FACTORY::BLOCK_SIZE option + cf_option_types.push_back({"BLOCK_BASED_TABLE_FACTORY::BLOCK_SIZE", + std::to_string(table_options.block_size)}); + + // get BLOCK_BASED_TABLE_FACTORY::BLOCK_SIZE_DEVIATION option + cf_option_types.push_back( + {"BLOCK_BASED_TABLE_FACTORY::BLOCK_SIZE_DEVIATION", + std::to_string(table_options.block_size_deviation)}); + + // get BLOCK_BASED_TABLE_FACTORY::BLOCK_RESTART_INTERVAL option + cf_option_types.push_back( + {"BLOCK_BASED_TABLE_FACTORY::BLOCK_RESTART_INTERVAL", + std::to_string(table_options.block_restart_interval)}); + + // get BLOCK_BASED_TABLE_FACTORY::FORMAT_VERSION option + cf_option_types.push_back({"BLOCK_BASED_TABLE_FACTORY::FORMAT_VERSION", + std::to_string(table_options.format_version)}); + + for (const auto &cf_option_type : cf_option_types) { + DBUG_ASSERT(tables->table != nullptr); + DBUG_ASSERT(tables->table->field != nullptr); + + tables->table->field[RDB_CFOPTIONS_FIELD::CF_NAME]->store( + cf_name.c_str(), cf_name.size(), system_charset_info); + tables->table->field[RDB_CFOPTIONS_FIELD::OPTION_TYPE]->store( + cf_option_type.first.c_str(), cf_option_type.first.size(), + system_charset_info); + tables->table->field[RDB_CFOPTIONS_FIELD::VALUE]->store( + cf_option_type.second.c_str(), cf_option_type.second.size(), + system_charset_info); + + ret = my_core::schema_table_store_record(thd, tables->table); + + if (ret) + DBUG_RETURN(ret); + } + } + DBUG_RETURN(0); +} + +/* + Support for INFORMATION_SCHEMA.ROCKSDB_GLOBAL_INFO dynamic table + */ +namespace RDB_GLOBAL_INFO_FIELD { +enum { TYPE = 0, NAME, VALUE }; +} + +static ST_FIELD_INFO rdb_i_s_global_info_fields_info[] = { + ROCKSDB_FIELD_INFO("TYPE", FN_REFLEN + 1, MYSQL_TYPE_STRING, 0), + ROCKSDB_FIELD_INFO("NAME", FN_REFLEN + 1, MYSQL_TYPE_STRING, 0), + ROCKSDB_FIELD_INFO("VALUE", FN_REFLEN + 1, MYSQL_TYPE_STRING, 0), + ROCKSDB_FIELD_INFO_END}; + +/* + * 
helper function for rdb_i_s_global_info_fill_table + * to insert (TYPE, KEY, VALUE) rows into + * information_schema.rocksdb_global_info + */ +static int rdb_global_info_fill_row(my_core::THD *const thd, + my_core::TABLE_LIST *const tables, + const char *const type, + const char *const name, + const char *const value) { + DBUG_ASSERT(thd != nullptr); + DBUG_ASSERT(tables != nullptr); + DBUG_ASSERT(tables->table != nullptr); + DBUG_ASSERT(type != nullptr); + DBUG_ASSERT(name != nullptr); + DBUG_ASSERT(value != nullptr); + + Field **field = tables->table->field; + DBUG_ASSERT(field != nullptr); + + field[RDB_GLOBAL_INFO_FIELD::TYPE]->store(type, strlen(type), + system_charset_info); + field[RDB_GLOBAL_INFO_FIELD::NAME]->store(name, strlen(name), + system_charset_info); + field[RDB_GLOBAL_INFO_FIELD::VALUE]->store(value, strlen(value), + system_charset_info); + + return my_core::schema_table_store_record(thd, tables->table); +} + +static int rdb_i_s_global_info_fill_table( + my_core::THD *const thd, my_core::TABLE_LIST *const tables, + my_core::Item *const cond MY_ATTRIBUTE((__unused__))) { + DBUG_ENTER_FUNC(); + + DBUG_ASSERT(thd != nullptr); + DBUG_ASSERT(tables != nullptr); + + static const uint32_t INT_BUF_LEN = 21; + static const uint32_t GTID_BUF_LEN = 60; + static const uint32_t CF_ID_INDEX_BUF_LEN = 60; + + int ret = 0; + + /* binlog info */ + Rdb_binlog_manager *const blm = rdb_get_binlog_manager(); + DBUG_ASSERT(blm != nullptr); + + char file_buf[FN_REFLEN + 1] = {0}; + my_off_t pos = 0; + char pos_buf[INT_BUF_LEN] = {0}; + char gtid_buf[GTID_BUF_LEN] = {0}; + + if (blm->read(file_buf, &pos, gtid_buf)) { + snprintf(pos_buf, INT_BUF_LEN, "%llu", (ulonglong)pos); + ret |= rdb_global_info_fill_row(thd, tables, "BINLOG", "FILE", file_buf); + ret |= rdb_global_info_fill_row(thd, tables, "BINLOG", "POS", pos_buf); + ret |= rdb_global_info_fill_row(thd, tables, "BINLOG", "GTID", gtid_buf); + } + + /* max index info */ + const Rdb_dict_manager *const dict_manager = 
rdb_get_dict_manager(); + DBUG_ASSERT(dict_manager != nullptr); + + uint32_t max_index_id; + char max_index_id_buf[INT_BUF_LEN] = {0}; + + if (dict_manager->get_max_index_id(&max_index_id)) { + snprintf(max_index_id_buf, INT_BUF_LEN, "%u", max_index_id); + ret |= rdb_global_info_fill_row(thd, tables, "MAX_INDEX_ID", "MAX_INDEX_ID", + max_index_id_buf); + } + + /* cf_id -> cf_flags */ + char cf_id_buf[INT_BUF_LEN] = {0}; + char cf_value_buf[FN_REFLEN + 1] = {0}; + const Rdb_cf_manager &cf_manager = rdb_get_cf_manager(); + for (const auto &cf_handle : cf_manager.get_all_cf()) { + uint flags; + dict_manager->get_cf_flags(cf_handle->GetID(), &flags); + snprintf(cf_id_buf, INT_BUF_LEN, "%u", cf_handle->GetID()); + snprintf(cf_value_buf, FN_REFLEN, "%s [%u]", cf_handle->GetName().c_str(), + flags); + ret |= rdb_global_info_fill_row(thd, tables, "CF_FLAGS", cf_id_buf, + cf_value_buf); + + if (ret) + break; + } + + /* DDL_DROP_INDEX_ONGOING */ + std::unordered_set gl_index_ids; + dict_manager->get_ongoing_index_operation( + &gl_index_ids, Rdb_key_def::DDL_DROP_INDEX_ONGOING); + char cf_id_index_buf[CF_ID_INDEX_BUF_LEN] = {0}; + for (auto gl_index_id : gl_index_ids) { + snprintf(cf_id_index_buf, CF_ID_INDEX_BUF_LEN, "cf_id:%u,index_id:%u", + gl_index_id.cf_id, gl_index_id.index_id); + ret |= rdb_global_info_fill_row(thd, tables, "DDL_DROP_INDEX_ONGOING", + cf_id_index_buf, ""); + + if (ret) + break; + } + + DBUG_RETURN(ret); +} + +/* + Support for INFORMATION_SCHEMA.ROCKSDB_COMPACTION_STATS dynamic table + */ +static int rdb_i_s_compact_stats_fill_table( + my_core::THD *thd, my_core::TABLE_LIST *tables, + my_core::Item *cond MY_ATTRIBUTE((__unused__))) { + DBUG_ASSERT(thd != nullptr); + DBUG_ASSERT(tables != nullptr); + + DBUG_ENTER_FUNC(); + + int ret = 0; + + rocksdb::DB *rdb = rdb_get_rocksdb_db(); + Rdb_cf_manager &cf_manager = rdb_get_cf_manager(); + DBUG_ASSERT(rdb != nullptr); + + for (auto cf_name : cf_manager.get_cf_names()) { + rocksdb::ColumnFamilyHandle *cfh; + 
bool is_automatic; + /* + Only the cf name is important. Whether it was generated automatically + does not matter, so is_automatic is ignored. + */ + cfh = cf_manager.get_cf(cf_name.c_str(), "", nullptr, &is_automatic); + if (cfh == nullptr) { + continue; + } + std::map props; + bool bool_ret MY_ATTRIBUTE((__unused__)); + bool_ret = rdb->GetMapProperty(cfh, "rocksdb.cfstats", &props); + DBUG_ASSERT(bool_ret); + + for (auto const &prop_ent : props) { + std::string prop_name = prop_ent.first; + double value = prop_ent.second; + std::size_t del_pos = prop_name.find('.'); + DBUG_ASSERT(del_pos != std::string::npos); + std::string level_str = prop_name.substr(0, del_pos); + std::string type_str = prop_name.substr(del_pos + 1); + + Field **field = tables->table->field; + DBUG_ASSERT(field != nullptr); + field[0]->store(cf_name.c_str(), cf_name.size(), system_charset_info); + field[1]->store(level_str.c_str(), level_str.size(), system_charset_info); + field[2]->store(type_str.c_str(), type_str.size(), system_charset_info); + field[3]->store(value, true); + + ret |= my_core::schema_table_store_record(thd, tables->table); + if (ret != 0) { + DBUG_RETURN(ret); + } + } + } + + DBUG_RETURN(ret); +} + +static ST_FIELD_INFO rdb_i_s_compact_stats_fields_info[] = { + ROCKSDB_FIELD_INFO("CF_NAME", NAME_LEN + 1, MYSQL_TYPE_STRING, 0), + ROCKSDB_FIELD_INFO("LEVEL", FN_REFLEN + 1, MYSQL_TYPE_STRING, 0), + ROCKSDB_FIELD_INFO("TYPE", FN_REFLEN + 1, MYSQL_TYPE_STRING, 0), + ROCKSDB_FIELD_INFO("VALUE", sizeof(double), MYSQL_TYPE_DOUBLE, 0), + ROCKSDB_FIELD_INFO_END}; + +namespace // anonymous namespace = not visible outside this source file +{ +struct Rdb_ddl_scanner : public Rdb_tables_scanner { + my_core::THD *m_thd; + my_core::TABLE *m_table; + + int add_table(Rdb_tbl_def *tdef) override; +}; +} // anonymous namespace + +/* + Support for INFORMATION_SCHEMA.ROCKSDB_DDL dynamic table + */ +namespace RDB_DDL_FIELD { +enum { + TABLE_SCHEMA = 0, + TABLE_NAME, + PARTITION_NAME, + INDEX_NAME, 
+ COLUMN_FAMILY, + INDEX_NUMBER, + INDEX_TYPE, + KV_FORMAT_VERSION, + CF +}; +} // namespace RDB_DDL_FIELD + +static ST_FIELD_INFO rdb_i_s_ddl_fields_info[] = { + ROCKSDB_FIELD_INFO("TABLE_SCHEMA", NAME_LEN + 1, MYSQL_TYPE_STRING, 0), + ROCKSDB_FIELD_INFO("TABLE_NAME", NAME_LEN + 1, MYSQL_TYPE_STRING, 0), + ROCKSDB_FIELD_INFO("PARTITION_NAME", NAME_LEN + 1, MYSQL_TYPE_STRING, + MY_I_S_MAYBE_NULL), + ROCKSDB_FIELD_INFO("INDEX_NAME", NAME_LEN + 1, MYSQL_TYPE_STRING, 0), + ROCKSDB_FIELD_INFO("COLUMN_FAMILY", sizeof(uint32_t), MYSQL_TYPE_LONG, 0), + ROCKSDB_FIELD_INFO("INDEX_NUMBER", sizeof(uint32_t), MYSQL_TYPE_LONG, 0), + ROCKSDB_FIELD_INFO("INDEX_TYPE", sizeof(uint16_t), MYSQL_TYPE_SHORT, 0), + ROCKSDB_FIELD_INFO("KV_FORMAT_VERSION", sizeof(uint16_t), MYSQL_TYPE_SHORT, + 0), + ROCKSDB_FIELD_INFO("CF", NAME_LEN + 1, MYSQL_TYPE_STRING, 0), + ROCKSDB_FIELD_INFO_END}; + +int Rdb_ddl_scanner::add_table(Rdb_tbl_def *tdef) { + DBUG_ASSERT(tdef != nullptr); + + int ret = 0; + + DBUG_ASSERT(m_table != nullptr); + Field **field = m_table->field; + DBUG_ASSERT(field != nullptr); + + const std::string &dbname = tdef->base_dbname(); + field[RDB_DDL_FIELD::TABLE_SCHEMA]->store(dbname.c_str(), dbname.size(), + system_charset_info); + + const std::string &tablename = tdef->base_tablename(); + field[RDB_DDL_FIELD::TABLE_NAME]->store(tablename.c_str(), tablename.size(), + system_charset_info); + + const std::string &partname = tdef->base_partition(); + if (partname.length() == 0) { + field[RDB_DDL_FIELD::PARTITION_NAME]->set_null(); + } else { + field[RDB_DDL_FIELD::PARTITION_NAME]->set_notnull(); + field[RDB_DDL_FIELD::PARTITION_NAME]->store( + partname.c_str(), partname.size(), system_charset_info); + } + + for (uint i = 0; i < tdef->m_key_count; i++) { + const Rdb_key_def &kd = *tdef->m_key_descr_arr[i]; + + field[RDB_DDL_FIELD::INDEX_NAME]->store(kd.m_name.c_str(), kd.m_name.size(), + system_charset_info); + + GL_INDEX_ID gl_index_id = kd.get_gl_index_id(); + 
field[RDB_DDL_FIELD::COLUMN_FAMILY]->store(gl_index_id.cf_id, true); + field[RDB_DDL_FIELD::INDEX_NUMBER]->store(gl_index_id.index_id, true); + field[RDB_DDL_FIELD::INDEX_TYPE]->store(kd.m_index_type, true); + field[RDB_DDL_FIELD::KV_FORMAT_VERSION]->store(kd.m_kv_format_version, + true); + + std::string cf_name = kd.get_cf()->GetName(); + field[RDB_DDL_FIELD::CF]->store(cf_name.c_str(), cf_name.size(), + system_charset_info); + + ret = my_core::schema_table_store_record(m_thd, m_table); + if (ret) + return ret; + } + return HA_EXIT_SUCCESS; +} + +static int rdb_i_s_ddl_fill_table(my_core::THD *const thd, + my_core::TABLE_LIST *const tables, + my_core::Item *const cond) { + DBUG_ENTER_FUNC(); + + DBUG_ASSERT(thd != nullptr); + DBUG_ASSERT(tables != nullptr); + + Rdb_ddl_scanner ddl_arg; + ddl_arg.m_thd = thd; + ddl_arg.m_table = tables->table; + + Rdb_ddl_manager *ddl_manager = rdb_get_ddl_manager(); + DBUG_ASSERT(ddl_manager != nullptr); + int ret = ddl_manager->scan_for_tables(&ddl_arg); + + DBUG_RETURN(ret); +} + +static int rdb_i_s_ddl_init(void *const p) { + DBUG_ENTER_FUNC(); + + my_core::ST_SCHEMA_TABLE *schema; + + DBUG_ASSERT(p != nullptr); + + schema = (my_core::ST_SCHEMA_TABLE *)p; + + schema->fields_info = rdb_i_s_ddl_fields_info; + schema->fill_table = rdb_i_s_ddl_fill_table; + + DBUG_RETURN(0); +} + +static int rdb_i_s_cfoptions_init(void *const p) { + DBUG_ENTER_FUNC(); + + DBUG_ASSERT(p != nullptr); + + my_core::ST_SCHEMA_TABLE *schema; + + schema = (my_core::ST_SCHEMA_TABLE *)p; + + schema->fields_info = rdb_i_s_cfoptions_fields_info; + schema->fill_table = rdb_i_s_cfoptions_fill_table; + + DBUG_RETURN(0); +} + +static int rdb_i_s_global_info_init(void *const p) { + DBUG_ENTER_FUNC(); + + DBUG_ASSERT(p != nullptr); + + my_core::ST_SCHEMA_TABLE *schema; + + schema = reinterpret_cast(p); + + schema->fields_info = rdb_i_s_global_info_fields_info; + schema->fill_table = rdb_i_s_global_info_fill_table; + + DBUG_RETURN(0); +} + +static int 
rdb_i_s_compact_stats_init(void *p) { + my_core::ST_SCHEMA_TABLE *schema; + + DBUG_ENTER_FUNC(); + DBUG_ASSERT(p != nullptr); + + schema = reinterpret_cast(p); + + schema->fields_info = rdb_i_s_compact_stats_fields_info; + schema->fill_table = rdb_i_s_compact_stats_fill_table; + + DBUG_RETURN(0); +} + +/* Given a path to a file return just the filename portion. */ +static std::string rdb_filename_without_path(const std::string &path) { + /* Find last slash in path */ + const size_t pos = path.rfind('/'); + + /* None found? Just return the original string */ + if (pos == std::string::npos) { + return std::string(path); + } + + /* Return everything after the slash (or backslash) */ + return path.substr(pos + 1); +} + +/* + Support for INFORMATION_SCHEMA.ROCKSDB_INDEX_FILE_MAP dynamic table + */ +namespace RDB_INDEX_FILE_MAP_FIELD { +enum { + COLUMN_FAMILY = 0, + INDEX_NUMBER, + SST_NAME, + NUM_ROWS, + DATA_SIZE, + ENTRY_DELETES, + ENTRY_SINGLEDELETES, + ENTRY_MERGES, + ENTRY_OTHERS, + DISTINCT_KEYS_PREFIX +}; +} // namespace RDB_INDEX_FILE_MAP_FIELD + +static ST_FIELD_INFO rdb_i_s_index_file_map_fields_info[] = { + /* The information_schema.rocksdb_index_file_map virtual table has four + * fields: + * COLUMN_FAMILY => the index's column family contained in the SST file + * INDEX_NUMBER => the index id contained in the SST file + * SST_NAME => the name of the SST file containing some indexes + * NUM_ROWS => the number of entries of this index id in this SST file + * DATA_SIZE => the data size stored in this SST file for this index id */ + ROCKSDB_FIELD_INFO("COLUMN_FAMILY", sizeof(uint32_t), MYSQL_TYPE_LONG, 0), + ROCKSDB_FIELD_INFO("INDEX_NUMBER", sizeof(uint32_t), MYSQL_TYPE_LONG, 0), + ROCKSDB_FIELD_INFO("SST_NAME", NAME_LEN + 1, MYSQL_TYPE_STRING, 0), + ROCKSDB_FIELD_INFO("NUM_ROWS", sizeof(int64_t), MYSQL_TYPE_LONGLONG, 0), + ROCKSDB_FIELD_INFO("DATA_SIZE", sizeof(int64_t), MYSQL_TYPE_LONGLONG, 0), + ROCKSDB_FIELD_INFO("ENTRY_DELETES", sizeof(int64_t), 
MYSQL_TYPE_LONGLONG, + 0), + ROCKSDB_FIELD_INFO("ENTRY_SINGLEDELETES", sizeof(int64_t), + MYSQL_TYPE_LONGLONG, 0), + ROCKSDB_FIELD_INFO("ENTRY_MERGES", sizeof(int64_t), MYSQL_TYPE_LONGLONG, 0), + ROCKSDB_FIELD_INFO("ENTRY_OTHERS", sizeof(int64_t), MYSQL_TYPE_LONGLONG, 0), + ROCKSDB_FIELD_INFO("DISTINCT_KEYS_PREFIX", MAX_REF_PARTS * 25, + MYSQL_TYPE_STRING, 0), + ROCKSDB_FIELD_INFO_END}; + +/* Fill the information_schema.rocksdb_index_file_map virtual table */ +static int rdb_i_s_index_file_map_fill_table( + my_core::THD *const thd, my_core::TABLE_LIST *const tables, + my_core::Item *const cond MY_ATTRIBUTE((__unused__))) { + DBUG_ENTER_FUNC(); + + DBUG_ASSERT(thd != nullptr); + DBUG_ASSERT(tables != nullptr); + DBUG_ASSERT(tables->table != nullptr); + + int ret = 0; + Field **field = tables->table->field; + DBUG_ASSERT(field != nullptr); + + /* Iterate over all the column families */ + rocksdb::DB *const rdb = rdb_get_rocksdb_db(); + DBUG_ASSERT(rdb != nullptr); + + const Rdb_cf_manager &cf_manager = rdb_get_cf_manager(); + for (const auto &cf_handle : cf_manager.get_all_cf()) { + /* Grab the the properties of all the tables in the column family */ + rocksdb::TablePropertiesCollection table_props_collection; + const rocksdb::Status s = + rdb->GetPropertiesOfAllTables(cf_handle, &table_props_collection); + if (!s.ok()) { + continue; + } + + /* Iterate over all the items in the collection, each of which contains a + * name and the actual properties */ + for (const auto &props : table_props_collection) { + /* Add the SST name into the output */ + const std::string sst_name = rdb_filename_without_path(props.first); + field[RDB_INDEX_FILE_MAP_FIELD::SST_NAME]->store( + sst_name.data(), sst_name.size(), system_charset_info); + + /* Get the __indexstats__ data out of the table property */ + std::vector stats; + Rdb_tbl_prop_coll::read_stats_from_tbl_props(props.second, &stats); + if (stats.empty()) { + field[RDB_INDEX_FILE_MAP_FIELD::COLUMN_FAMILY]->store(-1, true); + 
field[RDB_INDEX_FILE_MAP_FIELD::INDEX_NUMBER]->store(-1, true); + field[RDB_INDEX_FILE_MAP_FIELD::NUM_ROWS]->store(-1, true); + field[RDB_INDEX_FILE_MAP_FIELD::DATA_SIZE]->store(-1, true); + field[RDB_INDEX_FILE_MAP_FIELD::ENTRY_DELETES]->store(-1, true); + field[RDB_INDEX_FILE_MAP_FIELD::ENTRY_SINGLEDELETES]->store(-1, true); + field[RDB_INDEX_FILE_MAP_FIELD::ENTRY_MERGES]->store(-1, true); + field[RDB_INDEX_FILE_MAP_FIELD::ENTRY_OTHERS]->store(-1, true); + } else { + for (auto it : stats) { + /* Add the index number, the number of rows, and data size to the + * output */ + field[RDB_INDEX_FILE_MAP_FIELD::COLUMN_FAMILY]->store( + it.m_gl_index_id.cf_id, true); + field[RDB_INDEX_FILE_MAP_FIELD::INDEX_NUMBER]->store( + it.m_gl_index_id.index_id, true); + field[RDB_INDEX_FILE_MAP_FIELD::NUM_ROWS]->store(it.m_rows, true); + field[RDB_INDEX_FILE_MAP_FIELD::DATA_SIZE]->store(it.m_data_size, + true); + field[RDB_INDEX_FILE_MAP_FIELD::ENTRY_DELETES]->store( + it.m_entry_deletes, true); + field[RDB_INDEX_FILE_MAP_FIELD::ENTRY_SINGLEDELETES]->store( + it.m_entry_single_deletes, true); + field[RDB_INDEX_FILE_MAP_FIELD::ENTRY_MERGES]->store( + it.m_entry_merges, true); + field[RDB_INDEX_FILE_MAP_FIELD::ENTRY_OTHERS]->store( + it.m_entry_others, true); + std::string distinct_keys_prefix; + + for (size_t i = 0; i < it.m_distinct_keys_per_prefix.size(); i++) { + if (i > 0) { + distinct_keys_prefix += ","; + } + distinct_keys_prefix += + std::to_string(it.m_distinct_keys_per_prefix[i]); + } + + field[RDB_INDEX_FILE_MAP_FIELD::DISTINCT_KEYS_PREFIX]->store( + distinct_keys_prefix.data(), distinct_keys_prefix.size(), + system_charset_info); + + /* Tell MySQL about this row in the virtual table */ + ret = my_core::schema_table_store_record(thd, tables->table); + if (ret != 0) { + break; + } + } + } + } + } + + DBUG_RETURN(ret); +} + +/* Initialize the information_schema.rocksdb_index_file_map virtual table */ +static int rdb_i_s_index_file_map_init(void *const p) { + 
DBUG_ENTER_FUNC(); + + DBUG_ASSERT(p != nullptr); + + my_core::ST_SCHEMA_TABLE *schema; + + schema = (my_core::ST_SCHEMA_TABLE *)p; + + schema->fields_info = rdb_i_s_index_file_map_fields_info; + schema->fill_table = rdb_i_s_index_file_map_fill_table; + + DBUG_RETURN(0); +} + +/* + Support for INFORMATION_SCHEMA.ROCKSDB_LOCKS dynamic table + */ +namespace RDB_LOCKS_FIELD { +enum { COLUMN_FAMILY_ID = 0, TRANSACTION_ID, KEY, MODE }; +} // namespace RDB_LOCKS_FIELD + +static ST_FIELD_INFO rdb_i_s_lock_info_fields_info[] = { + ROCKSDB_FIELD_INFO("COLUMN_FAMILY_ID", sizeof(uint32_t), MYSQL_TYPE_LONG, + 0), + ROCKSDB_FIELD_INFO("TRANSACTION_ID", sizeof(uint32_t), MYSQL_TYPE_LONG, 0), + ROCKSDB_FIELD_INFO("KEY", FN_REFLEN + 1, MYSQL_TYPE_STRING, 0), + ROCKSDB_FIELD_INFO("MODE", 32, MYSQL_TYPE_STRING, 0), + ROCKSDB_FIELD_INFO_END}; + +/* Fill the information_schema.rocksdb_locks virtual table */ +static int rdb_i_s_lock_info_fill_table( + my_core::THD *const thd, my_core::TABLE_LIST *const tables, + my_core::Item *const cond MY_ATTRIBUTE((__unused__))) { + DBUG_ENTER_FUNC(); + + DBUG_ASSERT(thd != nullptr); + DBUG_ASSERT(tables != nullptr); + DBUG_ASSERT(tables->table != nullptr); + + int ret = 0; + + rocksdb::TransactionDB *const rdb = rdb_get_rocksdb_db(); + DBUG_ASSERT(rdb != nullptr); + + /* cf id -> rocksdb::KeyLockInfo */ + std::unordered_multimap lock_info = + rdb->GetLockStatusData(); + + for (const auto &lock : lock_info) { + const uint32_t cf_id = lock.first; + const auto &key_lock_info = lock.second; + const auto key_hexstr = rdb_hexdump(key_lock_info.key.c_str(), + key_lock_info.key.length(), FN_REFLEN); + + for (const auto &id : key_lock_info.ids) { + tables->table->field[RDB_LOCKS_FIELD::COLUMN_FAMILY_ID]->store(cf_id, + true); + tables->table->field[RDB_LOCKS_FIELD::TRANSACTION_ID]->store(id, true); + + tables->table->field[RDB_LOCKS_FIELD::KEY]->store( + key_hexstr.c_str(), key_hexstr.size(), system_charset_info); + 
tables->table->field[RDB_LOCKS_FIELD::MODE]->store( + key_lock_info.exclusive ? "X" : "S", 1, system_charset_info); + + /* Tell MySQL about this row in the virtual table */ + ret = my_core::schema_table_store_record(thd, tables->table); + if (ret != 0) { + break; + } + } + } + DBUG_RETURN(ret); +} + +/* Initialize the information_schema.rocksdb_lock_info virtual table */ +static int rdb_i_s_lock_info_init(void *const p) { + DBUG_ENTER_FUNC(); + + DBUG_ASSERT(p != nullptr); + + my_core::ST_SCHEMA_TABLE *schema; + + schema = (my_core::ST_SCHEMA_TABLE *)p; + + schema->fields_info = rdb_i_s_lock_info_fields_info; + schema->fill_table = rdb_i_s_lock_info_fill_table; + + DBUG_RETURN(0); +} + +/* + Support for INFORMATION_SCHEMA.ROCKSDB_TRX dynamic table + */ +namespace RDB_TRX_FIELD { +enum { + TRANSACTION_ID = 0, + STATE, + NAME, + WRITE_COUNT, + LOCK_COUNT, + TIMEOUT_SEC, + WAITING_KEY, + WAITING_COLUMN_FAMILY_ID, + IS_REPLICATION, + SKIP_TRX_API, + READ_ONLY, + HAS_DEADLOCK_DETECTION, + NUM_ONGOING_BULKLOAD, + THREAD_ID, + QUERY +}; +} // namespace RDB_TRX_FIELD + +static ST_FIELD_INFO rdb_i_s_trx_info_fields_info[] = { + ROCKSDB_FIELD_INFO("TRANSACTION_ID", sizeof(ulonglong), MYSQL_TYPE_LONGLONG, + 0), + ROCKSDB_FIELD_INFO("STATE", NAME_LEN + 1, MYSQL_TYPE_STRING, 0), + ROCKSDB_FIELD_INFO("NAME", NAME_LEN + 1, MYSQL_TYPE_STRING, 0), + ROCKSDB_FIELD_INFO("WRITE_COUNT", sizeof(ulonglong), MYSQL_TYPE_LONGLONG, + 0), + ROCKSDB_FIELD_INFO("LOCK_COUNT", sizeof(ulonglong), MYSQL_TYPE_LONGLONG, 0), + ROCKSDB_FIELD_INFO("TIMEOUT_SEC", sizeof(uint32_t), MYSQL_TYPE_LONG, 0), + ROCKSDB_FIELD_INFO("WAITING_KEY", FN_REFLEN + 1, MYSQL_TYPE_STRING, 0), + ROCKSDB_FIELD_INFO("WAITING_COLUMN_FAMILY_ID", sizeof(uint32_t), + MYSQL_TYPE_LONG, 0), + ROCKSDB_FIELD_INFO("IS_REPLICATION", sizeof(uint32_t), MYSQL_TYPE_LONG, 0), + ROCKSDB_FIELD_INFO("SKIP_TRX_API", sizeof(uint32_t), MYSQL_TYPE_LONG, 0), + ROCKSDB_FIELD_INFO("READ_ONLY", sizeof(uint32_t), MYSQL_TYPE_LONG, 0), + 
ROCKSDB_FIELD_INFO("HAS_DEADLOCK_DETECTION", sizeof(uint32_t), + MYSQL_TYPE_LONG, 0), + ROCKSDB_FIELD_INFO("NUM_ONGOING_BULKLOAD", sizeof(uint32_t), + MYSQL_TYPE_LONG, 0), + ROCKSDB_FIELD_INFO("THREAD_ID", sizeof(ulong), MYSQL_TYPE_LONG, 0), + ROCKSDB_FIELD_INFO("QUERY", NAME_LEN + 1, MYSQL_TYPE_STRING, 0), + ROCKSDB_FIELD_INFO_END}; + +/* Fill the information_schema.rocksdb_trx virtual table */ +static int rdb_i_s_trx_info_fill_table( + my_core::THD *const thd, my_core::TABLE_LIST *const tables, + my_core::Item *const cond MY_ATTRIBUTE((__unused__))) { + DBUG_ENTER_FUNC(); + + DBUG_ASSERT(thd != nullptr); + DBUG_ASSERT(tables != nullptr); + DBUG_ASSERT(tables->table != nullptr); + + int ret = 0; + + const std::vector &all_trx_info = rdb_get_all_trx_info(); + + for (const auto &info : all_trx_info) { + auto name_hexstr = + rdb_hexdump(info.name.c_str(), info.name.length(), NAME_LEN); + auto key_hexstr = rdb_hexdump(info.waiting_key.c_str(), + info.waiting_key.length(), FN_REFLEN); + tables->table->field[RDB_TRX_FIELD::TRANSACTION_ID]->store(info.trx_id, + true); + tables->table->field[RDB_TRX_FIELD::STATE]->store( + info.state.c_str(), info.state.length(), system_charset_info); + tables->table->field[RDB_TRX_FIELD::NAME]->store( + name_hexstr.c_str(), name_hexstr.length(), system_charset_info); + tables->table->field[RDB_TRX_FIELD::WRITE_COUNT]->store(info.write_count, + true); + tables->table->field[RDB_TRX_FIELD::LOCK_COUNT]->store(info.lock_count, + true); + tables->table->field[RDB_TRX_FIELD::TIMEOUT_SEC]->store(info.timeout_sec, + false); + tables->table->field[RDB_TRX_FIELD::WAITING_KEY]->store( + key_hexstr.c_str(), key_hexstr.length(), system_charset_info); + tables->table->field[RDB_TRX_FIELD::WAITING_COLUMN_FAMILY_ID]->store( + info.waiting_cf_id, true); + tables->table->field[RDB_TRX_FIELD::IS_REPLICATION]->store( + info.is_replication, false); + tables->table->field[RDB_TRX_FIELD::SKIP_TRX_API]->store(info.skip_trx_api, + false); + 
tables->table->field[RDB_TRX_FIELD::READ_ONLY]->store(info.read_only, + false); + tables->table->field[RDB_TRX_FIELD::HAS_DEADLOCK_DETECTION]->store( + info.deadlock_detect, false); + tables->table->field[RDB_TRX_FIELD::NUM_ONGOING_BULKLOAD]->store( + info.num_ongoing_bulk_load, false); + tables->table->field[RDB_TRX_FIELD::THREAD_ID]->store(info.thread_id, true); + tables->table->field[RDB_TRX_FIELD::QUERY]->store( + info.query_str.c_str(), info.query_str.length(), system_charset_info); + + /* Tell MySQL about this row in the virtual table */ + ret = my_core::schema_table_store_record(thd, tables->table); + if (ret != 0) { + break; + } + } + + DBUG_RETURN(ret); +} + +/* Initialize the information_schema.rocksdb_trx_info virtual table */ +static int rdb_i_s_trx_info_init(void *const p) { + DBUG_ENTER_FUNC(); + + DBUG_ASSERT(p != nullptr); + + my_core::ST_SCHEMA_TABLE *schema; + + schema = (my_core::ST_SCHEMA_TABLE *)p; + + schema->fields_info = rdb_i_s_trx_info_fields_info; + schema->fill_table = rdb_i_s_trx_info_fill_table; + + DBUG_RETURN(0); +} + +static int rdb_i_s_deinit(void *p MY_ATTRIBUTE((__unused__))) { + DBUG_ENTER_FUNC(); + DBUG_RETURN(0); +} + +static struct st_mysql_information_schema rdb_i_s_info = { + MYSQL_INFORMATION_SCHEMA_INTERFACE_VERSION}; + +struct st_maria_plugin rdb_i_s_cfstats = { + MYSQL_INFORMATION_SCHEMA_PLUGIN, + &rdb_i_s_info, + "ROCKSDB_CFSTATS", + "Facebook", + "RocksDB column family stats", + PLUGIN_LICENSE_GPL, + rdb_i_s_cfstats_init, + rdb_i_s_deinit, + 0x0001, /* version number (0.1) */ + nullptr, /* status variables */ + nullptr, /* system variables */ + nullptr, /* config options */ + 0, /* flags */ +}; + +struct st_maria_plugin rdb_i_s_dbstats = { + MYSQL_INFORMATION_SCHEMA_PLUGIN, + &rdb_i_s_info, + "ROCKSDB_DBSTATS", + "Facebook", + "RocksDB database stats", + PLUGIN_LICENSE_GPL, + rdb_i_s_dbstats_init, + rdb_i_s_deinit, + 0x0001, /* version number (0.1) */ + nullptr, /* status variables */ + nullptr, /* system variables */ 
+ nullptr, /* config options */ + 0, /* flags */ +}; + +struct st_maria_plugin rdb_i_s_perf_context = { + MYSQL_INFORMATION_SCHEMA_PLUGIN, + &rdb_i_s_info, + "ROCKSDB_PERF_CONTEXT", + "Facebook", + "RocksDB perf context stats", + PLUGIN_LICENSE_GPL, + rdb_i_s_perf_context_init, + rdb_i_s_deinit, + 0x0001, /* version number (0.1) */ + nullptr, /* status variables */ + nullptr, /* system variables */ + nullptr, /* config options */ + 0, /* flags */ +}; + +struct st_maria_plugin rdb_i_s_perf_context_global = { + MYSQL_INFORMATION_SCHEMA_PLUGIN, + &rdb_i_s_info, + "ROCKSDB_PERF_CONTEXT_GLOBAL", + "Facebook", + "RocksDB perf context stats (all)", + PLUGIN_LICENSE_GPL, + rdb_i_s_perf_context_global_init, + rdb_i_s_deinit, + 0x0001, /* version number (0.1) */ + nullptr, /* status variables */ + nullptr, /* system variables */ + nullptr, /* config options */ + 0, /* flags */ +}; + +struct st_maria_plugin rdb_i_s_cfoptions = { + MYSQL_INFORMATION_SCHEMA_PLUGIN, + &rdb_i_s_info, + "ROCKSDB_CF_OPTIONS", + "Facebook", + "RocksDB column family options", + PLUGIN_LICENSE_GPL, + rdb_i_s_cfoptions_init, + rdb_i_s_deinit, + 0x0001, /* version number (0.1) */ + nullptr, /* status variables */ + nullptr, /* system variables */ + nullptr, /* config options */ + 0, /* flags */ +}; + +struct st_maria_plugin rdb_i_s_global_info = { + MYSQL_INFORMATION_SCHEMA_PLUGIN, + &rdb_i_s_info, + "ROCKSDB_GLOBAL_INFO", + "Facebook", + "RocksDB global info", + PLUGIN_LICENSE_GPL, + rdb_i_s_global_info_init, + rdb_i_s_deinit, + 0x0001, /* version number (0.1) */ + nullptr, /* status variables */ + nullptr, /* system variables */ + nullptr, /* config options */ + 0, /* flags */ +}; + +struct st_maria_plugin rdb_i_s_compact_stats = { + MYSQL_INFORMATION_SCHEMA_PLUGIN, + &rdb_i_s_info, + "ROCKSDB_COMPACTION_STATS", + "Facebook", + "RocksDB compaction stats", + PLUGIN_LICENSE_GPL, + rdb_i_s_compact_stats_init, + rdb_i_s_deinit, + 0x0001, /* version number (0.1) */ + nullptr, /* status variables */ + 
nullptr, /* system variables */ + nullptr, /* config options */ + 0, /* flags */ +}; + +struct st_maria_plugin rdb_i_s_ddl = { + MYSQL_INFORMATION_SCHEMA_PLUGIN, + &rdb_i_s_info, + "ROCKSDB_DDL", + "Facebook", + "RocksDB Data Dictionary", + PLUGIN_LICENSE_GPL, + rdb_i_s_ddl_init, + rdb_i_s_deinit, + 0x0001, /* version number (0.1) */ + nullptr, /* status variables */ + nullptr, /* system variables */ + nullptr, /* config options */ + 0, /* flags */ +}; + +struct st_maria_plugin rdb_i_s_index_file_map = { + MYSQL_INFORMATION_SCHEMA_PLUGIN, + &rdb_i_s_info, + "ROCKSDB_INDEX_FILE_MAP", + "Facebook", + "RocksDB index file map", + PLUGIN_LICENSE_GPL, + rdb_i_s_index_file_map_init, + rdb_i_s_deinit, + 0x0001, /* version number (0.1) */ + nullptr, /* status variables */ + nullptr, /* system variables */ + nullptr, /* config options */ + 0, /* flags */ +}; + +struct st_maria_plugin rdb_i_s_lock_info = { + MYSQL_INFORMATION_SCHEMA_PLUGIN, + &rdb_i_s_info, + "ROCKSDB_LOCKS", + "Facebook", + "RocksDB lock information", + PLUGIN_LICENSE_GPL, + rdb_i_s_lock_info_init, + nullptr, + 0x0001, /* version number (0.1) */ + nullptr, /* status variables */ + nullptr, /* system variables */ + nullptr, /* config options */ + 0, /* flags */ +}; + +struct st_maria_plugin rdb_i_s_trx_info = { + MYSQL_INFORMATION_SCHEMA_PLUGIN, + &rdb_i_s_info, + "ROCKSDB_TRX", + "Facebook", + "RocksDB transaction information", + PLUGIN_LICENSE_GPL, + rdb_i_s_trx_info_init, + nullptr, + 0x0001, /* version number (0.1) */ + nullptr, /* status variables */ + nullptr, /* system variables */ + nullptr, /* config options */ + 0, /* flags */ +}; +} // namespace myrocks diff --git a/storage/rocksdb/rdb_i_s.h b/storage/rocksdb/rdb_i_s.h new file mode 100644 index 0000000000000..08d35e17ba9b0 --- /dev/null +++ b/storage/rocksdb/rdb_i_s.h @@ -0,0 +1,35 @@ +/* + Copyright (c) 2012,2013 Monty Program Ab + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General 
Public License as published by + the Free Software Foundation; version 2 of the License. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ +#pragma once + +namespace myrocks { + +/* + Declare INFORMATION_SCHEMA (I_S) plugins needed by MyRocks storage engine. +*/ + +extern struct st_maria_plugin rdb_i_s_cfstats; +extern struct st_maria_plugin rdb_i_s_dbstats; +extern struct st_maria_plugin rdb_i_s_perf_context; +extern struct st_maria_plugin rdb_i_s_perf_context_global; +extern struct st_maria_plugin rdb_i_s_cfoptions; +extern struct st_maria_plugin rdb_i_s_compact_stats; +extern struct st_maria_plugin rdb_i_s_global_info; +extern struct st_maria_plugin rdb_i_s_ddl; +extern struct st_maria_plugin rdb_i_s_index_file_map; +extern struct st_maria_plugin rdb_i_s_lock_info; +extern struct st_maria_plugin rdb_i_s_trx_info; +} // namespace myrocks diff --git a/storage/rocksdb/rdb_index_merge.cc b/storage/rocksdb/rdb_index_merge.cc new file mode 100644 index 0000000000000..b2bab1f4a18eb --- /dev/null +++ b/storage/rocksdb/rdb_index_merge.cc @@ -0,0 +1,587 @@ +/* + Copyright (c) 2016, Facebook, Inc. + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; version 2 of the License. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. 
+ + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ + +#include + +/* This C++ file's header file */ +#include "./rdb_index_merge.h" + +/* MySQL header files */ +#include "../sql/sql_class.h" + +/* MyRocks header files */ +#include "./ha_rocksdb.h" +#include "./rdb_datadic.h" + +namespace myrocks { + +Rdb_index_merge::Rdb_index_merge(const char *const tmpfile_path, + const ulonglong &merge_buf_size, + const ulonglong &merge_combine_read_size, + const rocksdb::Comparator *const comparator) + : m_tmpfile_path(tmpfile_path), m_merge_buf_size(merge_buf_size), + m_merge_combine_read_size(merge_combine_read_size), + m_comparator(comparator), m_rec_buf_unsorted(nullptr), + m_output_buf(nullptr) {} + +Rdb_index_merge::~Rdb_index_merge() { + /* + Close tmp file, we don't need to worry about deletion, mysql handles it. + */ + my_close(m_merge_file.fd, MYF(MY_WME)); +} + +int Rdb_index_merge::init() { + /* + Create a temporary merge file on disk to store sorted chunks during + inplace index creation. + */ + if (merge_file_create()) { + return HA_ERR_INTERNAL_ERROR; + } + + /* + Then, allocate buffer to store unsorted records before they are written + to disk. They will be written to disk sorted. A sorted tree is used to + keep track of the offset of each record within the unsorted buffer. + */ + m_rec_buf_unsorted = + std::shared_ptr(new merge_buf_info(m_merge_buf_size)); + + /* + Allocate output buffer that will contain sorted block that is written to + disk. + */ + m_output_buf = + std::shared_ptr(new merge_buf_info(m_merge_buf_size)); + + return HA_EXIT_SUCCESS; +} + +/** + Create a merge file in the given location. 
+*/ +int Rdb_index_merge::merge_file_create() { + DBUG_ASSERT(m_merge_file.fd == -1); + + int fd; +#ifdef MARIAROCKS_NOT_YET // mysql_tmpfile_path use + /* If no path set for tmpfile, use mysql_tmpdir by default */ + if (m_tmpfile_path == nullptr) { + fd = mysql_tmpfile("myrocks"); + } else { + fd = mysql_tmpfile_path(m_tmpfile_path, "myrocks"); + } +#else + fd = mysql_tmpfile("myrocks"); +#endif + if (fd < 0) { + return HA_ERR_INTERNAL_ERROR; + } + + m_merge_file.fd = fd; + m_merge_file.num_sort_buffers = 0; + + return HA_EXIT_SUCCESS; +} + +/** + Add record to offset tree (and unsorted merge buffer) in preparation for + writing out to disk in sorted chunks. + + If buffer in memory is full, write the buffer out to disk sorted using the + offset tree, and clear the tree. (Happens in merge_buf_write) +*/ +int Rdb_index_merge::add(const rocksdb::Slice &key, const rocksdb::Slice &val) { + /* Adding a record after heap is already created results in error */ + DBUG_ASSERT(m_merge_min_heap.empty()); + + /* + Check if sort buffer is going to be out of space, if so write it + out to disk in sorted order using offset tree. + */ + const uint total_offset = RDB_MERGE_CHUNK_LEN + + m_rec_buf_unsorted->curr_offset + + RDB_MERGE_KEY_DELIMITER + RDB_MERGE_VAL_DELIMITER + + key.size() + val.size(); + if (total_offset >= m_rec_buf_unsorted->total_size) { + /* + If the offset tree is empty here, that means that the proposed key to + add is too large for the buffer. + */ + if (m_offset_tree.empty()) { + // NO_LINT_DEBUG + sql_print_error("Sort buffer size is too small to process merge. " + "Please set merge buffer size to a higher value."); + return HA_ERR_INTERNAL_ERROR; + } + + if (merge_buf_write()) { + // NO_LINT_DEBUG + sql_print_error("Error writing sort buffer to disk."); + return HA_ERR_INTERNAL_ERROR; + } + } + + const ulonglong rec_offset = m_rec_buf_unsorted->curr_offset; + + /* + Store key and value in temporary unsorted in memory buffer pointed to by + offset tree. 
+ */ + m_rec_buf_unsorted->store_key_value(key, val); + + /* Find sort order of the new record */ + m_offset_tree.emplace(m_rec_buf_unsorted->block.get() + rec_offset, + m_comparator); + + return HA_EXIT_SUCCESS; +} + +/** + Sort + write merge buffer chunk out to disk. +*/ +int Rdb_index_merge::merge_buf_write() { + DBUG_ASSERT(m_merge_file.fd != -1); + DBUG_ASSERT(m_rec_buf_unsorted != nullptr); + DBUG_ASSERT(m_output_buf != nullptr); + DBUG_ASSERT(!m_offset_tree.empty()); + + /* Write actual chunk size to first 8 bytes of the merge buffer */ + merge_store_uint64(m_output_buf->block.get(), + m_rec_buf_unsorted->curr_offset + RDB_MERGE_CHUNK_LEN); + m_output_buf->curr_offset += RDB_MERGE_CHUNK_LEN; + + /* + Iterate through the offset tree. Should be ordered by the secondary key + at this point. + */ + for (const auto &rec : m_offset_tree) { + DBUG_ASSERT(m_output_buf->curr_offset <= m_merge_buf_size); + + /* Read record from offset (should never fail) */ + rocksdb::Slice key; + rocksdb::Slice val; + merge_read_rec(rec.block, &key, &val); + + /* Store key and value into sorted output buffer */ + m_output_buf->store_key_value(key, val); + } + + DBUG_ASSERT(m_output_buf->curr_offset <= m_output_buf->total_size); + + /* + Write output buffer to disk. + + Need to position cursor to the chunk it needs to be at on filesystem + then write into the respective merge buffer. + */ + if (my_seek(m_merge_file.fd, m_merge_file.num_sort_buffers * m_merge_buf_size, + SEEK_SET, MYF(0)) == MY_FILEPOS_ERROR) { + // NO_LINT_DEBUG + sql_print_error("Error seeking to location in merge file on disk."); + return HA_ERR_INTERNAL_ERROR; + } + + /* + Add a file sync call here to flush the data out. Otherwise, the filesystem + cache can flush out all of the files at the same time, causing a write + burst. 
+ */ + if (my_write(m_merge_file.fd, m_output_buf->block.get(), + m_output_buf->total_size, MYF(MY_WME | MY_NABP)) || + mysql_file_sync(m_merge_file.fd, MYF(MY_WME))) { + // NO_LINT_DEBUG + sql_print_error("Error writing sorted merge buffer to disk."); + return HA_ERR_INTERNAL_ERROR; + } + + /* Increment merge file offset to track number of merge buffers written */ + m_merge_file.num_sort_buffers += 1; + + /* Reset everything for next run */ + merge_reset(); + + return HA_EXIT_SUCCESS; +} + +/** + Prepare n-way merge of n sorted buffers on disk, using a heap sorted by + secondary key records. +*/ +int Rdb_index_merge::merge_heap_prepare() { + DBUG_ASSERT(m_merge_min_heap.empty()); + + /* + If the offset tree is not empty, there are still some records that need to + be written to disk. Write them out now. + */ + if (!m_offset_tree.empty() && merge_buf_write()) { + return HA_ERR_INTERNAL_ERROR; + } + + DBUG_ASSERT(m_merge_file.num_sort_buffers > 0); + + /* + For an n-way merge, we need to read chunks of each merge file + simultaneously. + */ + ulonglong chunk_size = + m_merge_combine_read_size / m_merge_file.num_sort_buffers; + if (chunk_size >= m_merge_buf_size) { + chunk_size = m_merge_buf_size; + } + + /* Allocate buffers for each chunk */ + for (ulonglong i = 0; i < m_merge_file.num_sort_buffers; i++) { + const auto entry = std::make_shared(m_comparator); + + /* + Read chunk_size bytes from each chunk on disk, and place inside + respective chunk buffer. 
+ */ + const size_t total_size = + entry->prepare(m_merge_file.fd, i * m_merge_buf_size, chunk_size); + + if (total_size == (size_t)-1) { + return HA_ERR_INTERNAL_ERROR; + } + + /* Can reach this condition if an index was added on table w/ no rows */ + if (total_size - RDB_MERGE_CHUNK_LEN == 0) { + break; + } + + /* Read the first record from each buffer to initially populate the heap */ + if (entry->read_rec(&entry->key, &entry->val)) { + // NO_LINT_DEBUG + sql_print_error("Chunk size is too small to process merge."); + return HA_ERR_INTERNAL_ERROR; + } + + m_merge_min_heap.push(std::move(entry)); + } + + return HA_EXIT_SUCCESS; +} + +/** + Create and/or iterate through keys in the merge heap. +*/ +int Rdb_index_merge::next(rocksdb::Slice *const key, + rocksdb::Slice *const val) { + /* + If table fits in one sort buffer, we can optimize by writing + the sort buffer directly through to the sstfilewriter instead of + needing to create tmp files/heap to merge the sort buffers. + + If there are no sort buffer records (alters on empty tables), + also exit here. + */ + if (m_merge_file.num_sort_buffers == 0) { + if (m_offset_tree.empty()) { + return -1; + } + + const auto rec = m_offset_tree.begin(); + + /* Read record from offset */ + merge_read_rec(rec->block, key, val); + + m_offset_tree.erase(rec); + return HA_EXIT_SUCCESS; + } + + int res; + + /* + If heap and heap chunk info are empty, we must be beginning the merge phase + of the external sort. Populate the heap with initial values from each + disk chunk. + */ + if (m_merge_min_heap.empty()) { + if ((res = merge_heap_prepare())) { + // NO_LINT_DEBUG + sql_print_error("Error during preparation of heap."); + return res; + } + + /* + Return the first top record without popping, as we haven't put this + inside the SST file yet. 
+ */ + merge_heap_top(key, val); + return HA_EXIT_SUCCESS; + } + + DBUG_ASSERT(!m_merge_min_heap.empty()); + return merge_heap_pop_and_get_next(key, val); +} + +/** + Get current top record from the heap. +*/ +void Rdb_index_merge::merge_heap_top(rocksdb::Slice *const key, + rocksdb::Slice *const val) { + DBUG_ASSERT(!m_merge_min_heap.empty()); + + const std::shared_ptr &entry = m_merge_min_heap.top(); + *key = entry->key; + *val = entry->val; +} + +/** + Pops the top record, and uses it to read next record from the + corresponding sort buffer and push onto the heap. + + Returns -1 when there are no more records in the heap. +*/ +int Rdb_index_merge::merge_heap_pop_and_get_next(rocksdb::Slice *const key, + rocksdb::Slice *const val) { + /* + Make a new reference to shared ptr so it doesn't get destroyed + during pop(). We are going to push this entry back onto the heap. + */ + const std::shared_ptr entry = m_merge_min_heap.top(); + m_merge_min_heap.pop(); + + /* + We are finished w/ current chunk if: + current_offset + disk_offset == total_size + + Return without adding entry back onto heap. + If heap is also empty, we must be finished with merge. + */ + if (entry->chunk_info->is_chunk_finished()) { + if (m_merge_min_heap.empty()) { + return -1; + } + + merge_heap_top(key, val); + return HA_EXIT_SUCCESS; + } + + /* + Make sure we haven't reached the end of the chunk. + */ + DBUG_ASSERT(!entry->chunk_info->is_chunk_finished()); + + /* + If merge_read_rec fails, it means the either the chunk was cut off + or we've reached the end of the respective chunk. + */ + if (entry->read_rec(&entry->key, &entry->val)) { + if (entry->read_next_chunk_from_disk(m_merge_file.fd)) { + return HA_ERR_INTERNAL_ERROR; + } + + /* Try reading record again, should never fail. 
*/ + if (entry->read_rec(&entry->key, &entry->val)) { + return HA_ERR_INTERNAL_ERROR; + } + } + + /* Push entry back on to the heap w/ updated buffer + offset ptr */ + m_merge_min_heap.push(std::move(entry)); + + /* Return the current top record on heap */ + merge_heap_top(key, val); + return HA_EXIT_SUCCESS; +} + +int Rdb_index_merge::merge_heap_entry::read_next_chunk_from_disk(File fd) { + if (chunk_info->read_next_chunk_from_disk(fd)) { + return HA_EXIT_FAILURE; + } + + block = chunk_info->block.get(); + return HA_EXIT_SUCCESS; +} + +int Rdb_index_merge::merge_buf_info::read_next_chunk_from_disk(File fd) { + disk_curr_offset += curr_offset; + + if (my_seek(fd, disk_curr_offset, SEEK_SET, MYF(0)) == MY_FILEPOS_ERROR) { + // NO_LINT_DEBUG + sql_print_error("Error seeking to location in merge file on disk."); + return HA_EXIT_FAILURE; + } + + /* Overwrite the old block */ + const size_t bytes_read = my_read(fd, block.get(), block_len, MYF(MY_WME)); + if (bytes_read == (size_t)-1) { + // NO_LINT_DEBUG + sql_print_error("Error reading merge file from disk."); + return HA_EXIT_FAILURE; + } + + curr_offset = 0; + return HA_EXIT_SUCCESS; +} + +/** + Get records from offset within sort buffer and compare them. + Sort by least to greatest. +*/ +int Rdb_index_merge::merge_record_compare( + const uchar *const a_block, const uchar *const b_block, + const rocksdb::Comparator *const comparator) { + return comparator->Compare(as_slice(a_block), as_slice(b_block)); +} + +/** + Given an offset in a merge sort buffer, read out the keys + values. + After this, block will point to the next record in the buffer. 
+**/ +void Rdb_index_merge::merge_read_rec(const uchar *const block, + rocksdb::Slice *const key, + rocksdb::Slice *const val) { + /* Read key at block offset into key slice and the value into value slice*/ + read_slice(key, block); + read_slice(val, block + RDB_MERGE_REC_DELIMITER + key->size()); +} + +void Rdb_index_merge::read_slice(rocksdb::Slice *slice, + const uchar *block_ptr) { + uint64 slice_len; + merge_read_uint64(&block_ptr, &slice_len); + + *slice = rocksdb::Slice(reinterpret_cast(block_ptr), slice_len); +} + +int Rdb_index_merge::merge_heap_entry::read_rec(rocksdb::Slice *const key, + rocksdb::Slice *const val) { + const uchar *block_ptr = block; + const auto orig_offset = chunk_info->curr_offset; + const auto orig_block = block; + + /* Read key at block offset into key slice and the value into value slice*/ + if (read_slice(key, &block_ptr) != 0) { + return HA_EXIT_FAILURE; + } + + chunk_info->curr_offset += (uintptr_t)block_ptr - (uintptr_t)block; + block += (uintptr_t)block_ptr - (uintptr_t)block; + + if (read_slice(val, &block_ptr) != 0) { + chunk_info->curr_offset = orig_offset; + block = orig_block; + return HA_EXIT_FAILURE; + } + + chunk_info->curr_offset += (uintptr_t)block_ptr - (uintptr_t)block; + block += (uintptr_t)block_ptr - (uintptr_t)block; + + return HA_EXIT_SUCCESS; +} + +int Rdb_index_merge::merge_heap_entry::read_slice(rocksdb::Slice *const slice, + const uchar **block_ptr) { + if (!chunk_info->has_space(RDB_MERGE_REC_DELIMITER)) { + return HA_EXIT_FAILURE; + } + + uint64 slice_len; + merge_read_uint64(block_ptr, &slice_len); + if (!chunk_info->has_space(RDB_MERGE_REC_DELIMITER + slice_len)) { + return HA_EXIT_FAILURE; + } + + *slice = + rocksdb::Slice(reinterpret_cast(*block_ptr), slice_len); + *block_ptr += slice_len; + return HA_EXIT_SUCCESS; +} + +size_t Rdb_index_merge::merge_heap_entry::prepare(File fd, ulonglong f_offset, + ulonglong chunk_size) { + chunk_info = std::make_shared(chunk_size); + const size_t res = 
chunk_info->prepare(fd, f_offset); + if (res != (size_t)-1) { + block = chunk_info->block.get() + RDB_MERGE_CHUNK_LEN; + } + + return res; +} + +size_t Rdb_index_merge::merge_buf_info::prepare(File fd, ulonglong f_offset) { + disk_start_offset = f_offset; + disk_curr_offset = f_offset; + + /* + Need to position cursor to the chunk it needs to be at on filesystem + then read 'chunk_size' bytes into the respective chunk buffer. + */ + if (my_seek(fd, f_offset, SEEK_SET, MYF(0)) == MY_FILEPOS_ERROR) { + // NO_LINT_DEBUG + sql_print_error("Error seeking to location in merge file on disk."); + return (size_t)-1; + } + + const size_t bytes_read = my_read(fd, block.get(), total_size, MYF(MY_WME)); + if (bytes_read == (size_t)-1) { + // NO_LINT_DEBUG + sql_print_error("Error reading merge file from disk."); + return (size_t)-1; + } + + /* + Read the first 8 bytes of each chunk, this gives us the actual + size of each chunk. + */ + const uchar *block_ptr = block.get(); + merge_read_uint64(&block_ptr, &total_size); + curr_offset += RDB_MERGE_CHUNK_LEN; + return total_size; +} + +/* Store key and value w/ their respective delimiters at the given offset */ +void Rdb_index_merge::merge_buf_info::store_key_value( + const rocksdb::Slice &key, const rocksdb::Slice &val) { + store_slice(key); + store_slice(val); +} + +void Rdb_index_merge::merge_buf_info::store_slice(const rocksdb::Slice &slice) { + /* Store length delimiter */ + merge_store_uint64(&block[curr_offset], slice.size()); + + /* Store slice data */ + memcpy(&block[curr_offset + RDB_MERGE_REC_DELIMITER], slice.data(), + slice.size()); + + curr_offset += slice.size() + RDB_MERGE_REC_DELIMITER; +} + +void Rdb_index_merge::merge_reset() { + /* + Either error, or all values in the sort buffer have been written to disk, + so we need to clear the offset tree. 
+ */ + m_offset_tree.clear(); + + /* Reset sort buffer block */ + if (m_rec_buf_unsorted && m_rec_buf_unsorted->block) { + m_rec_buf_unsorted->curr_offset = 0; + } + + /* Reset output buf */ + if (m_output_buf && m_output_buf->block) { + m_output_buf->curr_offset = 0; + } +} + +} // namespace myrocks diff --git a/storage/rocksdb/rdb_index_merge.h b/storage/rocksdb/rdb_index_merge.h new file mode 100644 index 0000000000000..9d1469fc34e25 --- /dev/null +++ b/storage/rocksdb/rdb_index_merge.h @@ -0,0 +1,218 @@ +/* + Copyright (c) 2016, Facebook, Inc. + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; version 2 of the License. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ + +#pragma once + +/* MySQL header files */ +#include "../sql/log.h" +#include "./handler.h" /* handler */ +#include "./my_global.h" /* ulonglong */ + +/* C++ standard header files */ +#include +#include +#include + +/* RocksDB header files */ +#include "rocksdb/db.h" + +/* MyRocks header files */ +#include "./rdb_comparator.h" + +namespace myrocks { + +/* + Length of delimiters used during inplace index creation. 
+*/ +#define RDB_MERGE_CHUNK_LEN sizeof(size_t) +#define RDB_MERGE_REC_DELIMITER sizeof(size_t) +#define RDB_MERGE_KEY_DELIMITER RDB_MERGE_REC_DELIMITER +#define RDB_MERGE_VAL_DELIMITER RDB_MERGE_REC_DELIMITER + +class Rdb_key_def; +class Rdb_tbl_def; + +class Rdb_index_merge { + Rdb_index_merge(const Rdb_index_merge &p) = delete; + Rdb_index_merge &operator=(const Rdb_index_merge &p) = delete; + +public: + /* Information about temporary files used in external merge sort */ + struct merge_file_info { + File fd = -1; /* file descriptor */ + ulong num_sort_buffers; /* number of sort buffers in temp file */ + }; + + /* Buffer for sorting in main memory. */ + struct merge_buf_info { + /* heap memory allocated for main memory sort/merge */ + std::unique_ptr block; + const ulonglong + block_len; /* amount of data bytes allocated for block above */ + ulonglong curr_offset; /* offset of the record pointer for the block */ + ulonglong disk_start_offset; /* where the chunk starts on disk */ + ulonglong disk_curr_offset; /* current offset on disk */ + ulonglong total_size; /* total # of data bytes in chunk */ + + void store_key_value(const rocksdb::Slice &key, const rocksdb::Slice &val) + MY_ATTRIBUTE((__nonnull__)); + + void store_slice(const rocksdb::Slice &slice) MY_ATTRIBUTE((__nonnull__)); + + size_t prepare(File fd, ulonglong f_offset) MY_ATTRIBUTE((__nonnull__)); + + int read_next_chunk_from_disk(File fd) + MY_ATTRIBUTE((__nonnull__, __warn_unused_result__)); + + inline bool is_chunk_finished() const { + return curr_offset + disk_curr_offset - disk_start_offset == total_size; + } + + inline bool has_space(uint64 needed) const { + return curr_offset + needed <= block_len; + } + + explicit merge_buf_info(const ulonglong merge_block_size) + : block(nullptr), block_len(merge_block_size), curr_offset(0), + disk_start_offset(0), disk_curr_offset(0), + total_size(merge_block_size) { + /* Will throw an exception if it runs out of memory here */ + block = std::unique_ptr(new 
uchar[merge_block_size]); + + /* Initialize entire buffer to 0 to avoid valgrind errors */ + memset(block.get(), 0, merge_block_size); + } + }; + + /* Represents an entry in the heap during merge phase of external sort */ + struct merge_heap_entry { + std::shared_ptr chunk_info; /* pointer to buffer info */ + uchar *block; /* pointer to heap memory where record is stored */ + const rocksdb::Comparator *const comparator; + rocksdb::Slice key; /* current key pointed to by block ptr */ + rocksdb::Slice val; + + size_t prepare(File fd, ulonglong f_offset, ulonglong chunk_size) + MY_ATTRIBUTE((__nonnull__)); + + int read_next_chunk_from_disk(File fd) + MY_ATTRIBUTE((__nonnull__, __warn_unused_result__)); + + int read_rec(rocksdb::Slice *const key, rocksdb::Slice *const val) + MY_ATTRIBUTE((__nonnull__, __warn_unused_result__)); + + int read_slice(rocksdb::Slice *const slice, const uchar **block_ptr) + MY_ATTRIBUTE((__nonnull__, __warn_unused_result__)); + + explicit merge_heap_entry(const rocksdb::Comparator *const comparator) + : chunk_info(nullptr), block(nullptr), comparator(comparator) {} + }; + + struct merge_heap_comparator { + bool operator()(const std::shared_ptr &lhs, + const std::shared_ptr &rhs) { + return lhs->comparator->Compare(rhs->key, lhs->key) < 0; + } + }; + + /* Represents a record in unsorted buffer */ + struct merge_record { + uchar *block; /* points to offset of key in sort buffer */ + const rocksdb::Comparator *const comparator; + + bool operator<(const merge_record &record) const { + return merge_record_compare(this->block, record.block, comparator) < 0; + } + + merge_record(uchar *const block, + const rocksdb::Comparator *const comparator) + : block(block), comparator(comparator) {} + }; + +private: + const char *m_tmpfile_path; + const ulonglong m_merge_buf_size; + const ulonglong m_merge_combine_read_size; + const rocksdb::Comparator *m_comparator; + struct merge_file_info m_merge_file; + std::shared_ptr m_rec_buf_unsorted; + std::shared_ptr 
m_output_buf; + std::set m_offset_tree; + std::priority_queue, + std::vector>, + merge_heap_comparator> + m_merge_min_heap; + + static inline void merge_store_uint64(uchar *const dst, uint64 n) { + memcpy(dst, &n, sizeof(n)); + } + + static inline void merge_read_uint64(const uchar **buf_ptr, + uint64 *const dst) { + DBUG_ASSERT(buf_ptr != nullptr); + memcpy(dst, *buf_ptr, sizeof(uint64)); + *buf_ptr += sizeof(uint64); + } + + static inline rocksdb::Slice as_slice(const uchar *block) { + uint64 len; + merge_read_uint64(&block, &len); + + return rocksdb::Slice(reinterpret_cast(block), len); + } + + static int merge_record_compare(const uchar *a_block, const uchar *b_block, + const rocksdb::Comparator *const comparator) + MY_ATTRIBUTE((__nonnull__, __warn_unused_result__)); + + void merge_read_rec(const uchar *const block, rocksdb::Slice *const key, + rocksdb::Slice *const val) MY_ATTRIBUTE((__nonnull__)); + + void read_slice(rocksdb::Slice *slice, const uchar *block_ptr) + MY_ATTRIBUTE((__nonnull__)); + +public: + Rdb_index_merge(const char *const tmpfile_path, + const ulonglong &merge_buf_size, + const ulonglong &merge_combine_read_size, + const rocksdb::Comparator *const comparator); + ~Rdb_index_merge(); + + int init() MY_ATTRIBUTE((__nonnull__, __warn_unused_result__)); + + int merge_file_create() MY_ATTRIBUTE((__nonnull__, __warn_unused_result__)); + + int add(const rocksdb::Slice &key, const rocksdb::Slice &val) + MY_ATTRIBUTE((__nonnull__, __warn_unused_result__)); + + int merge_buf_write() MY_ATTRIBUTE((__nonnull__, __warn_unused_result__)); + + int next(rocksdb::Slice *const key, rocksdb::Slice *const val) + MY_ATTRIBUTE((__nonnull__, __warn_unused_result__)); + + int merge_heap_prepare() MY_ATTRIBUTE((__nonnull__, __warn_unused_result__)); + + void merge_heap_top(rocksdb::Slice *key, rocksdb::Slice *val) + MY_ATTRIBUTE((__nonnull__)); + + int merge_heap_pop_and_get_next(rocksdb::Slice *const key, + rocksdb::Slice *const val) + MY_ATTRIBUTE((__nonnull__, 
__warn_unused_result__)); + + void merge_reset(); +}; + +} // namespace myrocks diff --git a/storage/rocksdb/rdb_mariadb_port.h b/storage/rocksdb/rdb_mariadb_port.h new file mode 100644 index 0000000000000..f6f06eeb361a3 --- /dev/null +++ b/storage/rocksdb/rdb_mariadb_port.h @@ -0,0 +1,45 @@ +/* + A temporary header to resolve WebScaleSQL vs MariaDB differences + when porting MyRocks to MariaDB. +*/ +#ifndef RDB_MARIADB_PORT_H +#define RDB_MARIADB_PORT_H + +#include "my_global.h" /* ulonglong */ +#include "atomic_stat.h" + +/* The following is copied from storage/innobase/univ.i: */ +#ifndef MY_ATTRIBUTE +#if defined(__GNUC__) +# define MY_ATTRIBUTE(A) __attribute__(A) +#else +# define MY_ATTRIBUTE(A) +#endif +#endif + +/* Struct used for IO performance counters, shared among multiple threads */ +struct my_io_perf_atomic_struct { + atomic_stat bytes; + atomic_stat requests; + atomic_stat svc_time; /*!< time to do read or write operation */ + atomic_stat svc_time_max; + atomic_stat wait_time; /*!< total time in the request array */ + atomic_stat wait_time_max; + atomic_stat slow_ios; /*!< requests that take too long */ +}; +typedef struct my_io_perf_atomic_struct my_io_perf_atomic_t; + +//////////////////////////////////////////////////////////////////////////// + +/* + Temporary stand-in for + fae59683dc116be2cc78b0b30d61c84659c33bd3 + Print stack traces before committing suicide + +*/ +#define abort_with_stack_traces() { abort(); } + +//////////////////////////////////////////////////////////////////////////// +typedef struct my_io_perf_struct my_io_perf_t; + +#endif diff --git a/storage/rocksdb/rdb_mariadb_server_port.cc b/storage/rocksdb/rdb_mariadb_server_port.cc new file mode 100644 index 0000000000000..bd2c730d33dc6 --- /dev/null +++ b/storage/rocksdb/rdb_mariadb_server_port.cc @@ -0,0 +1,97 @@ +#include + + +/* MySQL includes */ +#include "./debug_sync.h" +#include "./my_bit.h" +#include "./my_stacktrace.h" +#include "./sql_table.h" +#include "./my_global.h" 
+#include "./log.h" +#include +#include +#ifdef MARIAROCKS_NOT_YET +#include +#endif + +#include + +/* MyRocks includes */ +#include "./rdb_threads.h" + +#include "rdb_mariadb_server_port.h" + +void warn_about_bad_patterns(const Regex_list_handler* regex_list_handler, + const char *name) +{ + // There was some invalid regular expression data in the patterns supplied + + // NO_LINT_DEBUG + sql_print_warning("Invalid pattern in %s: %s", name, + regex_list_handler->bad_pattern().c_str()); +} + + +/* + Set the patterns string. If there are invalid regex patterns they will + be stored in m_bad_patterns and the result will be false, otherwise the + result will be true. +*/ +bool Regex_list_handler::set_patterns(const std::string& pattern_str) +{ + bool pattern_valid= true; + + // Create a normalized version of the pattern string with all delimiters + // replaced by the '|' character + std::string norm_pattern= pattern_str; + std::replace(norm_pattern.begin(), norm_pattern.end(), m_delimiter, '|'); + + // Make sure no one else is accessing the list while we are changing it. + mysql_rwlock_wrlock(&m_rwlock); + + // Clear out any old error information + m_bad_pattern_str.clear(); + + try + { + // Replace all delimiters with the '|' operator and create the regex + // Note that this means the delimiter can not be part of a regular + // expression. This is currently not a problem as we are using the comma + // character as a delimiter and commas are not valid in table names. + const std::regex* pattern= new std::regex(norm_pattern); + + // Free any existing regex information and setup the new one + delete m_pattern; + m_pattern= pattern; + } + catch (const std::regex_error&) + { + // This pattern is invalid. + pattern_valid= false; + + // Put the bad pattern into a member variable so it can be retrieved later. 
+ m_bad_pattern_str= pattern_str; + } + + // Release the lock + mysql_rwlock_unlock(&m_rwlock); + + return pattern_valid; +} + +bool Regex_list_handler::matches(const std::string& str) const +{ + DBUG_ASSERT(m_pattern != nullptr); + + // Make sure no one else changes the list while we are accessing it. + mysql_rwlock_rdlock(&m_rwlock); + + // See if the table name matches the regex we have created + bool found= std::regex_match(str, *m_pattern); + + // Release the lock + mysql_rwlock_unlock(&m_rwlock); + + return found; +} + diff --git a/storage/rocksdb/rdb_mariadb_server_port.h b/storage/rocksdb/rdb_mariadb_server_port.h new file mode 100644 index 0000000000000..e424fbb91f868 --- /dev/null +++ b/storage/rocksdb/rdb_mariadb_server_port.h @@ -0,0 +1,73 @@ +/* + A temporary header to resolve WebScaleSQL vs MariaDB differences + when porting MyRocks to MariaDB. +*/ +#ifndef RDB_MARIADB_SERVER_PORT_H +#define RDB_MARIADB_SERVER_PORT_H + +#include "my_global.h" /* ulonglong */ +#include "atomic_stat.h" +#include "my_pthread.h" +#include +#include + +/* + Code that is on SQL layer in facebook/mysql-5.6, + but is part of the storage engine in MariaRocks +*/ +#include + +class Regex_list_handler +{ + private: +#if defined(HAVE_PSI_INTERFACE) + const PSI_rwlock_key& m_key; +#endif + + char m_delimiter; + std::string m_bad_pattern_str; + const std::regex* m_pattern; + + mutable mysql_rwlock_t m_rwlock; + + Regex_list_handler(const Regex_list_handler& other)= delete; + Regex_list_handler& operator=(const Regex_list_handler& other)= delete; + + public: +#if defined(HAVE_PSI_INTERFACE) + Regex_list_handler(const PSI_rwlock_key& key, + char delimiter= ',') : + m_key(key), +#else + Regex_list_handler(char delimiter= ',') : +#endif + m_delimiter(delimiter), + m_bad_pattern_str(""), + m_pattern(nullptr) + { + mysql_rwlock_init(key, &m_rwlock); + } + + ~Regex_list_handler() + { + mysql_rwlock_destroy(&m_rwlock); + delete m_pattern; + } + + // Set the list of patterns + bool 
set_patterns(const std::string& patterns); + + // See if a string matches at least one pattern + bool matches(const std::string& str) const; + + // See the list of bad patterns + const std::string& bad_pattern() const + { + return m_bad_pattern_str; + } +}; + +void warn_about_bad_patterns(const Regex_list_handler* regex_list_handler, + const char *name); + +#endif diff --git a/storage/rocksdb/rdb_mutex_wrapper.cc b/storage/rocksdb/rdb_mutex_wrapper.cc new file mode 100644 index 0000000000000..ed81bc0dc9039 --- /dev/null +++ b/storage/rocksdb/rdb_mutex_wrapper.cc @@ -0,0 +1,215 @@ +/* + Copyright (c) 2015, Facebook, Inc. + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; version 2 of the License. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ + +#include + +/* This C++ file's header file */ +#include "./rdb_mutex_wrapper.h" + +/* The following are for THD_ENTER_COND: */ +#define MYSQL_SERVER 1 +#include "sql_priv.h" +#include "my_decimal.h" +#include "sql_class.h" +//psergey-merge-todo: does MariaDB have/need: #include "../sql/replication.h" + + +/* MyRocks header files */ +#include "./ha_rocksdb.h" +#include "./rdb_utils.h" + + +using namespace rocksdb; + +namespace myrocks { + +static PSI_stage_info stage_waiting_on_row_lock2 = {0, "Waiting for row lock", + 0}; + +static const int64_t ONE_SECOND_IN_MICROSECS = 1000 * 1000; +// A timeout as long as one full non-leap year worth of microseconds is as +// good as infinite timeout. 
+static const int64_t ONE_YEAR_IN_MICROSECS = + ONE_SECOND_IN_MICROSECS * 60 * 60 * 24 * 365; + +Rdb_cond_var::Rdb_cond_var() { mysql_cond_init(0, &m_cond, nullptr); } + +Rdb_cond_var::~Rdb_cond_var() { mysql_cond_destroy(&m_cond); } + +Status Rdb_cond_var::Wait(const std::shared_ptr mutex_arg) { + return WaitFor(mutex_arg, ONE_YEAR_IN_MICROSECS); +} + +/* + @brief + Wait on condition variable. The caller must make sure that we own + *mutex_ptr. The mutex is released and re-acquired by the wait function. + + @param + timeout_micros Timeout in microseconds. Negative value means no timeout. + + @return + Status::OK() - Wait successfull + Status::TimedOut() - Timed out or wait killed (the caller can check + thd_killed() to determine which occurred) +*/ + +Status +Rdb_cond_var::WaitFor(const std::shared_ptr mutex_arg, + int64_t timeout_micros) { + auto *mutex_obj = reinterpret_cast(mutex_arg.get()); + DBUG_ASSERT(mutex_obj != nullptr); + + mysql_mutex_t *const mutex_ptr = &mutex_obj->m_mutex; + + int res = 0; + struct timespec wait_timeout; + + if (timeout_micros < 0) + timeout_micros = ONE_YEAR_IN_MICROSECS; + set_timespec_nsec(wait_timeout, timeout_micros * 1000); + +#ifndef STANDALONE_UNITTEST + PSI_stage_info old_stage; + mysql_mutex_assert_owner(mutex_ptr); + + if (current_thd && mutex_obj->m_old_stage_info.count(current_thd) == 0) { + THD_ENTER_COND(current_thd, &m_cond, mutex_ptr, &stage_waiting_on_row_lock2, + &old_stage); + /* + After the mysql_cond_timedwait we need make this call + + THD_EXIT_COND(thd, &old_stage); + + to inform the SQL layer that KILLable wait has ended. However, + that will cause mutex to be released. Defer the release until the mutex + that is unlocked by RocksDB's Pessimistic Transactions system. 
+ */ + mutex_obj->set_unlock_action(&old_stage); + } + +#endif + bool killed = false; + + do { + res = mysql_cond_timedwait(&m_cond, mutex_ptr, &wait_timeout); + +#ifndef STANDALONE_UNITTEST + if (current_thd) + killed= thd_killed(current_thd); +#endif + } while (!killed && res == EINTR); + + if (res || killed) + return Status::TimedOut(); + else + return Status::OK(); +} + +/* + + @note + This function may be called while not holding the mutex that is used to wait + on the condition variable. + + The manual page says ( http://linux.die.net/man/3/pthread_cond_signal): + + The pthread_cond_broadcast() or pthread_cond_signal() functions may be called + by a thread whether or not it currently owns the mutex that threads calling + pthread_cond_wait() or pthread_cond_timedwait() have associated with the + condition variable during their waits; however, IF PREDICTABLE SCHEDULING + BEHAVIOR IS REQUIRED, THEN THAT MUTEX SHALL BE LOCKED by the thread calling + pthread_cond_broadcast() or pthread_cond_signal(). + + What's "predicate scheduling" and do we need it? The explanation is here: + + https://groups.google.com/forum/?hl=ky#!msg/comp.programming.threads/wEUgPq541v8/ZByyyS8acqMJ + "The problem (from the realtime side) with condition variables is that + if you can signal/broadcast without holding the mutex, and any thread + currently running can acquire an unlocked mutex and check a predicate + without reference to the condition variable, then you can have an + indirect priority inversion." + + Another possible consequence is that one can create spurious wake-ups when + there are multiple threads signaling the condition. + + None of this looks like a problem for our use case. +*/ + +void Rdb_cond_var::Notify() { mysql_cond_signal(&m_cond); } + +/* + @note + This is called without holding the mutex that's used for waiting on the + condition. See ::Notify(). 
+*/ +void Rdb_cond_var::NotifyAll() { mysql_cond_broadcast(&m_cond); } + +Rdb_mutex::Rdb_mutex() { + mysql_mutex_init(0 /* Don't register in P_S. */, &m_mutex, + MY_MUTEX_INIT_FAST); +} + +Rdb_mutex::~Rdb_mutex() { mysql_mutex_destroy(&m_mutex); } + +Status Rdb_mutex::Lock() { + RDB_MUTEX_LOCK_CHECK(m_mutex); + DBUG_ASSERT(m_old_stage_info.count(current_thd) == 0); + return Status::OK(); +} + +// Attempt to acquire lock. If timeout is non-negative, operation may be +// failed after this many milliseconds. +// If implementing a custom version of this class, the implementation may +// choose to ignore the timeout. +// Return OK on success, or other Status on failure. +Status Rdb_mutex::TryLockFor(int64_t timeout_time MY_ATTRIBUTE((__unused__))) { + /* + Note: PThreads API has pthread_mutex_timedlock(), but mysql's + mysql_mutex_* wrappers do not wrap that function. + */ + RDB_MUTEX_LOCK_CHECK(m_mutex); + return Status::OK(); +} + +#ifndef STANDALONE_UNITTEST +void Rdb_mutex::set_unlock_action(const PSI_stage_info *const old_stage_arg) { + DBUG_ASSERT(old_stage_arg != nullptr); + + mysql_mutex_assert_owner(&m_mutex); + DBUG_ASSERT(m_old_stage_info.count(current_thd) == 0); + + m_old_stage_info[current_thd] = + std::make_shared(*old_stage_arg); +} +#endif + +// Unlock Mutex that was successfully locked by Lock() or TryLockUntil() +void Rdb_mutex::UnLock() { +#ifndef STANDALONE_UNITTEST + if (m_old_stage_info.count(current_thd) > 0) { + const std::shared_ptr old_stage = + m_old_stage_info[current_thd]; + m_old_stage_info.erase(current_thd); + /* The following will call mysql_mutex_unlock */ + THD_EXIT_COND(current_thd, old_stage.get()); + return; + } +#endif + RDB_MUTEX_UNLOCK_CHECK(m_mutex); +} + +} // namespace myrocks diff --git a/storage/rocksdb/rdb_mutex_wrapper.h b/storage/rocksdb/rdb_mutex_wrapper.h new file mode 100644 index 0000000000000..287a6112345b2 --- /dev/null +++ b/storage/rocksdb/rdb_mutex_wrapper.h @@ -0,0 +1,143 @@ +/* + Copyright (c) 2015, Facebook, 
Inc. + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; version 2 of the License. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ + +#pragma once + +/* C++ standard header file */ +#include +#include +#include +#include +#include + +/* MySQL header files */ +#include "./my_sys.h" +#include "mysql/plugin.h" + +/* RocksDB header files */ +#include "rocksdb/utilities/transaction_db_mutex.h" +#include "rdb_mariadb_port.h" + +namespace myrocks { + +class Rdb_mutex : public rocksdb::TransactionDBMutex { + Rdb_mutex(const Rdb_mutex &p) = delete; + Rdb_mutex &operator=(const Rdb_mutex &p) = delete; + +public: + Rdb_mutex(); + virtual ~Rdb_mutex(); + + /* + Override parent class's virtual methods of interrest. + */ + + // Attempt to acquire lock. Return OK on success, or other Status on failure. + // If returned status is OK, TransactionDB will eventually call UnLock(). + virtual rocksdb::Status Lock() override; + + // Attempt to acquire lock. If timeout is non-negative, operation should be + // failed after this many microseconds. + // Returns OK on success, + // TimedOut if timed out, + // or other Status on failure. + // If returned status is OK, TransactionDB will eventually call UnLock(). 
+ virtual rocksdb::Status + TryLockFor(int64_t timeout_time MY_ATTRIBUTE((__unused__))) override; + + // Unlock Mutex that was successfully locked by Lock() or TryLockUntil() + virtual void UnLock() override; + +private: + mysql_mutex_t m_mutex; + friend class Rdb_cond_var; + +#ifndef STANDALONE_UNITTEST + void set_unlock_action(const PSI_stage_info *const old_stage_arg); + std::unordered_map<THD *, std::shared_ptr<PSI_stage_info>> m_old_stage_info; +#endif +}; + +class Rdb_cond_var : public rocksdb::TransactionDBCondVar { + Rdb_cond_var(const Rdb_cond_var &) = delete; + Rdb_cond_var &operator=(const Rdb_cond_var &) = delete; + +public: + Rdb_cond_var(); + virtual ~Rdb_cond_var(); + + /* + Override parent class's virtual methods of interest. + */ + + // Block current thread until condition variable is notified by a call to + // Notify() or NotifyAll(). Wait() will be called with mutex locked. + // Returns OK if notified. + // Returns non-OK if TransactionDB should stop waiting and fail the operation. + // May return OK spuriously even if not notified. + virtual rocksdb::Status + Wait(const std::shared_ptr<rocksdb::TransactionDBMutex> mutex) override; + + // Block current thread until condition variable is notified by a call to + // Notify() or NotifyAll(), or if the timeout is reached. + // If timeout is non-negative, operation should be failed after this many + // microseconds. + // If implementing a custom version of this class, the implementation may + // choose to ignore the timeout. + // + // Returns OK if notified. + // Returns TimedOut if timeout is reached. + // Returns other status if TransactionDB should otherwise stop waiting and + // fail the operation. + // May return OK spuriously even if not notified. + virtual rocksdb::Status + WaitFor(const std::shared_ptr<rocksdb::TransactionDBMutex> mutex, + int64_t timeout_time) override; + + // If any threads are waiting on *this, unblock at least one of the + // waiting threads. + virtual void Notify() override; + + // Unblocks all threads waiting on *this. 
+ virtual void NotifyAll() override; + +private: + mysql_cond_t m_cond; +}; + +class Rdb_mutex_factory : public rocksdb::TransactionDBMutexFactory { +public: + Rdb_mutex_factory(const Rdb_mutex_factory &) = delete; + Rdb_mutex_factory &operator=(const Rdb_mutex_factory &) = delete; + Rdb_mutex_factory() {} + /* + Override parent class's virtual methods of interest. + */ + + virtual std::shared_ptr<rocksdb::TransactionDBMutex> + AllocateMutex() override { + return std::make_shared<Rdb_mutex>(); + } + + virtual std::shared_ptr<rocksdb::TransactionDBCondVar> + AllocateCondVar() override { + return std::make_shared<Rdb_cond_var>(); + } + + virtual ~Rdb_mutex_factory() {} +}; + +} // namespace myrocks diff --git a/storage/rocksdb/rdb_perf_context.cc b/storage/rocksdb/rdb_perf_context.cc new file mode 100644 index 0000000000000..7fdee15157e03 --- /dev/null +++ b/storage/rocksdb/rdb_perf_context.cc @@ -0,0 +1,231 @@ +/* + Portions Copyright (c) 2015-Present, Facebook, Inc. + Portions Copyright (c) 2012, Monty Program Ab + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; version 2 of the License. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ + +#include <my_global.h> + +#include "rdb_mariadb_port.h" +/* This C++ file's header file */ +#include "./rdb_perf_context.h" + +/* C++ system header files */ +#include <string> + +/* RocksDB header files */ +#include "rocksdb/iostats_context.h" +#include "rocksdb/perf_context.h" + +/* MyRocks header files */ +#include "./ha_rocksdb_proto.h" + +namespace myrocks { + +// To add a new metric: +// 1. 
Update the PC enum in rdb_perf_context.h +// 2. Update sections (A), (B), and (C) below +// 3. Update perf_context.test and show_engine.test + +std::string rdb_pc_stat_types[] = { + // (A) These should be in the same order as the PC enum + "USER_KEY_COMPARISON_COUNT", + "BLOCK_CACHE_HIT_COUNT", + "BLOCK_READ_COUNT", + "BLOCK_READ_BYTE", + "BLOCK_READ_TIME", + "BLOCK_CHECKSUM_TIME", + "BLOCK_DECOMPRESS_TIME", + "INTERNAL_KEY_SKIPPED_COUNT", + "INTERNAL_DELETE_SKIPPED_COUNT", + "GET_SNAPSHOT_TIME", + "GET_FROM_MEMTABLE_TIME", + "GET_FROM_MEMTABLE_COUNT", + "GET_POST_PROCESS_TIME", + "GET_FROM_OUTPUT_FILES_TIME", + "SEEK_ON_MEMTABLE_TIME", + "SEEK_ON_MEMTABLE_COUNT", + "SEEK_CHILD_SEEK_TIME", + "SEEK_CHILD_SEEK_COUNT", + "SEEK_IN_HEAP_TIME", + "SEEK_INTERNAL_SEEK_TIME", + "FIND_NEXT_USER_ENTRY_TIME", + "WRITE_WAL_TIME", + "WRITE_MEMTABLE_TIME", + "WRITE_DELAY_TIME", + "WRITE_PRE_AND_POST_PROCESS_TIME", + "DB_MUTEX_LOCK_NANOS", + "DB_CONDITION_WAIT_NANOS", + "MERGE_OPERATOR_TIME_NANOS", + "READ_INDEX_BLOCK_NANOS", + "READ_FILTER_BLOCK_NANOS", + "NEW_TABLE_BLOCK_ITER_NANOS", + "NEW_TABLE_ITERATOR_NANOS", + "BLOCK_SEEK_NANOS", + "FIND_TABLE_NANOS", + "IO_THREAD_POOL_ID", + "IO_BYTES_WRITTEN", + "IO_BYTES_READ", + "IO_OPEN_NANOS", + "IO_ALLOCATE_NANOS", + "IO_WRITE_NANOS", + "IO_READ_NANOS", + "IO_RANGE_SYNC_NANOS", + "IO_LOGGER_NANOS"}; + +#define IO_PERF_RECORD(_field_) \ + do { \ + if (rocksdb::perf_context._field_ > 0) \ + counters->m_value[idx] += rocksdb::perf_context._field_; \ + idx++; \ + } while (0) +#define IO_STAT_RECORD(_field_) \ + do { \ + if (rocksdb::iostats_context._field_ > 0) \ + counters->m_value[idx] += rocksdb::iostats_context._field_; \ + idx++; \ + } while (0) + +static void harvest_diffs(Rdb_atomic_perf_counters *const counters) { + // (C) These should be in the same order as the PC enum + size_t idx = 0; + IO_PERF_RECORD(user_key_comparison_count); + IO_PERF_RECORD(block_cache_hit_count); + IO_PERF_RECORD(block_read_count); + 
IO_PERF_RECORD(block_read_byte); + IO_PERF_RECORD(block_read_time); + IO_PERF_RECORD(block_checksum_time); + IO_PERF_RECORD(block_decompress_time); + IO_PERF_RECORD(internal_key_skipped_count); + IO_PERF_RECORD(internal_delete_skipped_count); + IO_PERF_RECORD(get_snapshot_time); + IO_PERF_RECORD(get_from_memtable_time); + IO_PERF_RECORD(get_from_memtable_count); + IO_PERF_RECORD(get_post_process_time); + IO_PERF_RECORD(get_from_output_files_time); + IO_PERF_RECORD(seek_on_memtable_time); + IO_PERF_RECORD(seek_on_memtable_count); + IO_PERF_RECORD(seek_child_seek_time); + IO_PERF_RECORD(seek_child_seek_count); + IO_PERF_RECORD(seek_min_heap_time); + IO_PERF_RECORD(seek_internal_seek_time); + IO_PERF_RECORD(find_next_user_entry_time); + IO_PERF_RECORD(write_wal_time); + IO_PERF_RECORD(write_memtable_time); + IO_PERF_RECORD(write_delay_time); + IO_PERF_RECORD(write_pre_and_post_process_time); + IO_PERF_RECORD(db_mutex_lock_nanos); + IO_PERF_RECORD(db_condition_wait_nanos); + IO_PERF_RECORD(merge_operator_time_nanos); + IO_PERF_RECORD(read_index_block_nanos); + IO_PERF_RECORD(read_filter_block_nanos); + IO_PERF_RECORD(new_table_block_iter_nanos); + IO_PERF_RECORD(new_table_iterator_nanos); + IO_PERF_RECORD(block_seek_nanos); + IO_PERF_RECORD(find_table_nanos); + IO_STAT_RECORD(thread_pool_id); + IO_STAT_RECORD(bytes_written); + IO_STAT_RECORD(bytes_read); + IO_STAT_RECORD(open_nanos); + IO_STAT_RECORD(allocate_nanos); + IO_STAT_RECORD(write_nanos); + IO_STAT_RECORD(read_nanos); + IO_STAT_RECORD(range_sync_nanos); + IO_STAT_RECORD(logger_nanos); +} + +#undef IO_PERF_DIFF +#undef IO_STAT_DIFF + +static Rdb_atomic_perf_counters rdb_global_perf_counters; + +void rdb_get_global_perf_counters(Rdb_perf_counters *const counters) { + DBUG_ASSERT(counters != nullptr); + + counters->load(rdb_global_perf_counters); +} + +void Rdb_perf_counters::load(const Rdb_atomic_perf_counters &atomic_counters) { + for (int i = 0; i < PC_MAX_IDX; i++) { + m_value[i] = 
atomic_counters.m_value[i].load(std::memory_order_relaxed); + } +} + +bool Rdb_io_perf::start(const uint32_t perf_context_level) { + const rocksdb::PerfLevel perf_level = + static_cast(perf_context_level); + + if (rocksdb::GetPerfLevel() != perf_level) { + rocksdb::SetPerfLevel(perf_level); + } + + if (perf_level == rocksdb::kDisable) { + return false; + } + + rocksdb::perf_context.Reset(); + rocksdb::iostats_context.Reset(); + return true; +} + +void Rdb_io_perf::end_and_record(const uint32_t perf_context_level) { + const rocksdb::PerfLevel perf_level = + static_cast(perf_context_level); + + if (perf_level == rocksdb::kDisable) { + return; + } + + if (m_atomic_counters) { + harvest_diffs(m_atomic_counters); + } + harvest_diffs(&rdb_global_perf_counters); + + if (m_shared_io_perf_read && (rocksdb::perf_context.block_read_byte != 0 || + rocksdb::perf_context.block_read_count != 0 || + rocksdb::perf_context.block_read_time != 0)) { +#ifdef MARIAROCKS_NOT_YET + my_io_perf_t io_perf_read; + + io_perf_read.init(); + io_perf_read.bytes = rocksdb::perf_context.block_read_byte; + io_perf_read.requests = rocksdb::perf_context.block_read_count; + + /* + Rocksdb does not distinguish between I/O service and wait time, so just + use svc time. 
+ */ + io_perf_read.svc_time_max = io_perf_read.svc_time = + rocksdb::perf_context.block_read_time; + + m_shared_io_perf_read->sum(io_perf_read); + m_stats->table_io_perf_read.sum(io_perf_read); +#endif + } + +#ifdef MARIAROCKS_NOT_YET + if (m_stats) { + if (rocksdb::perf_context.internal_key_skipped_count != 0) { + m_stats->key_skipped += rocksdb::perf_context.internal_key_skipped_count; + } + + if (rocksdb::perf_context.internal_delete_skipped_count != 0) { + m_stats->delete_skipped += + rocksdb::perf_context.internal_delete_skipped_count; + } + } +#endif +} + +} // namespace myrocks diff --git a/storage/rocksdb/rdb_perf_context.h b/storage/rocksdb/rdb_perf_context.h new file mode 100644 index 0000000000000..9d580ff0b8a4f --- /dev/null +++ b/storage/rocksdb/rdb_perf_context.h @@ -0,0 +1,139 @@ +/* + Portions Copyright (c) 2015-Present, Facebook, Inc. + Portions Copyright (c) 2012,2013 Monty Program Ab + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; version 2 of the License. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. 
+ + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ +#pragma once + +/* C++ standard header files */ +#include +#include +#include + +/* MySQL header files */ +#include "./handler.h" +#include + +#include "rdb_mariadb_port.h" + +namespace myrocks { + +enum { + PC_USER_KEY_COMPARISON_COUNT = 0, + PC_BLOCK_CACHE_HIT_COUNT, + PC_BLOCK_READ_COUNT, + PC_BLOCK_READ_BYTE, + PC_BLOCK_READ_TIME, + PC_BLOCK_CHECKSUM_TIME, + PC_BLOCK_DECOMPRESS_TIME, + PC_KEY_SKIPPED, + PC_DELETE_SKIPPED, + PC_GET_SNAPSHOT_TIME, + PC_GET_FROM_MEMTABLE_TIME, + PC_GET_FROM_MEMTABLE_COUNT, + PC_GET_POST_PROCESS_TIME, + PC_GET_FROM_OUTPUT_FILES_TIME, + PC_SEEK_ON_MEMTABLE_TIME, + PC_SEEK_ON_MEMTABLE_COUNT, + PC_SEEK_CHILD_SEEK_TIME, + PC_SEEK_CHILD_SEEK_COUNT, + PC_SEEK_MIN_HEAP_TIME, + PC_SEEK_INTERNAL_SEEK_TIME, + PC_FIND_NEXT_USER_ENTRY_TIME, + PC_WRITE_WAL_TIME, + PC_WRITE_MEMTABLE_TIME, + PC_WRITE_DELAY_TIME, + PC_WRITE_PRE_AND_POST_PROCESSS_TIME, + PC_DB_MUTEX_LOCK_NANOS, + PC_DB_CONDITION_WAIT_NANOS, + PC_MERGE_OPERATOR_TIME_NANOS, + PC_READ_INDEX_BLOCK_NANOS, + PC_READ_FILTER_BLOCK_NANOS, + PC_NEW_TABLE_BLOCK_ITER_NANOS, + PC_NEW_TABLE_ITERATOR_NANOS, + PC_BLOCK_SEEK_NANOS, + PC_FIND_TABLE_NANOS, + PC_IO_THREAD_POOL_ID, + PC_IO_BYTES_WRITTEN, + PC_IO_BYTES_READ, + PC_IO_OPEN_NANOS, + PC_IO_ALLOCATE_NANOS, + PC_IO_WRITE_NANOS, + PC_IO_READ_NANOS, + PC_IO_RANGE_SYNC_NANOS, + PC_IO_LOGGER_NANOS, + PC_MAX_IDX +}; + +class Rdb_perf_counters; + +/* + A collection of performance counters that can be safely incremented by + multiple threads since it stores atomic datapoints. +*/ +struct Rdb_atomic_perf_counters { + std::atomic_ullong m_value[PC_MAX_IDX]; +}; + +/* + A collection of performance counters that is meant to be incremented by + a single thread. 
+*/ +class Rdb_perf_counters { + Rdb_perf_counters(const Rdb_perf_counters &) = delete; + Rdb_perf_counters &operator=(const Rdb_perf_counters &) = delete; + +public: + Rdb_perf_counters() = default; + uint64_t m_value[PC_MAX_IDX]; + + void load(const Rdb_atomic_perf_counters &atomic_counters); +}; + +extern std::string rdb_pc_stat_types[PC_MAX_IDX]; + +/* + Perf timers for data reads + */ +class Rdb_io_perf { + // Context management + Rdb_atomic_perf_counters *m_atomic_counters = nullptr; + my_io_perf_atomic_t *m_shared_io_perf_read = nullptr; + ha_statistics *m_stats = nullptr; + +public: + Rdb_io_perf(const Rdb_io_perf &) = delete; + Rdb_io_perf &operator=(const Rdb_io_perf &) = delete; + + void init(Rdb_atomic_perf_counters *const atomic_counters, + my_io_perf_atomic_t *const shared_io_perf_read, + ha_statistics *const stats) { + DBUG_ASSERT(atomic_counters != nullptr); + DBUG_ASSERT(shared_io_perf_read != nullptr); + DBUG_ASSERT(stats != nullptr); + + m_atomic_counters = atomic_counters; + m_shared_io_perf_read = shared_io_perf_read; + m_stats = stats; + } + + bool start(const uint32_t perf_context_level); + void end_and_record(const uint32_t perf_context_level); + + explicit Rdb_io_perf() + : m_atomic_counters(nullptr), m_shared_io_perf_read(nullptr), + m_stats(nullptr) {} +}; + +} // namespace myrocks diff --git a/storage/rocksdb/rdb_psi.cc b/storage/rocksdb/rdb_psi.cc new file mode 100644 index 0000000000000..b6bc89a02f912 --- /dev/null +++ b/storage/rocksdb/rdb_psi.cc @@ -0,0 +1,113 @@ +/* Copyright (c) 2017, Percona and/or its affiliates. All rights reserved. + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; version 2 of the License. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ + +#ifdef USE_PRAGMA_IMPLEMENTATION +#pragma implementation // gcc: Class implementation +#endif + +#define MYSQL_SERVER 1 + +/* The C++ file's header */ +#include "./rdb_psi.h" + +/* MySQL header files */ +#include + +namespace myrocks { + +/* + The following is needed as an argument for mysql_stage_register, + irrespectively of whether we're compiling with P_S or not. +*/ +my_core::PSI_stage_info stage_waiting_on_row_lock = {0, "Waiting for row lock", + 0}; + +#ifdef HAVE_PSI_INTERFACE +my_core::PSI_stage_info *all_rocksdb_stages[] = {&stage_waiting_on_row_lock}; + +my_core::PSI_thread_key rdb_background_psi_thread_key, + rdb_drop_idx_psi_thread_key; + +my_core::PSI_thread_info all_rocksdb_threads[] = { + {&rdb_background_psi_thread_key, "background", PSI_FLAG_GLOBAL}, + {&rdb_drop_idx_psi_thread_key, "drop index", PSI_FLAG_GLOBAL}, +}; + +my_core::PSI_mutex_key rdb_psi_open_tbls_mutex_key, rdb_signal_bg_psi_mutex_key, + rdb_signal_drop_idx_psi_mutex_key, rdb_collation_data_mutex_key, + rdb_mem_cmp_space_mutex_key, key_mutex_tx_list, rdb_sysvars_psi_mutex_key, + rdb_cfm_mutex_key; + +my_core::PSI_mutex_info all_rocksdb_mutexes[] = { + {&rdb_psi_open_tbls_mutex_key, "open tables", PSI_FLAG_GLOBAL}, + {&rdb_signal_bg_psi_mutex_key, "stop background", PSI_FLAG_GLOBAL}, + {&rdb_signal_drop_idx_psi_mutex_key, "signal drop index", PSI_FLAG_GLOBAL}, + {&rdb_collation_data_mutex_key, "collation data init", PSI_FLAG_GLOBAL}, + {&rdb_mem_cmp_space_mutex_key, "collation space char data init", + PSI_FLAG_GLOBAL}, + {&key_mutex_tx_list, "tx_list", PSI_FLAG_GLOBAL}, + {&rdb_sysvars_psi_mutex_key, "setting sysvar", PSI_FLAG_GLOBAL}, + {&rdb_cfm_mutex_key, "column family manager", PSI_FLAG_GLOBAL}, +}; + 
+my_core::PSI_rwlock_key key_rwlock_collation_exception_list, + key_rwlock_read_free_rpl_tables, key_rwlock_skip_unique_check_tables; + +my_core::PSI_rwlock_info all_rocksdb_rwlocks[] = { + {&key_rwlock_collation_exception_list, "collation_exception_list", + PSI_FLAG_GLOBAL}, + {&key_rwlock_read_free_rpl_tables, "read_free_rpl_tables", PSI_FLAG_GLOBAL}, + {&key_rwlock_skip_unique_check_tables, "skip_unique_check_tables", + PSI_FLAG_GLOBAL}, +}; + +my_core::PSI_cond_key rdb_signal_bg_psi_cond_key, + rdb_signal_drop_idx_psi_cond_key; + +my_core::PSI_cond_info all_rocksdb_conds[] = { + {&rdb_signal_bg_psi_cond_key, "cond signal background", PSI_FLAG_GLOBAL}, + {&rdb_signal_drop_idx_psi_cond_key, "cond signal drop index", + PSI_FLAG_GLOBAL}, +}; + +void init_rocksdb_psi_keys() { + const char *const category = "rocksdb"; + int count; + + if (PSI_server == nullptr) + return; + + count = array_elements(all_rocksdb_mutexes); + PSI_server->register_mutex(category, all_rocksdb_mutexes, count); + + count = array_elements(all_rocksdb_rwlocks); + PSI_server->register_rwlock(category, all_rocksdb_rwlocks, count); + + count = array_elements(all_rocksdb_conds); + //TODO Disabling PFS for conditions due to the bug + // https://github.com/MySQLOnRocksDB/mysql-5.6/issues/92 + // PSI_server->register_cond(category, all_rocksdb_conds, count); + + count = array_elements(all_rocksdb_stages); + mysql_stage_register(category, all_rocksdb_stages, count); + + count = array_elements(all_rocksdb_threads); + mysql_thread_register(category, all_rocksdb_threads, count); +} +#else // HAVE_PSI_INTERFACE +void init_rocksdb_psi_keys() {} +#endif // HAVE_PSI_INTERFACE + +} // namespace myrocks diff --git a/storage/rocksdb/rdb_psi.h b/storage/rocksdb/rdb_psi.h new file mode 100644 index 0000000000000..0a62f411ade62 --- /dev/null +++ b/storage/rocksdb/rdb_psi.h @@ -0,0 +1,56 @@ +/* Copyright (c) 2017, Percona and/or its affiliates. All rights reserved. 
+ + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; version 2 of the License. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ +#pragma once + +#ifndef _rdb_psi_h_ +#define _rdb_psi_h_ + +/* MySQL header files */ +#include +#include +#include + +/* MyRocks header files */ +#include "./rdb_utils.h" + +namespace myrocks { + +/* + The following is needed as an argument for mysql_stage_register, + irrespectively of whether we're compiling with P_S or not. +*/ +extern my_core::PSI_stage_info stage_waiting_on_row_lock; + +#ifdef HAVE_PSI_INTERFACE +extern my_core::PSI_thread_key rdb_background_psi_thread_key, + rdb_drop_idx_psi_thread_key; + +extern my_core::PSI_mutex_key rdb_psi_open_tbls_mutex_key, + rdb_signal_bg_psi_mutex_key, rdb_signal_drop_idx_psi_mutex_key, + rdb_collation_data_mutex_key, rdb_mem_cmp_space_mutex_key, + key_mutex_tx_list, rdb_sysvars_psi_mutex_key, rdb_cfm_mutex_key; + +extern my_core::PSI_rwlock_key key_rwlock_collation_exception_list, + key_rwlock_read_free_rpl_tables, key_rwlock_skip_unique_check_tables; + +extern my_core::PSI_cond_key rdb_signal_bg_psi_cond_key, + rdb_signal_drop_idx_psi_cond_key; +#endif // HAVE_PSI_INTERFACE + +void init_rocksdb_psi_keys(); + +} // namespace myrocks + +#endif // _rdb_psi_h_ diff --git a/storage/rocksdb/rdb_sst_info.cc b/storage/rocksdb/rdb_sst_info.cc new file mode 100644 index 0000000000000..34d32aeb3478c --- /dev/null +++ b/storage/rocksdb/rdb_sst_info.cc @@ -0,0 +1,431 @@ +/* + Copyright 
(c) 2016, Facebook, Inc. + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; version 2 of the License. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ + +/* For PRIu64 use below: */ +#define __STDC_FORMAT_MACROS + +#include + +/* This C++ file's header file */ +#include "./rdb_sst_info.h" + +#include + +/* C++ standard header files */ +#include +#include +#include + +/* MySQL header files */ +#include +#include "../sql/log.h" +#include "./my_dir.h" + +/* RocksDB header files */ +#include "rocksdb/db.h" +#include "rocksdb/options.h" + +/* MyRocks header files */ +#include "./ha_rocksdb.h" +#include "./ha_rocksdb_proto.h" +#include "./rdb_cf_options.h" + +namespace myrocks { + +Rdb_sst_file::Rdb_sst_file(rocksdb::DB *const db, + rocksdb::ColumnFamilyHandle *const cf, + const rocksdb::DBOptions &db_options, + const std::string &name, const bool tracing) + : m_db(db), m_cf(cf), m_db_options(db_options), m_sst_file_writer(nullptr), + m_name(name), m_tracing(tracing) { + DBUG_ASSERT(db != nullptr); + DBUG_ASSERT(cf != nullptr); +} + +Rdb_sst_file::~Rdb_sst_file() { + // Make sure we clean up + delete m_sst_file_writer; + m_sst_file_writer = nullptr; + + // In case something went wrong attempt to delete the temporary file. + // If everything went fine that file will have been renamed and this + // function call will fail. 
+ std::remove(m_name.c_str()); +} + +rocksdb::Status Rdb_sst_file::open() { + DBUG_ASSERT(m_sst_file_writer == nullptr); + + rocksdb::ColumnFamilyDescriptor cf_descr; + + rocksdb::Status s = m_cf->GetDescriptor(&cf_descr); + if (!s.ok()) { + return s; + } + + // Create an sst file writer with the current options and comparator + const rocksdb::Comparator *comparator = m_cf->GetComparator(); + + const rocksdb::EnvOptions env_options(m_db_options); + const rocksdb::Options options(m_db_options, cf_descr.options); + + m_sst_file_writer = + new rocksdb::SstFileWriter(env_options, options, comparator, m_cf); + + s = m_sst_file_writer->Open(m_name); + if (m_tracing) { + // NO_LINT_DEBUG + sql_print_information("SST Tracing: Open(%s) returned %s", m_name.c_str(), + s.ok() ? "ok" : "not ok"); + } + + if (!s.ok()) { + delete m_sst_file_writer; + m_sst_file_writer = nullptr; + } + + return s; +} + +rocksdb::Status Rdb_sst_file::put(const rocksdb::Slice &key, + const rocksdb::Slice &value) { + DBUG_ASSERT(m_sst_file_writer != nullptr); + + // Add the specified key/value to the sst file writer + return m_sst_file_writer->Add(key, value); +} + +std::string Rdb_sst_file::generateKey(const std::string &key) { + static char const hexdigit[] = {'0', '1', '2', '3', '4', '5', '6', '7', + '8', '9', 'A', 'B', 'C', 'D', 'E', 'F'}; + + std::string res; + + res.reserve(key.size() * 2); + + for (auto ch : key) { + res += hexdigit[((uint8_t)ch) >> 4]; + res += hexdigit[((uint8_t)ch) & 0x0F]; + } + + return res; +} + +// This function is run by the background thread +rocksdb::Status Rdb_sst_file::commit() { + DBUG_ASSERT(m_sst_file_writer != nullptr); + + rocksdb::Status s; + rocksdb::ExternalSstFileInfo fileinfo; /// Finish may should be modified + + // Close out the sst file + s = m_sst_file_writer->Finish(&fileinfo); + if (m_tracing) { + // NO_LINT_DEBUG + sql_print_information("SST Tracing: Finish returned %s", + s.ok() ? 
"ok" : "not ok"); + } + + if (s.ok()) { + if (m_tracing) { + // NO_LINT_DEBUG + sql_print_information("SST Tracing: Adding file %s, smallest key: %s, " + "largest key: %s, file size: %" PRIu64 ", " + "num_entries: %" PRIu64, + fileinfo.file_path.c_str(), + generateKey(fileinfo.smallest_key).c_str(), + generateKey(fileinfo.largest_key).c_str(), + fileinfo.file_size, fileinfo.num_entries); + } + + // Add the file to the database + // Set the snapshot_consistency parameter to false since no one + // should be accessing the table we are bulk loading + rocksdb::IngestExternalFileOptions opts; + opts.move_files = true; + opts.snapshot_consistency = false; + opts.allow_global_seqno = false; + opts.allow_blocking_flush = false; + s = m_db->IngestExternalFile(m_cf, {m_name}, opts); + + if (m_tracing) { + // NO_LINT_DEBUG + sql_print_information("SST Tracing: AddFile(%s) returned %s", + fileinfo.file_path.c_str(), + s.ok() ? "ok" : "not ok"); + } + } + + delete m_sst_file_writer; + m_sst_file_writer = nullptr; + + return s; +} + +Rdb_sst_info::Rdb_sst_info(rocksdb::DB *const db, const std::string &tablename, + const std::string &indexname, + rocksdb::ColumnFamilyHandle *const cf, + const rocksdb::DBOptions &db_options, + const bool &tracing) + : m_db(db), m_cf(cf), m_db_options(db_options), m_curr_size(0), + m_sst_count(0), m_error_msg(""), +#if defined(RDB_SST_INFO_USE_THREAD) + m_queue(), m_mutex(), m_cond(), m_thread(nullptr), m_finished(false), +#endif + m_sst_file(nullptr), m_tracing(tracing) { + m_prefix = db->GetName() + "/"; + + std::string normalized_table; + if (rdb_normalize_tablename(tablename.c_str(), &normalized_table)) { + // We failed to get a normalized table name. This should never happen, + // but handle it anyway. 
+ m_prefix += "fallback_" + std::to_string(reinterpret_cast( + reinterpret_cast(this))) + + "_" + indexname + "_"; + } else { + m_prefix += normalized_table + "_" + indexname + "_"; + } + + // Unique filename generated to prevent collisions when the same table + // is loaded in parallel + m_prefix += std::to_string(m_prefix_counter.fetch_add(1)) + "_"; + + rocksdb::ColumnFamilyDescriptor cf_descr; + const rocksdb::Status s = m_cf->GetDescriptor(&cf_descr); + if (!s.ok()) { + // Default size if we can't get the cf's target size + m_max_size = 64 * 1024 * 1024; + } else { + // Set the maximum size to 3 times the cf's target size + m_max_size = cf_descr.options.target_file_size_base * 3; + } +} + +Rdb_sst_info::~Rdb_sst_info() { + DBUG_ASSERT(m_sst_file == nullptr); +#if defined(RDB_SST_INFO_USE_THREAD) + DBUG_ASSERT(m_thread == nullptr); +#endif +} + +int Rdb_sst_info::open_new_sst_file() { + DBUG_ASSERT(m_sst_file == nullptr); + + // Create the new sst file's name + const std::string name = m_prefix + std::to_string(m_sst_count++) + m_suffix; + + // Create the new sst file object + m_sst_file = new Rdb_sst_file(m_db, m_cf, m_db_options, name, m_tracing); + + // Open the sst file + const rocksdb::Status s = m_sst_file->open(); + if (!s.ok()) { + set_error_msg(m_sst_file->get_name(), s.ToString()); + delete m_sst_file; + m_sst_file = nullptr; + return HA_EXIT_FAILURE; + } + + m_curr_size = 0; + + return HA_EXIT_SUCCESS; +} + +void Rdb_sst_info::close_curr_sst_file() { + DBUG_ASSERT(m_sst_file != nullptr); + DBUG_ASSERT(m_curr_size > 0); + +#if defined(RDB_SST_INFO_USE_THREAD) + if (m_thread == nullptr) { + // We haven't already started a background thread, so start one + m_thread = new std::thread(thread_fcn, this); + } + + DBUG_ASSERT(m_thread != nullptr); + + { + // Add this finished sst file to the queue (while holding mutex) + const std::lock_guard guard(m_mutex); + m_queue.push(m_sst_file); + } + + // Notify the background thread that there is a new entry in the 
queue + m_cond.notify_one(); +#else + const rocksdb::Status s = m_sst_file->commit(); + if (!s.ok()) { + set_error_msg(m_sst_file->get_name(), s.ToString()); + } + + delete m_sst_file; +#endif + + // Reset for next sst file + m_sst_file = nullptr; + m_curr_size = 0; +} + +int Rdb_sst_info::put(const rocksdb::Slice &key, const rocksdb::Slice &value) { + int rc; + + if (m_curr_size >= m_max_size) { + // The current sst file has reached its maximum, close it out + close_curr_sst_file(); + + // While we are here, check to see if we have had any errors from the + // background thread - we don't want to wait for the end to report them + if (!m_error_msg.empty()) { + return HA_EXIT_FAILURE; + } + } + + if (m_curr_size == 0) { + // We don't have an sst file open - open one + rc = open_new_sst_file(); + if (rc != 0) { + return rc; + } + } + + DBUG_ASSERT(m_sst_file != nullptr); + + // Add the key/value to the current sst file + const rocksdb::Status s = m_sst_file->put(key, value); + if (!s.ok()) { + set_error_msg(m_sst_file->get_name(), s.ToString()); + return HA_EXIT_FAILURE; + } + + m_curr_size += key.size() + value.size(); + + return HA_EXIT_SUCCESS; +} + +int Rdb_sst_info::commit() { + if (m_curr_size > 0) { + // Close out any existing files + close_curr_sst_file(); + } + +#if defined(RDB_SST_INFO_USE_THREAD) + if (m_thread != nullptr) { + // Tell the background thread we are done + m_finished = true; + m_cond.notify_one(); + + // Wait for the background thread to finish + m_thread->join(); + delete m_thread; + m_thread = nullptr; + } +#endif + + // Did we get any errors? + if (!m_error_msg.empty()) { + return HA_EXIT_FAILURE; + } + + return HA_EXIT_SUCCESS; +} + +void Rdb_sst_info::set_error_msg(const std::string &sst_file_name, + const std::string &msg) { +#if defined(RDB_SST_INFO_USE_THREAD) + // Both the foreground and background threads can set the error message + // so lock the mutex to protect it. We only want the first error that + // we encounter. 
+ const std::lock_guard guard(m_mutex); +#endif + my_printf_error(ER_UNKNOWN_ERROR, "[%s] bulk load error: %s", MYF(0), + sst_file_name.c_str(), msg.c_str()); + if (m_error_msg.empty()) { + m_error_msg = "[" + sst_file_name + "] " + msg; + } +} + +#if defined(RDB_SST_INFO_USE_THREAD) +// Static thread function - the Rdb_sst_info object is in 'object' +void Rdb_sst_info::thread_fcn(void *object) { + reinterpret_cast(object)->run_thread(); +} + +void Rdb_sst_info::run_thread() { + const std::unique_lock lk(m_mutex); + + do { + // Wait for notification or 1 second to pass + m_cond.wait_for(lk, std::chrono::seconds(1)); + + // Inner loop pulls off all Rdb_sst_file entries and processes them + while (!m_queue.empty()) { + const Rdb_sst_file *const sst_file = m_queue.front(); + m_queue.pop(); + + // Release the lock - we don't want to hold it while committing the file + lk.unlock(); + + // Close out the sst file and add it to the database + const rocksdb::Status s = sst_file->commit(); + if (!s.ok()) { + set_error_msg(sst_file->get_name(), s.ToString()); + } + + delete sst_file; + + // Reacquire the lock for the next inner loop iteration + lk.lock(); + } + + // If the queue is empty and the main thread has indicated we should exit + // break out of the loop. + } while (!m_finished); + + DBUG_ASSERT(m_queue.empty()); +} +#endif + +void Rdb_sst_info::init(const rocksdb::DB *const db) { + const std::string path = db->GetName() + FN_DIRSEP; + struct st_my_dir *const dir_info = my_dir(path.c_str(), MYF(MY_DONT_SORT)); + + // Access the directory + if (dir_info == nullptr) { + // NO_LINT_DEBUG + sql_print_warning("RocksDB: Could not access database directory: %s", + path.c_str()); + return; + } + + // Scan through the files in the directory + const struct fileinfo *file_info = dir_info->dir_entry; + for (uint ii= 0; ii < dir_info->number_of_files; ii++, file_info++) { + // find any files ending with m_suffix ... 
+ const std::string name = file_info->name; + const size_t pos = name.find(m_suffix); + if (pos != std::string::npos && name.size() - pos == m_suffix.size()) { + // ... and remove them + const std::string fullname = path + name; + my_delete(fullname.c_str(), MYF(0)); + } + } + + // Release the directory entry + my_dirend(dir_info); +} + +std::atomic Rdb_sst_info::m_prefix_counter(0); +std::string Rdb_sst_info::m_suffix = ".bulk_load.tmp"; +} // namespace myrocks diff --git a/storage/rocksdb/rdb_sst_info.h b/storage/rocksdb/rdb_sst_info.h new file mode 100644 index 0000000000000..4211ec6340d6e --- /dev/null +++ b/storage/rocksdb/rdb_sst_info.h @@ -0,0 +1,112 @@ +/* + Copyright (c) 2016, Facebook, Inc. + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; version 2 of the License. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. 
+ + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ + +#pragma once + +/* C++ standard header files */ +#include +#include +#include +#include +#include +#include +#include + +/* RocksDB header files */ +#include "rocksdb/db.h" +#include "rocksdb/sst_file_writer.h" + +// define RDB_SST_INFO_USE_THREAD /* uncomment to use threads */ + +namespace myrocks { + +class Rdb_sst_file { +private: + Rdb_sst_file(const Rdb_sst_file &p) = delete; + Rdb_sst_file &operator=(const Rdb_sst_file &p) = delete; + + rocksdb::DB *const m_db; + rocksdb::ColumnFamilyHandle *const m_cf; + const rocksdb::DBOptions &m_db_options; + rocksdb::SstFileWriter *m_sst_file_writer; + const std::string m_name; + const bool m_tracing; + + std::string generateKey(const std::string &key); + +public: + Rdb_sst_file(rocksdb::DB *const db, rocksdb::ColumnFamilyHandle *const cf, + const rocksdb::DBOptions &db_options, const std::string &name, + const bool tracing); + ~Rdb_sst_file(); + + rocksdb::Status open(); + rocksdb::Status put(const rocksdb::Slice &key, const rocksdb::Slice &value); + rocksdb::Status commit(); + const std::string get_name() const { return m_name; } +}; + +class Rdb_sst_info { +private: + Rdb_sst_info(const Rdb_sst_info &p) = delete; + Rdb_sst_info &operator=(const Rdb_sst_info &p) = delete; + + rocksdb::DB *const m_db; + rocksdb::ColumnFamilyHandle *const m_cf; + const rocksdb::DBOptions &m_db_options; + uint64_t m_curr_size; + uint64_t m_max_size; + uint32_t m_sst_count; + std::string m_error_msg; + std::string m_prefix; + static std::atomic m_prefix_counter; + static std::string m_suffix; +#if defined(RDB_SST_INFO_USE_THREAD) + std::queue m_queue; + std::mutex m_mutex; + std::condition_variable m_cond; + std::thread *m_thread; + bool m_finished; +#endif + Rdb_sst_file *m_sst_file; + const bool m_tracing; + + int 
open_new_sst_file(); + void close_curr_sst_file(); + void set_error_msg(const std::string &sst_file_name, const std::string &msg); + +#if defined(RDB_SST_INFO_USE_THREAD) + void run_thread(); + + static void thread_fcn(void *object); +#endif + +public: + Rdb_sst_info(rocksdb::DB *const db, const std::string &tablename, + const std::string &indexname, + rocksdb::ColumnFamilyHandle *const cf, + const rocksdb::DBOptions &db_options, const bool &tracing); + ~Rdb_sst_info(); + + int put(const rocksdb::Slice &key, const rocksdb::Slice &value); + int commit(); + + const std::string &error_message() const { return m_error_msg; } + + static void init(const rocksdb::DB *const db); +}; + +} // namespace myrocks diff --git a/storage/rocksdb/rdb_threads.cc b/storage/rocksdb/rdb_threads.cc new file mode 100644 index 0000000000000..b4bba8d30876c --- /dev/null +++ b/storage/rocksdb/rdb_threads.cc @@ -0,0 +1,83 @@ +/* + Portions Copyright (c) 2015-Present, Facebook, Inc. + Portions Copyright (c) 2012, Monty Program Ab + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; version 2 of the License. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. 
+ + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ + +#ifdef USE_PRAGMA_IMPLEMENTATION +#pragma implementation // gcc: Class implementation +#endif + +#include + +/* The C++ file's header */ +#include "./rdb_threads.h" + +namespace myrocks { + +void *Rdb_thread::thread_func(void *const thread_ptr) { + DBUG_ASSERT(thread_ptr != nullptr); + Rdb_thread *const thread = static_cast(thread_ptr); + if (!thread->m_run_once.exchange(true)) { + thread->setname(); + thread->run(); + thread->uninit(); + } + return nullptr; +} + +void Rdb_thread::init( +#ifdef HAVE_PSI_INTERFACE + my_core::PSI_mutex_key stop_bg_psi_mutex_key, + my_core::PSI_cond_key stop_bg_psi_cond_key +#endif + ) { + DBUG_ASSERT(!m_run_once); + mysql_mutex_init(stop_bg_psi_mutex_key, &m_signal_mutex, MY_MUTEX_INIT_FAST); + mysql_cond_init(stop_bg_psi_cond_key, &m_signal_cond, nullptr); +} + +void Rdb_thread::uninit() { + mysql_mutex_destroy(&m_signal_mutex); + mysql_cond_destroy(&m_signal_cond); +} + +int Rdb_thread::create_thread(const std::string &thread_name +#ifdef HAVE_PSI_INTERFACE + , + PSI_thread_key background_psi_thread_key +#endif + ) { + // Make a copy of the name so we can return without worrying that the + // caller will free the memory + m_name = thread_name; + + return mysql_thread_create(background_psi_thread_key, &m_handle, nullptr, + thread_func, this); + +} + +void Rdb_thread::signal(const bool &stop_thread) { + RDB_MUTEX_LOCK_CHECK(m_signal_mutex); + + if (stop_thread) { + m_stop = true; + } + + mysql_cond_signal(&m_signal_cond); + + RDB_MUTEX_UNLOCK_CHECK(m_signal_mutex); +} + +} // namespace myrocks diff --git a/storage/rocksdb/rdb_threads.h b/storage/rocksdb/rdb_threads.h new file mode 100644 index 0000000000000..a93e4fc93f2fc --- /dev/null +++ b/storage/rocksdb/rdb_threads.h @@ -0,0 +1,171 @@ +/* + Portions Copyright (c) 
2015-Present, Facebook, Inc. + Portions Copyright (c) 2012, Monty Program Ab + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; version 2 of the License. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ +#pragma once + +/* C++ standard header files */ +#include + +/* MySQL includes */ +#include "./my_global.h" +#ifdef _WIN32 +#include +/* + Rocksdb implements their own pthread_key functions + undefine some my_pthread.h macros +*/ +#undef pthread_key_create +#undef pthread_key_delete +#undef pthread_setspecific +#undef pthread_getspecific +#endif +#include +#ifdef MARIAROCKS_NOT_YET +#include +#endif + +/* MyRocks header files */ +#include "./rdb_utils.h" + +namespace myrocks { + +class Rdb_thread { +private: + // Disable Copying + Rdb_thread(const Rdb_thread &); + Rdb_thread &operator=(const Rdb_thread &); + + // Make sure we run only once + std::atomic_bool m_run_once; + + pthread_t m_handle; + + std::string m_name; + +protected: + mysql_mutex_t m_signal_mutex; + mysql_cond_t m_signal_cond; + bool m_stop = false; + +public: + Rdb_thread() : m_run_once(false) {} + +#ifdef HAVE_PSI_INTERFACE + void init(my_core::PSI_mutex_key stop_bg_psi_mutex_key, + my_core::PSI_cond_key stop_bg_psi_cond_key); + int create_thread(const std::string &thread_name, + my_core::PSI_thread_key background_psi_thread_key); +#else + void init(); + int create_thread(const std::string &thread_name); +#endif + + virtual void run(void) = 0; + + void signal(const bool 
&stop_thread = false); + + int join() + { +#ifndef _WIN32 + return pthread_join(m_handle, nullptr); +#else + /* + mysys on Windows creates "detached" threads in pthread_create(). + + m_handle here is the thread id I(it is not reused by the OS + thus it is safe to state there can't be other thread with + the same id at this point). + + If thread is already finished before pthread_join(), + we get EINVAL, and it is safe to ignore and handle this as success. + */ + pthread_join(m_handle, nullptr); + return 0; +#endif + } + + void setname() { + /* + mysql_thread_create() ends up doing some work underneath and setting the + thread name as "my-func". This isn't what we want. Our intent is to name + the threads according to their purpose so that when displayed under the + debugger then they'll be more easily identifiable. Therefore we'll reset + the name if thread was successfully created. + */ + + /* + We originally had the creator also set the thread name, but that seems to + not work correctly in all situations. Having the created thread do the + pthread_setname_np resolves the issue. + */ + DBUG_ASSERT(!m_name.empty()); +#ifdef __linux__ + int err = pthread_setname_np(m_handle, m_name.c_str()); + if (err) + { + // NO_LINT_DEBUG + sql_print_warning( + "MyRocks: Failed to set name (%s) for current thread, errno=%d", + m_name.c_str(), errno); + } +#endif + } + + void uninit(); + + virtual ~Rdb_thread() {} + +private: + static void *thread_func(void *const thread_ptr); +}; + +/** + MyRocks background thread control + N.B. 
This is on top of RocksDB's own background threads + (@see rocksdb::CancelAllBackgroundWork()) +*/ + +class Rdb_background_thread : public Rdb_thread { +private: + bool m_save_stats = false; + + void reset() { + mysql_mutex_assert_owner(&m_signal_mutex); + m_stop = false; + m_save_stats = false; + } + +public: + virtual void run() override; + + void request_save_stats() { + RDB_MUTEX_LOCK_CHECK(m_signal_mutex); + + m_save_stats = true; + + RDB_MUTEX_UNLOCK_CHECK(m_signal_mutex); + } +}; + +/* + Drop index thread control +*/ + +struct Rdb_drop_index_thread : public Rdb_thread { + virtual void run() override; +}; + +} // namespace myrocks diff --git a/storage/rocksdb/rdb_utils.cc b/storage/rocksdb/rdb_utils.cc new file mode 100644 index 0000000000000..5dff63cbf4cab --- /dev/null +++ b/storage/rocksdb/rdb_utils.cc @@ -0,0 +1,343 @@ +/* + Copyright (c) 2016, Facebook, Inc. + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; version 2 of the License. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ + +#include + +/* This C++ file's header */ +#include "./rdb_utils.h" + +/* C++ standard header files */ +#include +#include +#include +#include //psergey-merge + +/* C standard header files */ +#include + +/* MyRocks header files */ +#include "./ha_rocksdb.h" + +/* + Both innobase/include/ut0counter.h and rocksdb/port/port_posix.h define + CACHE_LINE_SIZE. 
+*/ +#ifdef CACHE_LINE_SIZE +# undef CACHE_LINE_SIZE +#endif + +/* RocksDB header files */ +#include "util/compression.h" + +namespace myrocks { + +/* + Skip past any spaces in the input +*/ +const char *rdb_skip_spaces(const struct charset_info_st *const cs, + const char *str) { + DBUG_ASSERT(cs != nullptr); + DBUG_ASSERT(str != nullptr); + + while (my_isspace(cs, *str)) { + str++; + } + + return str; +} + +/* + Compare (ignoring case) to see if str2 is the next data in str1. + Note that str1 can be longer but we only compare up to the number + of characters in str2. +*/ +bool rdb_compare_strings_ic(const char *const str1, const char *const str2) { + DBUG_ASSERT(str1 != nullptr); + DBUG_ASSERT(str2 != nullptr); + + // Scan through the strings + size_t ii; + for (ii = 0; str2[ii]; ii++) { + if (toupper(static_cast(str1[ii])) != + toupper(static_cast(str2[ii]))) { + return false; + } + } + + return true; +} + +/* + Scan through an input string looking for pattern, ignoring case + and skipping all data enclosed in quotes. +*/ +const char *rdb_find_in_string(const char *str, const char *pattern, + bool *const succeeded) { + char quote = '\0'; + bool escape = false; + + DBUG_ASSERT(str != nullptr); + DBUG_ASSERT(pattern != nullptr); + DBUG_ASSERT(succeeded != nullptr); + + *succeeded = false; + + for (; *str; str++) { + /* If we found a our starting quote character */ + if (*str == quote) { + /* If it was escaped ignore it */ + if (escape) { + escape = false; + } + /* Otherwise we are now outside of the quoted string */ + else { + quote = '\0'; + } + } + /* Else if we are currently inside a quoted string? 
*/ + else if (quote != '\0') { + /* If so, check for the escape character */ + escape = !escape && *str == '\\'; + } + /* Else if we found a quote we are starting a quoted string */ + else if (*str == '"' || *str == '\'' || *str == '`') { + quote = *str; + } + /* Else we are outside of a quoted string - look for our pattern */ + else { + if (rdb_compare_strings_ic(str, pattern)) { + *succeeded = true; + return str; + } + } + } + + // Return the character after the found pattern or the null terminateor + // if the pattern wasn't found. + return str; +} + +/* + See if the next valid token matches the specified string +*/ +const char *rdb_check_next_token(const struct charset_info_st *const cs, + const char *str, const char *const pattern, + bool *const succeeded) { + DBUG_ASSERT(cs != nullptr); + DBUG_ASSERT(str != nullptr); + DBUG_ASSERT(pattern != nullptr); + DBUG_ASSERT(succeeded != nullptr); + + // Move past any spaces + str = rdb_skip_spaces(cs, str); + + // See if the next characters match the pattern + if (rdb_compare_strings_ic(str, pattern)) { + *succeeded = true; + return str + strlen(pattern); + } + + *succeeded = false; + return str; +} + +/* + Parse id +*/ +const char *rdb_parse_id(const struct charset_info_st *const cs, + const char *str, std::string *const id) { + DBUG_ASSERT(cs != nullptr); + DBUG_ASSERT(str != nullptr); + + // Move past any spaces + str = rdb_skip_spaces(cs, str); + + if (*str == '\0') { + return str; + } + + char quote = '\0'; + if (*str == '`' || *str == '"') { + quote = *str++; + } + + size_t len = 0; + const char *start = str; + + if (quote != '\0') { + for (;;) { + if (*str == '\0') { + return str; + } + + if (*str == quote) { + str++; + if (*str != quote) { + break; + } + } + + str++; + len++; + } + } else { + while (!my_isspace(cs, *str) && *str != '(' && *str != ')' && *str != '.' 
&& + *str != ',' && *str != '\0') { + str++; + len++; + } + } + + // If the user requested the id create it and return it + if (id != nullptr) { + *id = std::string(""); + id->reserve(len); + while (len--) { + *id += *start; + if (*start++ == quote) { + start++; + } + } + } + + return str; +} + +/* + Skip id +*/ +const char *rdb_skip_id(const struct charset_info_st *const cs, + const char *str) { + DBUG_ASSERT(cs != nullptr); + DBUG_ASSERT(str != nullptr); + + return rdb_parse_id(cs, str, nullptr); +} + +/* + Parses a given string into tokens (if any) separated by a specific delimiter. +*/ +const std::vector parse_into_tokens( + const std::string& s, const char delim) { + std::vector tokens; + std::string t; + std::stringstream ss(s); + + while (getline(ss, t, delim)) { + tokens.push_back(t); + } + + return tokens; +} + +static const std::size_t rdb_hex_bytes_per_char = 2; +static const std::array rdb_hexdigit = {{'0', '1', '2', '3', '4', '5', + '6', '7', '8', '9', 'a', 'b', + 'c', 'd', 'e', 'f'}}; + +/* + Convert data into a hex string with optional maximum length. + If the data is larger than the maximum length trancate it and append "..". +*/ +std::string rdb_hexdump(const char *data, const std::size_t data_len, + const std::size_t maxsize) { + DBUG_ASSERT(data != nullptr); + + // Count the elements in the string + std::size_t elems = data_len; + // Calculate the amount of output needed + std::size_t len = elems * rdb_hex_bytes_per_char; + std::string str; + + if (maxsize != 0 && len > maxsize) { + // If the amount of output is too large adjust the settings + // and leave room for the ".." 
at the end + elems = (maxsize - 2) / rdb_hex_bytes_per_char; + len = elems * rdb_hex_bytes_per_char + 2; + } + + // Reserve sufficient space to avoid reallocations + str.reserve(len); + + // Loop through the input data and build the output string + for (std::size_t ii = 0; ii < elems; ii++, data++) { + uint8_t ch = (uint8_t)*data; + str += rdb_hexdigit[ch >> 4]; + str += rdb_hexdigit[ch & 0x0F]; + } + + // If we can't fit it all add the ".." + if (elems != data_len) { + str += ".."; + } + + return str; +} + +/* + Attempt to access the database subdirectory to see if it exists +*/ +bool rdb_database_exists(const std::string &db_name) { + const std::string dir = + std::string(mysql_real_data_home) + FN_DIRSEP + db_name; + struct st_my_dir *const dir_info = + my_dir(dir.c_str(), MYF(MY_DONT_SORT | MY_WANT_STAT)); + if (dir_info == nullptr) { + return false; + } + + my_dirend(dir_info); + return true; +} + + +/* + @brief + Return a comma-separated string with compiled-in compression types. + Not thread-safe. +*/ +const char *get_rocksdb_supported_compression_types() +{ + static std::string compression_methods_buf; + static bool inited=false; + if (!inited) + { + inited= true; + std::vector known_types= + { + rocksdb::kSnappyCompression, + rocksdb::kZlibCompression, + rocksdb::kBZip2Compression, + rocksdb::kLZ4Compression, + rocksdb::kLZ4HCCompression, + rocksdb::kXpressCompression, + rocksdb::kZSTDNotFinalCompression + }; + + for (auto typ : known_types) + { + if (CompressionTypeSupported(typ)) + { + if (compression_methods_buf.size()) + compression_methods_buf.append(","); + compression_methods_buf.append(CompressionTypeToString(typ)); + } + } + } + return compression_methods_buf.c_str(); +} + +} // namespace myrocks diff --git a/storage/rocksdb/rdb_utils.h b/storage/rocksdb/rdb_utils.h new file mode 100644 index 0000000000000..71ec8ef54abea --- /dev/null +++ b/storage/rocksdb/rdb_utils.h @@ -0,0 +1,301 @@ +/* + Copyright (c) 2016, Facebook, Inc. 
+ + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; version 2 of the License. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ +#pragma once + +#include "rdb_mariadb_port.h" + +/* C++ standard header files */ +#include +#include +#include + +/* MySQL header files */ +#include "../sql/log.h" +#include "./my_stacktrace.h" +#include "./sql_string.h" + +/* RocksDB header files */ +#include "rocksdb/slice.h" + +#ifdef HAVE_JEMALLOC +#include +#endif + +namespace myrocks { + +/* + Guess what? + An interface is a class where all members are public by default. +*/ + +#ifndef interface +#define interface struct +#endif // interface + +/* + Introduce C-style pseudo-namespaces, a handy way to make code more readble + when calling into a legacy API, which does not have any namespace defined. + Since we cannot or don't want to change the API in any way, we can use this + mechanism to define readability tokens that look like C++ namespaces, but are + not enforced in any way by the compiler, since the pre-compiler strips them + out. However, on the calling side, code looks like my_core::thd_ha_data() + rather than plain a thd_ha_data() call. This technique adds an immediate + visible cue on what type of API we are calling into. +*/ + +#ifndef my_core +// C-style pseudo-namespace for MySQL Core API, to be used in decorating calls +// to non-obvious MySQL functions, like the ones that do not start with well +// known prefixes: "my_", "sql_", and "mysql_". 
+#define my_core +#endif // my_core + +/* + The intent behind a SHIP_ASSERT() macro is to have a mechanism for validating + invariants in retail builds. Traditionally assertions (such as macros defined + in ) are evaluated for performance reasons only in debug builds and + become NOOP in retail builds when NDEBUG is defined. + + This macro is intended to validate the invariants which are critical for + making sure that data corruption and data loss won't take place. Proper + intended usage can be described as "If a particular condition is not true then + stop everything what's going on and terminate the process because continued + execution will cause really bad things to happen". + + Use the power of SHIP_ASSERT() wisely. +*/ + +#ifndef SHIP_ASSERT +#define SHIP_ASSERT(expr) \ + do { \ + if (!(expr)) { \ + my_safe_printf_stderr("\nShip assert failure: \'%s\'\n", #expr); \ + abort_with_stack_traces(); \ + } \ + } while (0) +#endif // SHIP_ASSERT + +/* + Assert a implies b. + If a is true, then b must be true. + If a is false, then the value is b does not matter. +*/ +#ifndef DBUG_ASSERT_IMP +#define DBUG_ASSERT_IMP(a, b) DBUG_ASSERT(!(a) || (b)) +#endif + +/* + Assert a if and only if b. + a and b must be both true or both false. +*/ +#ifndef DBUG_ASSERT_IFF +#define DBUG_ASSERT_IFF(a, b) \ + DBUG_ASSERT(static_cast(a) == static_cast(b)) +#endif + + +/* + Portability: use __PRETTY_FUNCTION__ when available, otherwise use __func__ + which is in the standard. +*/ + +#ifdef __GNUC__ +# define __MYROCKS_PORTABLE_PRETTY_FUNCTION__ __PRETTY_FUNCTION__ +#else +# define __MYROCKS_PORTABLE_PRETTY_FUNCTION__ __func__ +#endif + +/* + Intent behind this macro is to avoid manually typing the function name every + time we want to add the debugging statement and use the compiler for this + work. This avoids typical refactoring problems when one renames a function, + but the tracing message doesn't get updated. 
+ + We could use __func__ or __FUNCTION__ macros, but __PRETTY_FUNCTION__ + contains the signature of the function as well as its bare name and provides + therefore more context when interpreting the logs. +*/ +#define DBUG_ENTER_FUNC() DBUG_ENTER(__MYROCKS_PORTABLE_PRETTY_FUNCTION__) + +/* + Error handling pattern used across MySQL abides by the following rules: "All + functions that can report an error (usually an allocation error), should + return 0/FALSE/false on success, 1/TRUE/true on failure." + + https://dev.mysql.com/doc/internals/en/additional-suggestions.html has more + details. + + To increase the comprehension and readability of MyRocks codebase we'll use + constants similar to ones from C standard (EXIT_SUCCESS and EXIT_FAILURE) to + make sure that both failure and success paths are clearly identifiable. The + definitions of FALSE and TRUE come from . +*/ +#define HA_EXIT_SUCCESS FALSE +#define HA_EXIT_FAILURE TRUE + +/* + Macros to better convey the intent behind checking the results from locking + and unlocking mutexes. +*/ +#define RDB_MUTEX_LOCK_CHECK(m) \ + rdb_check_mutex_call_result(__MYROCKS_PORTABLE_PRETTY_FUNCTION__, true, \ + mysql_mutex_lock(&m)) +#define RDB_MUTEX_UNLOCK_CHECK(m) \ + rdb_check_mutex_call_result(__MYROCKS_PORTABLE_PRETTY_FUNCTION__, false, \ + mysql_mutex_unlock(&m)) + +/* + Generic constant. +*/ +const size_t RDB_MAX_HEXDUMP_LEN = 1000; + +/* + Helper function to get an NULL terminated uchar* out of a given MySQL String. +*/ + +inline uchar *rdb_mysql_str_to_uchar_str(my_core::String *str) { + DBUG_ASSERT(str != nullptr); + return reinterpret_cast(str->c_ptr()); +} + +/* + Helper function to get plain (not necessary NULL terminated) uchar* out of a + given STL string. +*/ + +inline const uchar *rdb_std_str_to_uchar_ptr(const std::string &str) { + return reinterpret_cast(str.data()); +} + +/* + Helper function to convert seconds to milliseconds. 
+*/ + +constexpr int rdb_convert_sec_to_ms(int sec) { + return std::chrono::milliseconds(std::chrono::seconds(sec)).count(); +} + +/* + Helper function to get plain (not necessary NULL terminated) uchar* out of a + given RocksDB item. +*/ + +inline const uchar *rdb_slice_to_uchar_ptr(const rocksdb::Slice *item) { + DBUG_ASSERT(item != nullptr); + return reinterpret_cast(item->data()); +} + +/* + Call this function in cases when you can't rely on garbage collector and need + to explicitly purge all unused dirty pages. This should be a relatively rare + scenario for cases where it has been verified that this intervention has + noticeable benefits. +*/ +inline int purge_all_jemalloc_arenas() { +#ifdef HAVE_JEMALLOC + unsigned narenas = 0; + size_t sz = sizeof(unsigned); + char name[25] = {0}; + + // Get the number of arenas first. Please see `jemalloc` documentation for + // all the various options. + int result = mallctl("arenas.narenas", &narenas, &sz, nullptr, 0); + + // `mallctl` returns 0 on success and we really want caller to know if all the + // trickery actually works. + if (result) { + return result; + } + + // Form the command to be passed to `mallctl` and purge all the unused dirty + // pages. + snprintf(name, sizeof(name) / sizeof(char), "arena.%d.purge", narenas); + result = mallctl(name, nullptr, nullptr, nullptr, 0); + + return result; +#else + return EXIT_SUCCESS; +#endif +} + +/* + Helper function to check the result of locking or unlocking a mutex. We'll + intentionally abort in case of a failure because it's better to terminate + the process instead of continuing in an undefined state and corrupting data + as a result. +*/ +inline void rdb_check_mutex_call_result(const char *function_name, + const bool attempt_lock, + const int result) { + if (unlikely(result)) { + /* NO_LINT_DEBUG */ + sql_print_error("%s a mutex inside %s failed with an " + "error code %d.", + attempt_lock ? 
"Locking" : "Unlocking", function_name, + result); + + // This will hopefully result in a meaningful stack trace which we can use + // to efficiently debug the root cause. + abort_with_stack_traces(); + } +} + +/* + Helper functions to parse strings. +*/ + +const char *rdb_skip_spaces(const struct charset_info_st *const cs, + const char *str) + MY_ATTRIBUTE((__nonnull__, __warn_unused_result__)); + +bool rdb_compare_strings_ic(const char *const str1, const char *const str2) + MY_ATTRIBUTE((__nonnull__, __warn_unused_result__)); + +const char *rdb_find_in_string(const char *str, const char *pattern, + bool *const succeeded) + MY_ATTRIBUTE((__nonnull__, __warn_unused_result__)); + +const char *rdb_check_next_token(const struct charset_info_st *const cs, + const char *str, const char *const pattern, + bool *const succeeded) + MY_ATTRIBUTE((__nonnull__, __warn_unused_result__)); + +const char *rdb_parse_id(const struct charset_info_st *const cs, + const char *str, std::string *const id) + MY_ATTRIBUTE((__nonnull__(1, 2), __warn_unused_result__)); + +const char *rdb_skip_id(const struct charset_info_st *const cs, const char *str) + MY_ATTRIBUTE((__nonnull__, __warn_unused_result__)); + +const std::vector parse_into_tokens(const std::string& s, + const char delim); + +/* + Helper functions to populate strings. 
+*/ + +std::string rdb_hexdump(const char *data, const std::size_t data_len, + const std::size_t maxsize = 0) + MY_ATTRIBUTE((__nonnull__)); + +/* + Helper function to see if a database exists + */ +bool rdb_database_exists(const std::string &db_name); + +const char *get_rocksdb_supported_compression_types(); + +} // namespace myrocks diff --git a/storage/rocksdb/rocksdb b/storage/rocksdb/rocksdb new file mode 160000 index 0000000000000..ba4c77bd6b16e --- /dev/null +++ b/storage/rocksdb/rocksdb @@ -0,0 +1 @@ +Subproject commit ba4c77bd6b16ea493c555561ed2e59bdc4c15fc0 diff --git a/storage/rocksdb/rocksdb-range-access.txt b/storage/rocksdb/rocksdb-range-access.txt new file mode 100644 index 0000000000000..c974279ac7709 --- /dev/null +++ b/storage/rocksdb/rocksdb-range-access.txt @@ -0,0 +1,353 @@ + +This file describes how MySQL index navigation commands are translated into +RocksDB index navigation commands. + +Index tuples are shown as + + ( kv )-aaa-pkN + +here + * '(kv)' is the 4-byte index number. + * '-' is just for readability + * everything that follows the '-' is mem-comparable form of the key. + In ascii encoding, aaa < bbb < ccc < xxx. + +Tuples that start with '#' do not exist in the database. They are only shown +to demonstrate where Seek() calls end up with. + +== HA_READ_KEY_EXACT, forward CF == + + (kv-1)-xxx-pk +# ( kv )-aaa <-- "kv-aaa" doesn't exist in the database, but it would be + here. + ( kv )-aaa-pk <--- Seek("kv-aaa") will put us here on the next record. + ( kv )-aaa-pk2 + ( kv )-bbb-... + +RocksDB calls: + + it->Seek(kv); + if (it->Valid() && kd->covers_key(..) && kd->cmp_full_keys(...)) + return record. + +== HA_READ_KEY_EXACT, backward CF == + +When we need to seek to a tuple that is a prefix of a full key: + + (kv+1)-xxx-pk + ( kv )-ccc-pk + ( kv )-bbb-pk3 + ( kv )-bbb-pk2 + ( kv )-bbb-pk1 < -- We need to be here +# ( kv )-bbb <---we call Seek(kv-bbb) + ( kv )-aaa-pk ... and end up here. Should call it->Prev(). 
+ +There is a special case when (kv)-bbb-pk1 is the last record in the CF, and +we get invalid iterator. Then, we need to call SeekToLast(). + +Another kind of special case is when we need to seek to the full value. +Suppose, the lookup tuple is kv-bbb-pk1: + + (kv+1)-xxx-pk + ( kv )-ccc-pk + ( kv )-bbb-pk3 + ( kv )-bbb-pk2 + ( kv )-bbb-pk1 < -- Seek(kv-bbb-pk1) + ( kv )-bbb-pk0 + +Then, Seek(kv-bbb-pk1) will position us exactly the tuple we need, and we +won't need to call it->Prev(). If we get an invalid iterator, there is no +need to call SeekToLast(). + +RocksDB calls: + + it->Seek(tuple); + + if (!using_full_key) + { + if (!it->Valid()) + it->SeekToLast(); + else + it->Prev(); + } + + if (it->Valid() && kd->covers_key(..) && kd->cmp_full_keys(...)) + return record. + +== HA_READ_KEY_OR_NEXT, forward CF == + +This is finding min(key) such that key >= lookup_tuple. + +If lookup tuple is kv-bbb: + + ( kv )-aaa-pk +# ( kv )-bbb <-- "kv-bbb" doesn't exist in the database, but it would be + here. + ( kv )-bbb-pk1 <--- Seek("kv-bbb") will put us here on the next record. + ( kv )-bbb-pk2 + ( kv )-bbb-... + +RocksDB calls: + + Seek(kv); + if (it->Valid() && kd->covers_key(..) && kd->cmp_full_keys(...)) + return record. + +== HA_READ_KEY_OR_NEXT, backward CF == + +When specified key tuple is a key prefix: + + (kv+1)-xxx-pk + ( kv )-ccc-pk + ( kv )-bbb-pk3 + ( kv )-bbb-pk2 + ( kv )-bbb-pk1 < -- We need to be here (or above) +# ( kv )-bbb <---we call Seek(kv-bbb) + ( kv )-aaa-pk ... and end up here. Should call it->Prev(). + +There is a special case when (kv)-bbb-pk1 is the last record in the CF, and +we get invalid iterator. Then, we need to call SeekToLast(). + +Another kind of special case is when we need to seek to the full value. 
+Suppose, the lookup tuple is kv-bbb-pk1: + + (kv+1)-xxx-pk + ( kv )-ccc-pk + ( kv )-bbb-pk3 + ( kv )-bbb-pk2 + ( kv )-bbb-pk1 < -- Seek(kv-bbb-pk1) + ( kv )-bbb-pk0 + +Then, Seek(kv-bbb-pk1) may position us exactly at the tuple we need, and we +won't need to call it->Prev(). +If kv-bbb-pk1 is not present in the database, we will be positioned on +kv-bbb-pk0, and we will need to call it->Prev(). +If we get an invalid iterator, we DO need to call SeekToLast(). + +RocksDB calls: + + Seek(...); + + if (!it->Valid()) + it->SeekToLast(); + else + { + if (!using_full_key || + !(kd->covers_key(...) || kd->cmp_full_keys(...)) + it->Prev(); + } + + if (it->Valid() && kd->covers_key(..)) + return record. + +== HA_READ_AFTER_KEY, forward CF == + +This is finding min(key) such that key > lookup_key. + +Suppose lookup_key = kv-bbb + + ( kv )-aaa-pk +# ( kv )-bbb + ( kv )-bbb-pk1 <--- Seek("kv-bbb") will put us here. We need to + ( kv )-bbb-pk2 get to the value that is next after 'bbb'. + ( kv )-bbb-pk3 + ( kv )-bbb-pk4 + ( kv )-bbb-pk5 + ( kv )-ccc-pkN <-- That is, we need to be here. + +However, we don't know that the next value is kv-ccc. Instead, we seek to the +first value that strictly greater than 'kv-bbb'. It is Successor(kv-bbb). + +It doesn't matter if we're using a full extended key or not. + +RocksDB calls: + + Seek(Successor(kv-bbb)); + if (it->Valid() && kd->covers_key(it.key())) + return record; + +Note that the code is the same as with HA_READ_KEY_OR_NEXT, except that +we seek to Successor($lookup_key) instead of $lookup_key itself. + +== HA_READ_AFTER_KEY, backward CF == + +Suppose, the lookup key is 'kv-bbb': + + (kv+1)-xxx-pk + ( kv )-ccc-pk7 + ( kv )-ccc-pk6 <-- We need to be here. +# Successor(kv-bbb) <-- We get here when we call Seek(Successor(kv-bbb)) + ( kv )-bbb-pk5 and we will need to call Prev() (*) + ( kv )-bbb-pk4 + ( kv )-bbb-pk3 + ( kv )-bbb-pk2 + ( kv )-bbb-pk1 +# ( kv )-bbb <-- We would get here if we called Seek(kv-bbb). 
+ ( kv )-aaa-pk + +(*) - unless Successor(kv-bbb)=(kv-ccc), and Seek(kv-ccc) hits the row. In +that case, we won't need to call Prev(). + +RocksDB calls: + + Seek(Successor(kv-bbb)); + if (!it->Valid()) + { + /* + We may get EOF if rows with 'kv-bbb' (below the Successor... line in the + diagram) do not exist. This doesn't mean that rows with values kv-ccc + do not exist. + */ + it->SeekToLast(); + } + else + { + if (!using_full_key || + !kd->value_matches_prefix(...)) + { + it->Prev(); + } + } + + if (it->Valid() && kd->covers_key(...)) + return record. + +Note that the code is the same as with HA_READ_KEY_OR_NEXT, except that +we seek to Successor($lookup_key) instead of $lookup_key itself. + + +== HA_READ_BEFORE_KEY, forward CF == + +This is finding max(key) such that key < lookup_tuple. + +Suppose, lookup_tuple=kv-bbb. + + ( kv )-aaa-pk1 + ( kv )-aaa-pk2 + ( kv )-aaa-pk3 <-- Need to be here. +# ( kv )-bbb + ( kv )-bbb-pk4 <-- Seek("kv-bbb") will put us here. + ( kv )-bbb-pk5 + ( kv )-bbb-pk6 + +1. Seek(kv-bbb) will put us at kv-bbb-pk4 (or return an invalid iterator + if kv-bbb-pk4 and subsequent rows do not exist in the db). +2. We will need to call Prev() to get to the record before. + (if there is no record before kv-bbb, then we can't find a record). + +It doesn't matter if we're using a full extended key or not. + +RocksDB calls: + + it->Seek(kv-bbb); + if (it->Valid()) + it->Prev(); + else + it->SeekToLast(); + + if (it->Valid() && kd->covers_key(...)) + return record; + + +== HA_READ_BEFORE_KEY, backward CF == + +This is finding max(key) such that key < lookup_tuple. +Suppose, lookup_tuple=kv-bbb, a prefix of the full key. + + ( kv )-bbb-pk6 + ( kv )-bbb-pk5 + ( kv )-bbb-pk4 +# ( kv )-bbb + ( kv )-aaa-pk3 <-- Need to be here, and Seek("kv-bbb") will put us here + ( kv )-aaa-pk2 + ( kv )-aaa-pk1 + +If the lookup tuple is a full key (e.g. kv-bbb-pk4), and the key is present in +the database, the iterator will be positioned on the key. 
We will need to call +Next() to get the next key. + +RocksDB calls: + + it->Seek(kv-bbb); + + if (it->Valid() && using_full_key && + kd->value_matches_prefix(...)) + { + /* We are using full key and we've hit an exact match */ + it->Next(); + } + + if (it->Valid() && kd->covers_key(...)) + return record; + +== HA_READ_PREFIX_LAST, forward CF == + +Find the last record with the specified index prefix lookup_tuple. + +Suppose, lookup_tuple='kv-bbb' + + ( kv )-aaa-pk2 + ( kv )-aaa-pk3 +# ( kv )-bbb + ( kv )-bbb-pk4 + ( kv )-bbb-pk5 + ( kv )-bbb-pk6 + ( kv )-bbb-pk7 <--- Need to be here. +# ( kv )-ccc + ( kv )-ccc-pk8 <-- Seek(Successor(kv-bbb)) will get us here. will need + ( kv )-ccc-pk9 to call Prev(). + +RocksDB calls: + + Seek(Successor(kv-bbb)); + if (!it->Valid()) + it->SeekToLast(); + else + it->Prev(); + + if (it->Valid() && kd->covers_key(...)) + { + if (!cmp_full_keys(lookup_tuple)) // not needed in _OR_PREV + { + // the record's prefix matches lookup_tuple. + return record; + } + } + +== HA_READ_PREFIX_LAST, backward CF == + +Suppose, lookup_tuple='kv-bbb' + + ( kv )-ccc-pk9 + ( kv )-ccc-pk8 +# ( kv )-ccc <-- 2. Seek(Successor(kv-bbb)) will point here + and it will fall down to the next row. + ( kv )-bbb-pk7 <--- 1. Need to be here. + ( kv )-bbb-pk6 + ( kv )-bbb-pk5 + ( kv )-bbb-pk4 +# ( kv )-bbb + ( kv )-aaa-pk3 + ( kv )-aaa-pk2 + + +RocksDB calls: + + it->Seek(Successor(kv-bbb)); + + if (using_full_key && it->Valid() && !cmp_full_keys(Successor(lookup_key))) + it->Next(); + + if (it->Valid() && kd->covers_key(..)) + { + if (!cmp_full_keys(...)) // not needed in _OR_PREV + { + // the record's prefix matches lookup_tuple. + return record; + } + } + +== HA_READ_PREFIX_LAST_OR_PREV, forward or backward CF == + +This is just like HA_READ_PREFIX_LAST but we don't need to check that the key +we've got is in the search prefix. 
(search for "not needed in _OR_PREV" above) diff --git a/storage/rocksdb/tools/mysql_ldb.cc b/storage/rocksdb/tools/mysql_ldb.cc new file mode 100644 index 0000000000000..ac61eb4f257d8 --- /dev/null +++ b/storage/rocksdb/tools/mysql_ldb.cc @@ -0,0 +1,18 @@ +// Copyright (c) 2013, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. +// +#include +#include "../rdb_comparator.h" +#include "rocksdb/ldb_tool.h" + +int main(int argc, char **argv) { + rocksdb::Options db_options; + myrocks::Rdb_pk_comparator pk_comparator; + db_options.comparator = &pk_comparator; + + rocksdb::LDBTool tool; + tool.Run(argc, argv, db_options); + return 0; +} diff --git a/storage/rocksdb/unittest/CMakeLists.txt b/storage/rocksdb/unittest/CMakeLists.txt new file mode 100644 index 0000000000000..de8d0d82aead3 --- /dev/null +++ b/storage/rocksdb/unittest/CMakeLists.txt @@ -0,0 +1,22 @@ +IF (TARGET rocksdb) + INCLUDE_DIRECTORIES(${CMAKE_SOURCE_DIR}/include ${CMAKE_SOURCE_DIR}/zlib + ${CMAKE_SOURCE_DIR}/unittest/mytap + ${CMAKE_SOURCE_DIR}/rocksdb/third-party/gtest-1.7.0/fused-src + ) + LINK_LIBRARIES(mytap mysys dbug strings) + + ADD_DEFINITIONS(-DSTANDALONE_UNITTEST) + + MYSQL_ADD_EXECUTABLE(test_properties_collector + test_properties_collector.cc + ) + TARGET_LINK_LIBRARIES(test_properties_collector mysqlserver) + + # Necessary to make sure that we can use the jemalloc API calls. 
+ GET_TARGET_PROPERTY(mysql_embedded LINK_FLAGS PREV_LINK_FLAGS) + IF(NOT PREV_LINK_FLAGS) + SET(PREV_LINK_FLAGS) + ENDIF() + SET_TARGET_PROPERTIES(test_properties_collector PROPERTIES LINK_FLAGS + "${PREV_LINK_FLAGS} ${WITH_MYSQLD_LDFLAGS}") +ENDIF() diff --git a/storage/rocksdb/unittest/test_properties_collector.cc b/storage/rocksdb/unittest/test_properties_collector.cc new file mode 100644 index 0000000000000..46a3badc6eeab --- /dev/null +++ b/storage/rocksdb/unittest/test_properties_collector.cc @@ -0,0 +1,54 @@ +/* + Copyright (c) 2015, Facebook, Inc. + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; version 2 of the License. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ + +/* MyRocks header files */ +#include "../ha_rocksdb.h" +#include "../rdb_datadic.h" + +void putKeys(myrocks::Rdb_tbl_prop_coll *coll, int num, bool is_delete, + uint64_t expected_deleted) { + std::string str("aaaaaaaaaaaaaa"); + rocksdb::Slice sl(str.data(), str.size()); + + for (int i = 0; i < num; i++) { + coll->AddUserKey( + sl, sl, is_delete ? 
rocksdb::kEntryDelete : rocksdb::kEntryPut, 0, 100); + } + DBUG_ASSERT(coll->GetMaxDeletedRows() == expected_deleted); +} + +int main(int argc, char **argv) { + // test the circular buffer for delete flags + myrocks::Rdb_compact_params params; + params.m_file_size = 333; + params.m_deletes = 333; // irrelevant + params.m_window = 10; + + myrocks::Rdb_tbl_prop_coll coll(nullptr, params, 0, + RDB_DEFAULT_TBL_STATS_SAMPLE_PCT); + + putKeys(&coll, 2, true, 2); // [xx] + putKeys(&coll, 3, false, 2); // [xxo] + putKeys(&coll, 1, true, 3); // [xxox] + putKeys(&coll, 6, false, 3); // [xxoxoooooo] + putKeys(&coll, 3, true, 4); // xxo[xooooooxxx] + putKeys(&coll, 1, false, 4); // xxox[ooooooxxxo] + putKeys(&coll, 100, false, 4); // ....[oooooooooo] + putKeys(&coll, 100, true, 10); // ....[xxxxxxxxxx] + putKeys(&coll, 100, true, 10); // ....[oooooooooo] + + return 0; +} diff --git a/storage/rocksdb/ut0counter.h b/storage/rocksdb/ut0counter.h new file mode 100644 index 0000000000000..af2e023af27eb --- /dev/null +++ b/storage/rocksdb/ut0counter.h @@ -0,0 +1,203 @@ +/* +Copyright (c) 2012, Oracle and/or its affiliates. All Rights Reserved. +This program is free software; you can redistribute it and/or modify it under +the terms of the GNU General Public License as published by the Free Software +Foundation; version 2 of the License. +This program is distributed in the hope that it will be useful, but WITHOUT +ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. 
+You should have received a copy of the GNU General Public License along with +this program; if not, write to the Free Software Foundation, Inc., +51 Franklin Street, Suite 500, Boston, MA 02110-1335 USA +*****************************************************************************/ + +/**************************************************//** +@file include/ut0counter.h +Counter utility class +Created 2012/04/12 by Sunny Bains +*******************************************************/ + +#ifndef UT0COUNTER_H +#define UT0COUNTER_H + +#include + +/** CPU cache line size */ +#define CACHE_LINE_SIZE 64 + +/** Default number of slots to use in ib_counter_t */ +#define IB_N_SLOTS 64 + +#ifdef __WIN__ +#define get_curr_thread_id() GetCurrentThreadId() +#else +#define get_curr_thread_id() pthread_self() +#endif + +#define UT_ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0])) + +/** Get the offset into the counter array. */ +template +struct generic_indexer_t { + /** Default constructor/destructor should be OK. */ + + /** @return offset within m_counter */ + size_t offset(size_t index) const { + return(((index % N) + 1) * (CACHE_LINE_SIZE / sizeof(Type))); + } +}; + +#ifdef HAVE_SCHED_GETCPU +//#include // Including this causes problems with EMPTY symbol +#include // Include this instead +/** Use the cpu id to index into the counter array. If it fails then +use the thread id. */ +template +struct get_sched_indexer_t : public generic_indexer_t { + /** Default constructor/destructor should be OK. */ + + /* @return result from sched_getcpu(), the thread id if it fails. */ + size_t get_rnd_index() const { + + size_t cpu = sched_getcpu(); + if (cpu == (size_t) -1) { + cpu = get_curr_thread_id(); + } + + return(cpu); + } +}; +#endif /* HAVE_SCHED_GETCPU */ + +/** Use the thread id to index into the counter array. */ +template +struct thread_id_indexer_t : public generic_indexer_t { + /** Default constructor/destructor should are OK. 
*/ + + /* @return a random number, currently we use the thread id. Where + thread id is represented as a pointer, it may not work as + effectively. */ + size_t get_rnd_index() const { + return get_curr_thread_id(); + } +}; + +/** For counters where N=1 */ +template +struct single_indexer_t { + /** Default constructor/destructor should be OK. */ + + /** @return offset within m_counter */ + size_t offset(size_t index) const { + DBUG_ASSERT(N == 1); + return((CACHE_LINE_SIZE / sizeof(Type))); + } + + /* @return 1 */ + size_t get_rnd_index() const { + DBUG_ASSERT(N == 1); + return(1); + } +}; + +/** Class for using fuzzy counters. The counter is not protected by any +mutex and the results are not guaranteed to be 100% accurate but close +enough. Creates an array of counters and separates each element by the +CACHE_LINE_SIZE bytes */ +template < + typename Type, + int N = IB_N_SLOTS, + template class Indexer = thread_id_indexer_t> +class ib_counter_t { +public: + ib_counter_t() { memset(m_counter, 0x0, sizeof(m_counter)); } + + ~ib_counter_t() + { + DBUG_ASSERT(validate()); + } + + bool validate() { +#ifdef UNIV_DEBUG + size_t n = (CACHE_LINE_SIZE / sizeof(Type)); + + /* Check that we aren't writing outside our defined bounds. */ + for (size_t i = 0; i < UT_ARRAY_SIZE(m_counter); i += n) { + for (size_t j = 1; j < n - 1; ++j) { + DBUG_ASSERT(m_counter[i + j] == 0); + } + } +#endif /* UNIV_DEBUG */ + return(true); + } + + /** If you can't use a good index id. Increment by 1. */ + void inc() { add(1); } + + /** If you can't use a good index id. + * @param n - is the amount to increment */ + void add(Type n) { + size_t i = m_policy.offset(m_policy.get_rnd_index()); + + DBUG_ASSERT(i < UT_ARRAY_SIZE(m_counter)); + + m_counter[i] += n; + } + + /** Use this if you can use a unique identifier, saves a + call to get_rnd_index(). 
+ @param i - index into a slot + @param n - amount to increment */ + void add(size_t index, Type n) { + size_t i = m_policy.offset(index); + + DBUG_ASSERT(i < UT_ARRAY_SIZE(m_counter)); + + m_counter[i] += n; + } + + /** If you can't use a good index id. Decrement by 1. */ + void dec() { sub(1); } + + /** If you can't use a good index id. + * @param - n is the amount to decrement */ + void sub(Type n) { + size_t i = m_policy.offset(m_policy.get_rnd_index()); + + DBUG_ASSERT(i < UT_ARRAY_SIZE(m_counter)); + + m_counter[i] -= n; + } + + /** Use this if you can use a unique identifier, saves a + call to get_rnd_index(). + @param i - index into a slot + @param n - amount to decrement */ + void sub(size_t index, Type n) { + size_t i = m_policy.offset(index); + + DBUG_ASSERT(i < UT_ARRAY_SIZE(m_counter)); + + m_counter[i] -= n; + } + + /* @return total value - not 100% accurate, since it is not atomic. */ + operator Type() const { + Type total = 0; + + for (size_t i = 0; i < N; ++i) { + total += m_counter[m_policy.offset(i)]; + } + + return(total); + } + +private: + /** Indexer into the array */ + Indexer<Type, N> m_policy; + + /** Slot 0 is unused. 
*/ + Type m_counter[(N + 1) * (CACHE_LINE_SIZE / sizeof(Type))]; +}; + +#endif /* UT0COUNTER_H */ diff --git a/storage/xtradb/btr/btr0scrub.cc b/storage/xtradb/btr/btr0scrub.cc index 560d2ece6c03c..e9434c9f77839 100644 --- a/storage/xtradb/btr/btr0scrub.cc +++ b/storage/xtradb/btr/btr0scrub.cc @@ -129,15 +129,15 @@ btr_scrub_lock_dict_func(ulint space_id, bool lock_to_close_table, * if we don't lock to close a table, we check if space * is closing, and then instead give up */ - if (lock_to_close_table == false) { - fil_space_t* space = fil_space_acquire(space_id); - if (!space || space->stop_new_ops) { - if (space) { - fil_space_release(space); - } + if (lock_to_close_table) { + } else if (fil_space_t* space = fil_space_acquire(space_id)) { + bool stopping = space->is_stopping(); + fil_space_release(space); + if (stopping) { return false; } - fil_space_release(space); + } else { + return false; } os_thread_sleep(250000); @@ -197,18 +197,15 @@ btr_scrub_table_close_for_thread( return; } - fil_space_t* space = fil_space_acquire(scrub_data->space); - - /* If tablespace is not marked as stopping perform - the actual close. */ - if (space && !space->is_stopping()) { - mutex_enter(&dict_sys->mutex); - /* perform the actual closing */ - btr_scrub_table_close(scrub_data->current_table); - mutex_exit(&dict_sys->mutex); - } - - if (space) { + if (fil_space_t* space = fil_space_acquire(scrub_data->space)) { + /* If tablespace is not marked as stopping perform + the actual close. 
*/ + if (!space->is_stopping()) { + mutex_enter(&dict_sys->mutex); + /* perform the actual closing */ + btr_scrub_table_close(scrub_data->current_table); + mutex_exit(&dict_sys->mutex); + } fil_space_release(space); } diff --git a/storage/xtradb/buf/buf0buf.cc b/storage/xtradb/buf/buf0buf.cc index c9a3f6aa6ecb9..70cd9610b1848 100644 --- a/storage/xtradb/buf/buf0buf.cc +++ b/storage/xtradb/buf/buf0buf.cc @@ -6413,14 +6413,12 @@ buf_page_decrypt_after_read( return (true); } - fil_space_t* space = fil_space_acquire(bpage->space); - - fil_space_crypt_t* crypt_data = space->crypt_data; + fil_space_t* space = fil_space_acquire(bpage->space, true); /* Page is encrypted if encryption information is found from tablespace and page contains used key_version. This is true also for pages first compressed and then encrypted. */ - if (!crypt_data) { + if (!space || !space->crypt_data) { key_version = 0; } @@ -6504,6 +6502,8 @@ buf_page_decrypt_after_read( } } - fil_space_release(space); + if (space != NULL) { + fil_space_release(space); + } return (success); } diff --git a/storage/xtradb/fil/fil0fil.cc b/storage/xtradb/fil/fil0fil.cc index a116bfad99d7f..e7244d719c810 100644 --- a/storage/xtradb/fil/fil0fil.cc +++ b/storage/xtradb/fil/fil0fil.cc @@ -6389,16 +6389,12 @@ fil_flush( { mutex_enter(&fil_system->mutex); - fil_space_t* space = fil_space_get_by_id(space_id); - - if (!space || space->stop_new_ops) { - mutex_exit(&fil_system->mutex); - - return; + if (fil_space_t* space = fil_space_get_by_id(space_id)) { + if (!space->is_stopping()) { + fil_flush_low(space); + } } - fil_flush_low(space); - mutex_exit(&fil_system->mutex); } @@ -6438,8 +6434,7 @@ fil_flush_file_spaces( space; space = UT_LIST_GET_NEXT(unflushed_spaces, space)) { - if (space->purpose == purpose && !space->stop_new_ops) { - + if (space->purpose == purpose && !space->is_stopping()) { space_ids[n_space_ids++] = space->id; } } @@ -7388,12 +7383,13 @@ Used by background threads that do not necessarily hold proper 
locks for concurrency control. @param[in] id tablespace ID @param[in] silent whether to silently ignore missing tablespaces -@return the tablespace, or NULL if missing or being deleted */ +@param[in] for_io whether to look up the tablespace while performing I/O + (possibly executing TRUNCATE) +@return the tablespace +@retval NULL if missing or being deleted or truncated */ inline fil_space_t* -fil_space_acquire_low( - ulint id, - bool silent) +fil_space_acquire_low(ulint id, bool silent, bool for_io = false) { fil_space_t* space; @@ -7407,7 +7403,7 @@ fil_space_acquire_low( " tablespace " ULINTPF ".", id); ut_error; } - } else if (space->stop_new_ops) { + } else if (!for_io && space->is_stopping()) { space = NULL; } else { space->n_pending_ops++; @@ -7422,22 +7418,24 @@ fil_space_acquire_low( Used by background threads that do not necessarily hold proper locks for concurrency control. @param[in] id tablespace ID -@return the tablespace, or NULL if missing or being deleted */ +@param[in] for_io whether to look up the tablespace while performing I/O + (possibly executing TRUNCATE) +@return the tablespace +@retval NULL if missing or being deleted or truncated */ fil_space_t* -fil_space_acquire( - ulint id) +fil_space_acquire(ulint id, bool for_io) { - return(fil_space_acquire_low(id, false)); + return(fil_space_acquire_low(id, false, for_io)); } /** Acquire a tablespace that may not exist. Used by background threads that do not necessarily hold proper locks for concurrency control. @param[in] id tablespace ID -@return the tablespace, or NULL if missing or being deleted */ +@return the tablespace +@retval NULL if missing or being deleted */ fil_space_t* -fil_space_acquire_silent( - ulint id) +fil_space_acquire_silent(ulint id) { return(fil_space_acquire_low(id, true)); } @@ -7445,8 +7443,7 @@ fil_space_acquire_silent( /** Release a tablespace acquired with fil_space_acquire(). 
@param[in,out] space tablespace to release */ void -fil_space_release( - fil_space_t* space) +fil_space_release(fil_space_t* space) { mutex_enter(&fil_system->mutex); ut_ad(space->magic_n == FIL_SPACE_MAGIC_N); @@ -7464,8 +7461,7 @@ If NULL, use the first fil_space_t on fil_system->space_list. @return pointer to the next fil_space_t. @retval NULL if this was the last*/ fil_space_t* -fil_space_next( - fil_space_t* prev_space) +fil_space_next(fil_space_t* prev_space) { fil_space_t* space=prev_space; @@ -7488,8 +7484,8 @@ fil_space_next( fil_ibd_create(), or dropped, or !tablespace. */ while (space != NULL && (UT_LIST_GET_LEN(space->chain) == 0 - || space->stop_new_ops - || space->purpose != FIL_TABLESPACE)) { + || space->is_stopping() + || space->purpose != FIL_TABLESPACE)) { space = UT_LIST_GET_NEXT(space_list, space); } diff --git a/storage/xtradb/include/fil0fil.h b/storage/xtradb/include/fil0fil.h index b80df0573517e..698039afede1d 100644 --- a/storage/xtradb/include/fil0fil.h +++ b/storage/xtradb/include/fil0fil.h @@ -650,27 +650,28 @@ fil_write_flushed_lsn_to_data_files( Used by background threads that do not necessarily hold proper locks for concurrency control. @param[in] id tablespace ID -@return the tablespace, or NULL if missing or being deleted */ +@param[in] for_io whether to look up the tablespace while performing I/O + (possibly executing TRUNCATE) +@return the tablespace +@retval NULL if missing or being deleted or truncated */ fil_space_t* -fil_space_acquire( - ulint id) +fil_space_acquire(ulint id, bool for_io = false) MY_ATTRIBUTE((warn_unused_result)); /** Acquire a tablespace that may not exist. Used by background threads that do not necessarily hold proper locks for concurrency control. 
@param[in] id tablespace ID -@return the tablespace, or NULL if missing or being deleted */ +@return the tablespace +@retval NULL if missing or being deleted */ fil_space_t* -fil_space_acquire_silent( - ulint id) +fil_space_acquire_silent(ulint id) MY_ATTRIBUTE((warn_unused_result)); /** Release a tablespace acquired with fil_space_acquire(). @param[in,out] space tablespace to release */ void -fil_space_release( - fil_space_t* space); +fil_space_release(fil_space_t* space); /** Return the next fil_space_t. Once started, the caller must keep calling this until it returns NULL. diff --git a/win/packaging/CPackWixConfig.cmake b/win/packaging/CPackWixConfig.cmake index be07ff7d561e7..e954110ef19ca 100644 --- a/win/packaging/CPackWixConfig.cmake +++ b/win/packaging/CPackWixConfig.cmake @@ -9,7 +9,7 @@ IF(ESSENTIALS) ENDIF() ELSE() SET(CPACK_COMPONENTS_USED - "Server;Client;Development;SharedLibraries;Documentation;Readme;Debuginfo;Common;connect-engine;ClientPlugins;gssapi-server;gssapi-client;aws-key-management") + "Server;Client;Development;SharedLibraries;Documentation;Readme;Debuginfo;Common;VCCRT;connect-engine;ClientPlugins;gssapi-server;gssapi-client;aws-key-management;rocksdb-engine") ENDIF() SET( WIX_FEATURE_MySQLServer_EXTRA_FEATURES "DBInstance;SharedClientServerComponents") @@ -35,6 +35,7 @@ SET(CPACK_COMPONENTS_ALL ${CPACK_ALL}) SET(CPACK_COMPONENT_GROUP_ALWAYSINSTALL_HIDDEN 1) SET(CPACK_COMPONENT_README_GROUP "AlwaysInstall") SET(CPACK_COMPONENT_COMMON_GROUP "AlwaysInstall") +SET(CPACK_COMPONENT_VCCRT_GROUP "AlwaysInstall") # Feature MySQL Server SET(CPACK_COMPONENT_GROUP_MYSQLSERVER_DISPLAY_NAME "MariaDB Server") @@ -57,7 +58,7 @@ SET(CPACK_COMPONENT_GROUP_MYSQLSERVER_DESCRIPTION "Install server") #Miscellaneous (hidden) components, part of server / or client programs - FOREACH(comp connect-engine ClientPlugins gssapi-server gssapi-client aws-key-management) + FOREACH(comp connect-engine ClientPlugins gssapi-server gssapi-client aws-key-management 
rocksdb-engine) STRING(TOUPPER "${comp}" comp) SET(CPACK_COMPONENT_${comp}_GROUP "MySQLServer") SET(CPACK_COMPONENT_${comp}_HIDDEN 1) diff --git a/win/upgrade_wizard/CMakeLists.txt b/win/upgrade_wizard/CMakeLists.txt index 44d6249ea1e9f..dc4ef67387d24 100644 --- a/win/upgrade_wizard/CMakeLists.txt +++ b/win/upgrade_wizard/CMakeLists.txt @@ -16,10 +16,13 @@ IF(NOT MFC_FOUND) ENDIF() RETURN() ENDIF() - -# MFC should be statically linked -SET(CMAKE_MFC_FLAG 1) - +IF(MSVC_CRT_TYPE MATCHES "/MD") + # MFC should be dynamically linked + SET(CMAKE_MFC_FLAG 2) +ELSE() + # MFC should be statically linked + SET(CMAKE_MFC_FLAG 1) +ENDIF() # Enable exception handling (avoids warnings) SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /EHsc")