From 43d5edf97c5f9e86173816c1837a1d01267c5165 Mon Sep 17 00:00:00 2001 From: Sergei Petrunia Date: Fri, 28 Jul 2017 17:52:07 +0000 Subject: [PATCH 01/34] Copy of commit 394d0712d3d46a87a8063e14e998e9c22336e3a6 Author: Anca Agape Date: Thu Jul 27 15:43:07 2017 -0700 Fix rpl.rpl_4threads_deadlock test broken by D5005670 Summary: In D5005670 in fill_fields_processlist() function we introduced a point where we were trying to take the LOCK_thd_data before the synchronization point used by test processlist_after_LOCK_thd_count_before_LOCK_thd_data. This was happening in get_attached_srv_session() function called. Replaced this with get_attached_srv_session_safe() and moved it after lock is aquired. Reviewed By: tianx Differential Revision: D5505992 fbshipit-source-id: bc53924 --- storage/rocksdb/CMakeLists.txt | 42 +- storage/rocksdb/README | 24 +- storage/rocksdb/ha_rocksdb.cc | 2794 ++++++++++++----- storage/rocksdb/ha_rocksdb.h | 178 +- storage/rocksdb/ha_rocksdb_proto.h | 14 +- .../rocksdb/r/add_index_inplace.result | 43 + .../rocksdb/r/add_unique_index_inplace.result | 18 +- .../rocksdb/r/allow_no_primary_key.result | 15 +- .../mysql-test/rocksdb/r/bloomfilter.result | 810 ++++- .../rocksdb/r/bloomfilter_skip.result | 810 ++++- .../mysql-test/rocksdb/r/bulk_load.result | 26 +- .../rocksdb/r/bulk_load_errors.result | 19 + .../rocksdb/r/bulk_load_rev_cf.result | 82 + .../r/bulk_load_rev_cf_and_data.result | 82 + .../rocksdb/r/bulk_load_rev_data.result | 82 + .../r/cons_snapshot_read_committed.result | 12 +- .../r/cons_snapshot_repeatable_read.result | 2 +- .../r/corrupted_data_reads_debug.result | 10 +- .../rocksdb/r/deadlock_stats.result | 51 + .../mysql-test/rocksdb/r/drop_table.result | 16 - .../mysql-test/rocksdb/r/drop_table2.result | 6 + .../mysql-test/rocksdb/r/foreign_key.result | 8 +- .../mysql-test/rocksdb/r/hermitage.result | 6 +- .../mysql-test/rocksdb/r/i_s_ddl.result | 10 +- .../rocksdb/r/index_merge_rocksdb.result | 48 + 
.../rocksdb/r/index_merge_rocksdb2.result | 1419 +++++++++ .../rocksdb/r/information_schema.result | 9 +- .../mysql-test/rocksdb/r/issue111.result | 2 +- .../r/issue243_transactionStatus.result | 155 + .../mysql-test/rocksdb/r/issue255.result | 21 + .../mysql-test/rocksdb/r/issue495.result | 2 - .../rocksdb/r/lock_wait_timeout_stats.result | 27 + .../rocksdb/mysql-test/rocksdb/r/misc.result | 1 + .../rocksdb/r/native_procedure.result | 397 +++ .../r/prefix_extractor_override.result | 76 + .../mysql-test/rocksdb/r/rocksdb.result | 88 +- .../rocksdb/r/rocksdb_cf_per_partition.result | 14 + .../mysql-test/rocksdb/r/rocksdb_locks.result | 2 +- .../mysql-test/rocksdb/r/rocksdb_parts.result | 2 +- .../mysql-test/rocksdb/r/rqg_runtime.result | 1 + .../mysql-test/rocksdb/r/show_engine.result | 164 +- .../mysql-test/rocksdb/r/singledelete.result | 22 +- .../mysql-test/rocksdb/r/table_stats.result | 4 +- .../rocksdb/r/tbl_opt_data_index_dir.result | 8 +- .../mysql-test/rocksdb/r/ttl_primary.result | 491 +++ .../r/ttl_primary_read_filtering.result | 238 ++ .../r/ttl_primary_with_partitions.result | 256 ++ .../rocksdb/r/type_set_indexes.result | 35 + .../mysql-test/rocksdb/r/type_varchar.result | 15 + .../rocksdb/r/use_direct_reads_writes.result | 2 +- .../rocksdb/r/varbinary_format.result | 260 ++ .../mysql-test/rocksdb/r/write_sync.result | 11 +- .../rocksdb/t/add_index_inplace.test | 47 +- .../rocksdb/t/add_unique_index_inplace.test | 23 +- .../rocksdb/t/allow_no_primary_key.test | 11 +- .../rocksdb/t/bloomfilter-master.opt | 2 +- .../mysql-test/rocksdb/t/bloomfilter.inc | 8 + .../mysql-test/rocksdb/t/bulk_load.inc | 156 + .../mysql-test/rocksdb/t/bulk_load.test | 119 +- .../rocksdb/t/bulk_load_errors.test | 39 + .../rocksdb/t/bulk_load_rev_cf.test | 6 + .../rocksdb/t/bulk_load_rev_cf_and_data.test | 6 + .../rocksdb/t/bulk_load_rev_data.test | 6 + .../mysql-test/rocksdb/t/collation.test | 40 +- .../rocksdb/t/collation_exception.test | 2 + 
.../rocksdb/t/consistent_snapshot.inc | 14 +- .../rocksdb/t/corrupted_data_reads_debug.test | 10 +- .../mysql-test/rocksdb/t/deadlock_stats.test | 3 + .../rocksdb/mysql-test/rocksdb/t/delete.test | 4 +- .../rocksdb/t/drop_table-master.opt | 3 +- .../mysql-test/rocksdb/t/drop_table.test | 5 - .../mysql-test/rocksdb/t/drop_table2.test | 8 + .../rocksdb/t/drop_table_compactions.pl | 37 - .../mysql-test/rocksdb/t/duplicate_table.test | 2 +- .../mysql-test/rocksdb/t/foreign_key.test | 2 + .../rocksdb/t/index_merge_rocksdb-master.opt | 1 + .../rocksdb/t/index_merge_rocksdb.test | 109 + .../rocksdb/t/index_merge_rocksdb2-master.opt | 1 + .../rocksdb/t/index_merge_rocksdb2.test | 70 + .../rocksdb/t/information_schema.test | 4 +- .../rocksdb/mysql-test/rocksdb/t/insert.test | 4 +- .../rocksdb/t/issue243_transactionStatus.test | 80 + .../mysql-test/rocksdb/t/issue255.test | 19 + .../mysql-test/rocksdb/t/issue314.test | 2 +- .../mysql-test/rocksdb/t/issue495.test | 5 + .../rocksdb/mysql-test/rocksdb/t/loaddata.inc | 2 +- .../rocksdb/t/lock_wait_timeout_stats.test | 34 + .../rocksdb/t/native_procedure-master.opt | 1 + .../rocksdb/t/native_procedure.test | 2 + .../t/optimizer_loose_index_scans.test | 1 + .../t/prefix_extractor_override-master.opt | 1 + .../rocksdb/t/prefix_extractor_override.test | 96 + .../mysql-test/rocksdb/t/read_only_tx.test | 2 +- .../rocksdb/mysql-test/rocksdb/t/rocksdb.test | 46 +- .../rocksdb/t/rocksdb_cf_per_partition.test | 16 + .../rocksdb/t/rollback_savepoint.test | 2 + .../mysql-test/rocksdb/t/rpl_savepoint.test | 18 +- .../mysql-test/rocksdb/t/rpl_statement.test | 4 +- .../mysql-test/rocksdb/t/rqg_runtime.test | 1 + .../select_for_update_skip_locked_nowait.test | 2 + .../mysql-test/rocksdb/t/set_checkpoint.inc | 2 +- .../mysql-test/rocksdb/t/show_engine.test | 6 +- .../rocksdb/t/singledelete-master.opt | 2 +- .../mysql-test/rocksdb/t/singledelete.test | 18 +- .../mysql-test/rocksdb/t/sst_count_rows.sh | 4 +- 
.../mysql-test/rocksdb/t/trx_info_rpl.test | 1 + .../rocksdb/t/ttl_primary-master.opt | 2 + .../mysql-test/rocksdb/t/ttl_primary.test | 547 ++++ .../t/ttl_primary_read_filtering-master.opt | 1 + .../rocksdb/t/ttl_primary_read_filtering.test | 371 +++ .../t/ttl_primary_with_partitions-master.opt | 2 + .../t/ttl_primary_with_partitions.test | 253 ++ .../rocksdb/t/type_set_indexes.test | 22 +- .../rocksdb/t/type_varchar-master.opt | 1 + .../rocksdb/t/type_varchar_endspace.inc | 1 + .../rocksdb/t/unsupported_tx_isolations.test | 8 +- .../rocksdb/mysql-test/rocksdb/t/update.test | 4 +- .../rocksdb/t/use_direct_reads_writes.test | 4 +- .../rocksdb/t/varbinary_format.test | 131 + .../mysql-test/rocksdb/t/write_sync.test | 12 +- .../rocksdb_hotbackup/include/create_table.sh | 16 + .../rocksdb_hotbackup/include/stream_run.sh | 31 +- .../rocksdb_hotbackup/r/xbstream.result | 1 + .../rocksdb_hotbackup/t/xbstream.test | 3 + .../r/rpl_skip_trx_api_binlog_format.result | 2 +- .../r/singledelete_idempotent_recovery.result | 24 + .../r/singledelete_idempotent_table.result | 25 + ...c-mater.opt => multiclient_2pc-master.opt} | 0 .../rocksdb_rpl/t/multiclient_2pc.test | 2 +- .../rocksdb_rpl/t/rpl_gtid_crash_safe.test | 1 + .../t/rpl_gtid_crash_safe_wal_corrupt.inc | 1 + .../t/rpl_gtid_rocksdb_sys_header.test | 1 + .../t/rpl_no_unique_check_on_lag.test | 1 + .../t/rpl_no_unique_check_on_lag_mts.test | 1 + .../rocksdb_rpl/t/rpl_rocksdb_snapshot.test | 5 +- .../t/rpl_rocksdb_snapshot_without_gtid.test | 1 + .../t/rpl_rocksdb_stress_crash-slave.opt | 1 + .../t/rpl_skip_trx_api_binlog_format.test | 1 + .../t/singledelete_idempotent_recovery.cnf | 15 + .../t/singledelete_idempotent_recovery.test | 72 + .../t/singledelete_idempotent_table.cnf | 15 + .../t/singledelete_idempotent_table.test | 44 + .../include/correctboolvalue.inc | 25 + .../include/rocksdb_sys_var.inc | 119 + .../rocksdb_sys_vars/r/all_vars.result | 2 - ...low_concurrent_memtable_write_basic.result | 63 +- 
.../r/rocksdb_background_sync_basic.result | 68 - ...b_base_background_compactions_basic.result | 7 - .../r/rocksdb_create_checkpoint_basic.result | 2 +- ...ksdb_debug_ttl_read_filter_ts_basic.result | 46 + .../r/rocksdb_debug_ttl_rec_ts_basic.result | 46 + ...rocksdb_debug_ttl_snapshot_ts_basic.result | 46 + .../r/rocksdb_delayed_write_rate_basic.result | 20 +- ...result => rocksdb_enable_2pc_basic.result} | 0 ...ocksdb_enable_thread_tracking_basic.result | 2 +- .../r/rocksdb_enable_ttl_basic.result | 64 + ...sdb_enable_ttl_read_filtering_basic.result | 64 + ...e_write_thread_adaptive_yield_basic.result | 63 +- ...cksdb_flush_log_at_trx_commit_basic.result | 42 +- ..._flush_memtable_and_lzero_now_basic.result | 50 + .../r/rocksdb_io_write_timeout_basic.result | 86 + ...db_max_background_compactions_basic.result | 46 - ...ocksdb_max_background_flushes_basic.result | 7 - .../rocksdb_max_background_jobs_basic.result | 46 + .../r/rocksdb_reset_stats_basic.result | 97 + ...db_sst_mgr_rate_bytes_per_sec_basic.result | 85 + .../r/rocksdb_update_cf_options_basic.result | 108 + ...t_io_for_flush_and_compaction_basic.result | 7 + .../r/rocksdb_use_direct_writes_basic.result | 7 - ...rocksdb_write_batch_max_bytes_basic.result | 15 + .../rocksdb_sys_vars/t/all_vars.test | 1 + ...access_hint_on_compaction_start_basic.test | 2 +- .../rocksdb_advise_random_on_open_basic.test | 2 +- ...allow_concurrent_memtable_write_basic.test | 17 +- .../t/rocksdb_allow_mmap_reads_basic.test | 2 +- .../t/rocksdb_allow_mmap_writes_basic.test | 2 +- ...sdb_base_background_compactions_basic.test | 7 - ...ocksdb_blind_delete_primary_key_basic.test | 2 +- .../t/rocksdb_block_cache_size_basic.test | 2 +- .../rocksdb_block_restart_interval_basic.test | 2 +- .../t/rocksdb_block_size_basic.test | 2 +- .../t/rocksdb_block_size_deviation_basic.test | 2 +- .../t/rocksdb_bulk_load_basic.test | 2 +- .../t/rocksdb_bulk_load_size_basic.test | 2 +- .../t/rocksdb_bytes_per_sync_basic.test | 2 +- 
...b_cache_index_and_filter_blocks_basic.test | 2 +- .../t/rocksdb_checksums_pct_basic.test | 2 +- .../rocksdb_collect_sst_properties_basic.test | 2 +- .../t/rocksdb_commit_in_the_middle_basic.test | 2 +- .../t/rocksdb_compact_cf_basic.test | 2 +- ...cksdb_compaction_readahead_size_basic.test | 2 +- ...b_compaction_sequential_deletes_basic.test | 2 +- ...ion_sequential_deletes_count_sd_basic.test | 2 +- ...on_sequential_deletes_file_size_basic.test | 2 +- ...ction_sequential_deletes_window_basic.test | 2 +- .../t/rocksdb_create_checkpoint_basic.test | 2 +- .../t/rocksdb_create_if_missing_basic.test | 2 +- ..._create_missing_column_families_basic.test | 2 +- .../t/rocksdb_datadir_basic.test | 2 +- .../t/rocksdb_db_write_buffer_size_basic.test | 2 +- .../t/rocksdb_deadlock_detect_basic.test | 2 +- ...g_optimizer_no_zero_cardinality_basic.test | 2 +- ...ocksdb_debug_ttl_read_filter_ts_basic.test | 16 + .../t/rocksdb_debug_ttl_rec_ts_basic.test | 16 + .../rocksdb_debug_ttl_snapshot_ts_basic.test | 16 + .../t/rocksdb_default_cf_options_basic.test | 2 +- .../t/rocksdb_delayed_write_rate_basic.test | 2 +- ...te_obsolete_files_period_micros_basic.test | 2 +- ...sic.test => rocksdb_enable_2pc_basic.test} | 2 +- .../t/rocksdb_enable_bulk_load_api_basic.test | 2 +- .../rocksdb_enable_thread_tracking_basic.test | 2 +- ...sic.test => rocksdb_enable_ttl_basic.test} | 6 +- ...cksdb_enable_ttl_read_filtering_basic.test | 18 + ...ble_write_thread_adaptive_yield_basic.test | 17 +- .../t/rocksdb_error_if_exists_basic.test | 2 +- ...rocksdb_flush_log_at_trx_commit_basic.test | 5 +- ...ce_flush_memtable_and_lzero_now_basic.test | 17 + ...ocksdb_force_flush_memtable_now_basic.test | 2 +- ...db_force_index_records_in_range_basic.test | 2 +- ...ksdb_hash_index_allow_collision_basic.test | 2 +- .../t/rocksdb_index_type_basic.test | 2 +- .../t/rocksdb_info_log_level_basic.test | 2 +- .../t/rocksdb_io_write_timeout_basic.test | 20 + .../t/rocksdb_is_fd_close_on_exec_basic.test | 2 +- 
.../t/rocksdb_keep_log_file_num_basic.test | 2 +- .../t/rocksdb_lock_scanned_rows_basic.test | 2 +- .../t/rocksdb_lock_wait_timeout_basic.test | 2 +- .../rocksdb_log_file_time_to_roll_basic.test | 2 +- ...sdb_manifest_preallocation_size_basic.test | 2 +- .../t/rocksdb_master_skip_tx_api_basic.test | 2 +- .../rocksdb_max_background_flushes_basic.test | 6 - ...=> rocksdb_max_background_jobs_basic.test} | 4 +- .../t/rocksdb_max_log_file_size_basic.test | 2 +- .../rocksdb_max_manifest_file_size_basic.test | 2 +- .../t/rocksdb_max_open_files_basic.test | 2 +- .../t/rocksdb_max_row_locks_basic.test | 2 +- .../t/rocksdb_max_subcompactions_basic.test | 2 +- .../t/rocksdb_max_total_wal_size_basic.test | 2 +- ...le_reader_for_compaction_inputs_basic.test | 2 +- .../t/rocksdb_no_block_cache_basic.test | 2 +- .../t/rocksdb_override_cf_options_basic.test | 2 +- .../t/rocksdb_paranoid_checks_basic.test | 2 +- .../rocksdb_pause_background_work_basic.test | 2 +- .../t/rocksdb_perf_context_level_basic.test | 2 +- .../rocksdb_persistent_cache_path_basic.test | 2 +- ...ocksdb_persistent_cache_size_mb_basic.test | 2 +- ...ilter_and_index_blocks_in_cache_basic.test | 2 +- ...print_snapshot_conflict_queries_basic.test | 2 +- ...ksdb_rate_limiter_bytes_per_sec_basic.test | 2 +- .../t/rocksdb_read_free_rpl_tables_basic.test | 2 +- .../t/rocksdb_records_in_range_basic.test | 2 +- .../t/rocksdb_reset_stats_basic.test | 21 + ...b_seconds_between_stat_computes_basic.test | 2 +- ...ocksdb_signal_drop_index_thread_basic.test | 2 +- ...cksdb_skip_bloom_filter_on_read_basic.test | 2 +- .../t/rocksdb_skip_fill_cache_basic.test | 2 +- ...ocksdb_skip_unique_check_tables_basic.test | 2 +- ...ksdb_sst_mgr_rate_bytes_per_sec_basic.test | 22 + .../rocksdb_stats_dump_period_sec_basic.test | 2 +- ...cksdb_store_row_debug_checksums_basic.test | 2 +- .../rocksdb_strict_collation_check_basic.test | 2 +- ...ocksdb_table_cache_numshardbits_basic.test | 2 +- ...ocksdb_table_stats_sampling_pct_basic.test | 2 +- 
.../t/rocksdb_trace_sst_api_basic.test | 2 +- .../t/rocksdb_unsafe_for_binlog_basic.test | 2 +- .../t/rocksdb_update_cf_options_basic.test | 94 + .../t/rocksdb_use_adaptive_mutex_basic.test | 2 +- ...ect_io_for_flush_and_compaction_basic.test | 6 + .../t/rocksdb_use_direct_reads_basic.test | 2 +- .../t/rocksdb_use_direct_writes_basic.test | 6 - .../t/rocksdb_use_fsync_basic.test | 2 +- .../t/rocksdb_validate_tables_basic.test | 2 +- ...ksdb_verify_row_debug_checksums_basic.test | 2 +- .../t/rocksdb_wal_bytes_per_sync_basic.test | 2 +- .../t/rocksdb_wal_dir_basic.test | 2 +- .../t/rocksdb_wal_recovery_mode_basic.test | 2 +- .../t/rocksdb_wal_size_limit_mb_basic.test | 2 +- .../t/rocksdb_wal_ttl_seconds_basic.test | 2 +- .../t/rocksdb_whole_key_filtering_basic.test | 2 +- .../rocksdb_write_batch_max_bytes_basic.test | 26 + .../t/rocksdb_write_disable_wal_basic.test | 2 +- ..._ignore_missing_column_families_basic.test | 2 +- storage/rocksdb/rdb_buff.h | 10 + storage/rocksdb/rdb_cf_manager.cc | 87 +- storage/rocksdb/rdb_cf_manager.h | 27 +- storage/rocksdb/rdb_cf_options.cc | 49 +- storage/rocksdb/rdb_cf_options.h | 9 +- storage/rocksdb/rdb_compact_filter.h | 117 +- storage/rocksdb/rdb_datadic.cc | 1188 +++++-- storage/rocksdb/rdb_datadic.h | 317 +- storage/rocksdb/rdb_i_s.cc | 345 +- storage/rocksdb/rdb_index_merge.cc | 22 +- storage/rocksdb/rdb_io_watchdog.cc | 233 ++ storage/rocksdb/rdb_io_watchdog.h | 113 + storage/rocksdb/rdb_perf_context.cc | 34 +- storage/rocksdb/rdb_psi.h | 1 + storage/rocksdb/rdb_sst_info.cc | 227 +- storage/rocksdb/rdb_sst_info.h | 130 +- storage/rocksdb/rdb_threads.h | 2 + storage/rocksdb/rdb_utils.cc | 14 + storage/rocksdb/rdb_utils.h | 3 + storage/rocksdb/rocksdb-range-access.txt | 145 +- 302 files changed, 13809 insertions(+), 2683 deletions(-) create mode 100644 storage/rocksdb/mysql-test/rocksdb/r/bulk_load_errors.result create mode 100644 storage/rocksdb/mysql-test/rocksdb/r/bulk_load_rev_cf.result create mode 100644 
storage/rocksdb/mysql-test/rocksdb/r/bulk_load_rev_cf_and_data.result create mode 100644 storage/rocksdb/mysql-test/rocksdb/r/bulk_load_rev_data.result create mode 100644 storage/rocksdb/mysql-test/rocksdb/r/deadlock_stats.result create mode 100644 storage/rocksdb/mysql-test/rocksdb/r/index_merge_rocksdb.result create mode 100644 storage/rocksdb/mysql-test/rocksdb/r/index_merge_rocksdb2.result create mode 100644 storage/rocksdb/mysql-test/rocksdb/r/issue243_transactionStatus.result create mode 100644 storage/rocksdb/mysql-test/rocksdb/r/issue255.result create mode 100644 storage/rocksdb/mysql-test/rocksdb/r/lock_wait_timeout_stats.result create mode 100644 storage/rocksdb/mysql-test/rocksdb/r/native_procedure.result create mode 100644 storage/rocksdb/mysql-test/rocksdb/r/prefix_extractor_override.result create mode 100644 storage/rocksdb/mysql-test/rocksdb/r/ttl_primary.result create mode 100644 storage/rocksdb/mysql-test/rocksdb/r/ttl_primary_read_filtering.result create mode 100644 storage/rocksdb/mysql-test/rocksdb/r/ttl_primary_with_partitions.result create mode 100644 storage/rocksdb/mysql-test/rocksdb/r/varbinary_format.result create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/bulk_load.inc create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/bulk_load_errors.test create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/bulk_load_rev_cf.test create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/bulk_load_rev_cf_and_data.test create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/bulk_load_rev_data.test create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/deadlock_stats.test delete mode 100755 storage/rocksdb/mysql-test/rocksdb/t/drop_table_compactions.pl create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/index_merge_rocksdb-master.opt create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/index_merge_rocksdb.test create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/index_merge_rocksdb2-master.opt create mode 100644 
storage/rocksdb/mysql-test/rocksdb/t/index_merge_rocksdb2.test create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/issue243_transactionStatus.test create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/issue255.test create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/lock_wait_timeout_stats.test create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/native_procedure-master.opt create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/native_procedure.test create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/prefix_extractor_override-master.opt create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/prefix_extractor_override.test create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/ttl_primary-master.opt create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/ttl_primary.test create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/ttl_primary_read_filtering-master.opt create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/ttl_primary_read_filtering.test create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/ttl_primary_with_partitions-master.opt create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/ttl_primary_with_partitions.test create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/type_varchar-master.opt create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/varbinary_format.test create mode 100755 storage/rocksdb/mysql-test/rocksdb_hotbackup/include/create_table.sh create mode 100644 storage/rocksdb/mysql-test/rocksdb_rpl/r/singledelete_idempotent_recovery.result create mode 100644 storage/rocksdb/mysql-test/rocksdb_rpl/r/singledelete_idempotent_table.result rename storage/rocksdb/mysql-test/rocksdb_rpl/t/{multiclient_2pc-mater.opt => multiclient_2pc-master.opt} (100%) create mode 100644 storage/rocksdb/mysql-test/rocksdb_rpl/t/singledelete_idempotent_recovery.cnf create mode 100644 storage/rocksdb/mysql-test/rocksdb_rpl/t/singledelete_idempotent_recovery.test create mode 100644 
storage/rocksdb/mysql-test/rocksdb_rpl/t/singledelete_idempotent_table.cnf create mode 100644 storage/rocksdb/mysql-test/rocksdb_rpl/t/singledelete_idempotent_table.test create mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/include/correctboolvalue.inc create mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/include/rocksdb_sys_var.inc delete mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_background_sync_basic.result delete mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_base_background_compactions_basic.result create mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_debug_ttl_read_filter_ts_basic.result create mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_debug_ttl_rec_ts_basic.result create mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_debug_ttl_snapshot_ts_basic.result rename storage/rocksdb/mysql-test/rocksdb_sys_vars/r/{rocksdb_disable_2pc_basic.result => rocksdb_enable_2pc_basic.result} (100%) create mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_enable_ttl_basic.result create mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_enable_ttl_read_filtering_basic.result create mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_force_flush_memtable_and_lzero_now_basic.result create mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_io_write_timeout_basic.result delete mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_max_background_compactions_basic.result delete mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_max_background_flushes_basic.result create mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_max_background_jobs_basic.result create mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_reset_stats_basic.result create mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_sst_mgr_rate_bytes_per_sec_basic.result create 
mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_update_cf_options_basic.result create mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_use_direct_io_for_flush_and_compaction_basic.result delete mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_use_direct_writes_basic.result create mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_write_batch_max_bytes_basic.result delete mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_base_background_compactions_basic.test create mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_debug_ttl_read_filter_ts_basic.test create mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_debug_ttl_rec_ts_basic.test create mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_debug_ttl_snapshot_ts_basic.test rename storage/rocksdb/mysql-test/rocksdb_sys_vars/t/{rocksdb_disable_2pc_basic.test => rocksdb_enable_2pc_basic.test} (91%) rename storage/rocksdb/mysql-test/rocksdb_sys_vars/t/{rocksdb_background_sync_basic.test => rocksdb_enable_ttl_basic.test} (76%) create mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_enable_ttl_read_filtering_basic.test create mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_force_flush_memtable_and_lzero_now_basic.test create mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_io_write_timeout_basic.test delete mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_max_background_flushes_basic.test rename storage/rocksdb/mysql-test/rocksdb_sys_vars/t/{rocksdb_max_background_compactions_basic.test => rocksdb_max_background_jobs_basic.test} (79%) create mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_reset_stats_basic.test create mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_sst_mgr_rate_bytes_per_sec_basic.test create mode 100644 
storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_update_cf_options_basic.test create mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_use_direct_io_for_flush_and_compaction_basic.test delete mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_use_direct_writes_basic.test create mode 100644 storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_write_batch_max_bytes_basic.test create mode 100644 storage/rocksdb/rdb_io_watchdog.cc create mode 100644 storage/rocksdb/rdb_io_watchdog.h diff --git a/storage/rocksdb/CMakeLists.txt b/storage/rocksdb/CMakeLists.txt index 12e88b24b49a9..ec9705891e08d 100644 --- a/storage/rocksdb/CMakeLists.txt +++ b/storage/rocksdb/CMakeLists.txt @@ -6,7 +6,7 @@ ENDIF() CHECK_FUNCTION_EXISTS(sched_getcpu HAVE_SCHED_GETCPU) IF(HAVE_SCHED_GETCPU) - ADD_DEFINITIONS(-DHAVE_SCHED_GETCPU=1) + ADD_DEFINITIONS(-DHAVE_SCHED_GETCPU=1 -DROCKSDB_SCHED_GETCPU_PRESENT) ENDIF() # get a list of rocksdb library source files @@ -25,14 +25,34 @@ INCLUDE_DIRECTORIES( ${CMAKE_SOURCE_DIR}/rocksdb/third-party/gtest-1.7.0/fused-src ) -ADD_DEFINITIONS(-DROCKSDB_PLATFORM_POSIX -DROCKSDB_LIB_IO_POSIX -DOS_LINUX - -DZLIB) +# This is a strong requirement coming from RocksDB. No conditional checks here. 
+ADD_DEFINITIONS(-DROCKSDB_PLATFORM_POSIX -DROCKSDB_LIB_IO_POSIX) -CHECK_FUNCTION_EXISTS(fallocate HAVE_FALLOCATE) -IF(HAVE_FALLOCATE) - ADD_DEFINITIONS(-DROCKSDB_FALLOCATE_PRESENT) +IF(UNIX) + IF(CMAKE_SYSTEM_NAME STREQUAL "Linux") + ADD_DEFINITIONS(-DOS_LINUX) + + CHECK_INCLUDE_FILES(linux/falloc.h HAVE_LINUX_FALLOC_H) + CHECK_FUNCTION_EXISTS(fallocate HAVE_FALLOCATE) + + IF(HAVE_FALLOCATE AND HAVE_LINUX_FALLOC_H) + ADD_DEFINITIONS(-DROCKSDB_FALLOCATE_PRESENT) + ENDIF() + ENDIF() ENDIF() +CHECK_CXX_SOURCE_COMPILES(" +#if defined(_MSC_VER) && !defined(__thread) +#define __thread __declspec(thread) +#endif +int main() { + static __thread int tls; +} +" HAVE_THREAD_LOCAL) +if(HAVE_THREAD_LOCAL) + ADD_DEFINITIONS(-DROCKSDB_SUPPORT_THREAD_LOCAL) +endif() + SET(ROCKSDB_SOURCES ha_rocksdb.cc ha_rocksdb.h ha_rocksdb_proto.h logger.h @@ -44,6 +64,7 @@ SET(ROCKSDB_SOURCES event_listener.cc event_listener.h rdb_i_s.cc rdb_i_s.h rdb_index_merge.cc rdb_index_merge.h + rdb_io_watchdog.cc rdb_io_watchdog.h rdb_perf_context.cc rdb_perf_context.h rdb_mutex_wrapper.cc rdb_mutex_wrapper.h rdb_psi.h rdb_psi.cc @@ -85,7 +106,14 @@ IF (NOT "$ENV{WITH_ZSTD}" STREQUAL "") ADD_DEFINITIONS(-DZSTD) ENDIF() -SET(rocksdb_static_libs ${rocksdb_static_libs} ${ZLIB_LIBRARY} "-lrt") +CHECK_INCLUDE_FILES(zlib.h HAVE_ZLIB_H) + +IF (HAVE_ZLIB_H) + ADD_DEFINITIONS(-DZLIB) + SET(rocksdb_static_libs ${rocksdb_static_libs} ${ZLIB_LIBRARY}) +ENDIF() + +SET(rocksdb_static_libs ${rocksdb_static_libs} "-lrt") MYSQL_ADD_PLUGIN(rocksdb_se ${ROCKSDB_SOURCES} STORAGE_ENGINE DEFAULT STATIC_ONLY LINK_LIBRARIES ${rocksdb_static_libs} diff --git a/storage/rocksdb/README b/storage/rocksdb/README index 472b7986f9150..3af455924a462 100644 --- a/storage/rocksdb/README +++ b/storage/rocksdb/README @@ -1,12 +1,20 @@ == Summary == -This directory contains RocksDB-based Storage Engine (RDBSE) for MySQL = "MyRocks". +This directory contains RocksDB-based Storage Engine (RDBSE) for MySQL, +also known as "MyRocks". 
== Resources == -See https://github.com/facebook/mysql-5.6/wiki/Getting-Started-with-MyRocks -Facebook group: https://www.facebook.com/groups/mysqlonrocksdb/ +https://github.com/facebook/mysql-5.6/wiki/Getting-Started-with-MyRocks +https://www.facebook.com/groups/MyRocks/ == Coding Conventions == -The baseline for MyRocks coding conventions is the MySQL set, available at +The baseline for MyRocks coding conventions for the code in storage/rocksdb/ +is based on the default clang format with a few minor changes. The file +storage/rocksdb/.clang-format describes conventions and can be integrated +with Vim or Emacs as described here: +http://releases.llvm.org/3.6.0/tools/clang/docs/ClangFormat.html#vim-integration + +All code outside of storage/rocksdb/ should conform to the MySQL coding +conventions: http://dev.mysql.com/doc/internals/en/coding-guidelines.html. Several refinements: @@ -34,5 +42,9 @@ Several refinements: "_vect" for a std::vector etc. == Running Tests == -To run tests from rocksdb, rocksd_rpl or rocksdb_sys_vars packages, use the following parameters: ---mysqld=--default-storage-engine=rocksdb --mysqld=--skip-innodb --mysqld=--default-tmp-storage-engine=MyISAM --mysqld=--rocksdb \ No newline at end of file +To run tests from rocksdb, rocksdb_rpl or other rocksdb_* packages, use the +following parameters: + --default-storage-engine=rocksdb + --skip-innodb + --default-tmp-storage-engine=MyISAM + --rocksdb diff --git a/storage/rocksdb/ha_rocksdb.cc b/storage/rocksdb/ha_rocksdb.cc index d3157b0b800ef..0df153b48534c 100644 --- a/storage/rocksdb/ha_rocksdb.cc +++ b/storage/rocksdb/ha_rocksdb.cc @@ -25,6 +25,8 @@ /* C++ standard header files */ #include +#include +#include #include #include #include @@ -40,14 +42,22 @@ #include #include +// Both MySQL and RocksDB define the same constant. To avoid compilation errors +// till we make the fix in RocksDB, we'll temporary undefine it here. 
+#undef CACHE_LINE_SIZE + /* RocksDB includes */ +#include "monitoring/histogram.h" #include "rocksdb/compaction_filter.h" +#include "rocksdb/env.h" #include "rocksdb/persistent_cache.h" #include "rocksdb/rate_limiter.h" #include "rocksdb/slice_transform.h" +#include "rocksdb/thread_status.h" #include "rocksdb/utilities/checkpoint.h" #include "rocksdb/utilities/convenience.h" #include "rocksdb/utilities/memory_util.h" +#include "util/stop_watch.h" /* MyRocks includes */ #include "./event_listener.h" @@ -90,6 +100,11 @@ namespace myrocks { static st_global_stats global_stats; static st_export_stats export_stats; +static st_memory_stats memory_stats; + +const std::string DEFAULT_CF_NAME("default"); +const std::string DEFAULT_SYSTEM_CF_NAME("__system__"); +const std::string PER_INDEX_CF_NAME("$per_index_cf"); /** Updates row counters based on the table type and operation type. @@ -110,14 +125,14 @@ static handler *rocksdb_create_handler(my_core::handlerton *hton, bool can_use_bloom_filter(THD *thd, const Rdb_key_def &kd, const rocksdb::Slice &eq_cond, - const bool use_all_keys, bool is_ascending); + const bool use_all_keys); /////////////////////////////////////////////////////////// // Parameters and settings /////////////////////////////////////////////////////////// -static char *rocksdb_default_cf_options; -static char *rocksdb_override_cf_options; -Rdb_cf_options rocksdb_cf_options_map; +static char *rocksdb_default_cf_options = nullptr; +static char *rocksdb_override_cf_options = nullptr; +static char *rocksdb_update_cf_options = nullptr; /////////////////////////////////////////////////////////// // Globals @@ -125,6 +140,7 @@ Rdb_cf_options rocksdb_cf_options_map; handlerton *rocksdb_hton; rocksdb::TransactionDB *rdb = nullptr; +rocksdb::HistogramImpl *commit_latency_stats = nullptr; static std::shared_ptr rocksdb_stats; static std::unique_ptr flashcache_aware_env; @@ -135,6 +151,7 @@ Rdb_cf_manager cf_manager; Rdb_ddl_manager ddl_manager; const char 
*m_mysql_gtid; Rdb_binlog_manager binlog_manager; +Rdb_io_watchdog *io_watchdog = nullptr; /** MyRocks background thread control @@ -148,11 +165,7 @@ static Rdb_background_thread rdb_bg_thread; // collation check requirement. Regex_list_handler *rdb_collation_exceptions; -static const char *const ERRSTR_ROLLBACK_ONLY = - "This transaction was rolled back and cannot be " - "committed. Only supported operation is to roll it back, " - "so all pending changes will be discarded. " - "Please restart another transaction."; +static const char **rdb_get_error_messages(); static void rocksdb_flush_all_memtables() { const Rdb_cf_manager &cf_manager = rdb_get_cf_manager(); @@ -175,8 +188,7 @@ static int rocksdb_compact_column_family(THD *const thd, DBUG_ASSERT(value != nullptr); if (const char *const cf = value->val_str(value, buff, &len)) { - bool is_automatic; - auto cfh = cf_manager.get_cf(cf, "", nullptr, &is_automatic); + auto cfh = cf_manager.get_cf(cf); if (cfh != nullptr && rdb != nullptr) { sql_print_information("RocksDB: Manual compaction of column family: %s\n", cf); @@ -246,30 +258,27 @@ static int rocksdb_create_checkpoint( checkpoint_dir.c_str()); rocksdb::Checkpoint *checkpoint; auto status = rocksdb::Checkpoint::Create(rdb, &checkpoint); + // We can only return HA_EXIT_FAILURE/HA_EXIT_SUCCESS here which is why + // the return code is ignored, but by calling into rdb_error_to_mysql, + // it will call my_error for us, which will propogate up to the client. + int rc __attribute__((__unused__)); if (status.ok()) { status = checkpoint->CreateCheckpoint(checkpoint_dir.c_str()); + delete checkpoint; if (status.ok()) { sql_print_information( "RocksDB: created checkpoint in directory : %s\n", checkpoint_dir.c_str()); + return HA_EXIT_SUCCESS; } else { - my_printf_error( - ER_UNKNOWN_ERROR, - "RocksDB: Failed to create checkpoint directory. 
status %d %s", - MYF(0), status.code(), status.ToString().c_str()); + rc = ha_rocksdb::rdb_error_to_mysql(status); } - delete checkpoint; } else { - const std::string err_text(status.ToString()); - my_printf_error( - ER_UNKNOWN_ERROR, - "RocksDB: failed to initialize checkpoint. status %d %s\n", MYF(0), - status.code(), err_text.c_str()); + rc = ha_rocksdb::rdb_error_to_mysql(status); } - return status.code(); } } - return HA_ERR_INTERNAL_ERROR; + return HA_EXIT_FAILURE; } /* This method is needed to indicate that the @@ -286,8 +295,48 @@ static void rocksdb_force_flush_memtable_now_stub( static int rocksdb_force_flush_memtable_now( THD *const thd, struct st_mysql_sys_var *const var, void *const var_ptr, struct st_mysql_value *const value) { - sql_print_information("RocksDB: Manual memtable flush\n"); + sql_print_information("RocksDB: Manual memtable flush."); + rocksdb_flush_all_memtables(); + return HA_EXIT_SUCCESS; +} + +static void rocksdb_force_flush_memtable_and_lzero_now_stub( + THD *const thd, struct st_mysql_sys_var *const var, void *const var_ptr, + const void *const save) {} + +static int rocksdb_force_flush_memtable_and_lzero_now( + THD *const thd, struct st_mysql_sys_var *const var, void *const var_ptr, + struct st_mysql_value *const value) { + sql_print_information("RocksDB: Manual memtable and L0 flush."); rocksdb_flush_all_memtables(); + + const Rdb_cf_manager &cf_manager = rdb_get_cf_manager(); + rocksdb::CompactionOptions c_options = rocksdb::CompactionOptions(); + rocksdb::ColumnFamilyMetaData metadata; + rocksdb::ColumnFamilyDescriptor cf_descr; + + for (const auto &cf_handle : cf_manager.get_all_cf()) { + rdb->GetColumnFamilyMetaData(cf_handle, &metadata); + cf_handle->GetDescriptor(&cf_descr); + c_options.output_file_size_limit = cf_descr.options.target_file_size_base; + + DBUG_ASSERT(metadata.levels[0].level == 0); + std::vector file_names; + for (auto &file : metadata.levels[0].files) { + file_names.emplace_back(file.db_path + file.name); + 
} + + if (!file_names.empty()) { + rocksdb::Status s; + s = rdb->CompactFiles(c_options, cf_handle, file_names, 1); + + if (!s.ok() && !s.IsAborted()) { + rdb_handle_io_error(s, RDB_IO_ERROR_GENERAL); + return HA_EXIT_FAILURE; + } + } + } + return HA_EXIT_SUCCESS; } @@ -330,6 +379,11 @@ static void rocksdb_set_rate_limiter_bytes_per_sec(THD *thd, void *var_ptr, const void *save); +static void rocksdb_set_sst_mgr_rate_bytes_per_sec(THD *thd, + struct st_mysql_sys_var *var, + void *var_ptr, + const void *save); + static void rocksdb_set_delayed_write_rate(THD *thd, struct st_mysql_sys_var *var, void *var_ptr, const void *save); @@ -340,29 +394,37 @@ static void rocksdb_set_collation_exception_list(THD *thd, void *var_ptr, const void *save); +void rocksdb_set_update_cf_options(THD *thd, + struct st_mysql_sys_var *var, + void *var_ptr, + const void *save); + static void rocksdb_set_bulk_load(THD *thd, struct st_mysql_sys_var *var MY_ATTRIBUTE((__unused__)), void *var_ptr, const void *save); -static void rocksdb_set_max_background_compactions( - THD *thd, struct st_mysql_sys_var *const var, void *const var_ptr, - const void *const save); +static void rocksdb_set_max_background_jobs(THD *thd, + struct st_mysql_sys_var *const var, + void *const var_ptr, + const void *const save); ////////////////////////////////////////////////////////////////////////////// // Options definitions ////////////////////////////////////////////////////////////////////////////// static long long rocksdb_block_cache_size; /* Use unsigned long long instead of uint64_t because of MySQL compatibility */ -static unsigned long long // NOLINT(runtime/int) +static unsigned long long // NOLINT(runtime/int) rocksdb_rate_limiter_bytes_per_sec; +static unsigned long long // NOLINT(runtime/int) + rocksdb_sst_mgr_rate_bytes_per_sec; static unsigned long long rocksdb_delayed_write_rate; -static unsigned long // NOLINT(runtime/int) +static unsigned long // NOLINT(runtime/int) rocksdb_persistent_cache_size_mb; 
static uint64_t rocksdb_info_log_level; static char *rocksdb_wal_dir; static char *rocksdb_persistent_cache_path; static uint64_t rocksdb_index_type; -static char rocksdb_background_sync; +static uint32_t rocksdb_flush_log_at_trx_commit; static uint32_t rocksdb_debug_optimizer_n_rows; static my_bool rocksdb_force_compute_memtable_stats; static my_bool rocksdb_debug_optimizer_no_zero_cardinality; @@ -376,6 +438,14 @@ static my_bool rocksdb_enable_2pc = 0; static char *rocksdb_strict_collation_exceptions; static my_bool rocksdb_collect_sst_properties = 1; static my_bool rocksdb_force_flush_memtable_now_var = 0; +static my_bool rocksdb_force_flush_memtable_and_lzero_now_var = 0; +static my_bool rocksdb_enable_ttl = 1; +static my_bool rocksdb_enable_ttl_read_filtering = 1; +static int rocksdb_debug_ttl_rec_ts = 0; +static int rocksdb_debug_ttl_snapshot_ts = 0; +static int rocksdb_debug_ttl_read_filter_ts = 0; +static my_bool rocksdb_reset_stats = 0; +static uint32_t rocksdb_io_write_timeout_secs = 0; static uint64_t rocksdb_number_stat_computes = 0; static uint32_t rocksdb_seconds_between_stat_computes = 3600; static long long rocksdb_compaction_sequential_deletes = 0l; @@ -390,19 +460,23 @@ static my_bool rocksdb_print_snapshot_conflict_queries = 0; std::atomic rocksdb_snapshot_conflict_errors(0); std::atomic rocksdb_wal_group_syncs(0); -static rocksdb::DBOptions rdb_init_rocksdb_db_options(void) { - rocksdb::DBOptions o; +static std::unique_ptr rdb_init_rocksdb_db_options(void) { + auto o = std::unique_ptr(new rocksdb::DBOptions()); - o.create_if_missing = true; - o.listeners.push_back(std::make_shared(&ddl_manager)); - o.info_log_level = rocksdb::InfoLogLevel::INFO_LEVEL; - o.max_subcompactions = DEFAULT_SUBCOMPACTIONS; + o->create_if_missing = true; + o->listeners.push_back(std::make_shared(&ddl_manager)); + o->info_log_level = rocksdb::InfoLogLevel::INFO_LEVEL; + o->max_subcompactions = DEFAULT_SUBCOMPACTIONS; return o; } -static rocksdb::DBOptions 
rocksdb_db_options = rdb_init_rocksdb_db_options(); -static rocksdb::BlockBasedTableOptions rocksdb_tbl_options; +/* DBOptions contains Statistics and needs to be destructed last */ +static std::unique_ptr rocksdb_tbl_options = + std::unique_ptr( + new rocksdb::BlockBasedTableOptions()); +static std::unique_ptr rocksdb_db_options = + rdb_init_rocksdb_db_options(); static std::shared_ptr rocksdb_rate_limiter; @@ -422,11 +496,55 @@ static void rocksdb_set_rocksdb_info_log_level( RDB_MUTEX_LOCK_CHECK(rdb_sysvars_mutex); rocksdb_info_log_level = *static_cast(save); - rocksdb_db_options.info_log->SetInfoLogLevel( + rocksdb_db_options->info_log->SetInfoLogLevel( static_cast(rocksdb_info_log_level)); RDB_MUTEX_UNLOCK_CHECK(rdb_sysvars_mutex); } +static void rocksdb_set_reset_stats( + my_core::THD *const /* unused */, + my_core::st_mysql_sys_var *const var MY_ATTRIBUTE((__unused__)), + void *const var_ptr, const void *const save) { + DBUG_ASSERT(save != nullptr); + DBUG_ASSERT(rdb != nullptr); + DBUG_ASSERT(rocksdb_stats != nullptr); + + RDB_MUTEX_LOCK_CHECK(rdb_sysvars_mutex); + + *static_cast(var_ptr) = *static_cast(save); + + if (rocksdb_reset_stats) { + rocksdb::Status s = rdb->ResetStats(); + + // RocksDB will always return success. Let's document this assumption here + // as well so that we'll get immediately notified when contract changes. 
+ DBUG_ASSERT(s == rocksdb::Status::OK()); + + s = rocksdb_stats->Reset(); + DBUG_ASSERT(s == rocksdb::Status::OK()); + } + + RDB_MUTEX_UNLOCK_CHECK(rdb_sysvars_mutex); +} + +static void rocksdb_set_io_write_timeout( + my_core::THD *const thd MY_ATTRIBUTE((__unused__)), + my_core::st_mysql_sys_var *const var MY_ATTRIBUTE((__unused__)), + void *const var_ptr MY_ATTRIBUTE((__unused__)), const void *const save) { + DBUG_ASSERT(save != nullptr); + DBUG_ASSERT(rdb != nullptr); + DBUG_ASSERT(io_watchdog != nullptr); + + RDB_MUTEX_LOCK_CHECK(rdb_sysvars_mutex); + + const uint32_t new_val = *static_cast(save); + + rocksdb_io_write_timeout_secs = new_val; + io_watchdog->reset_timeout(rocksdb_io_write_timeout_secs); + + RDB_MUTEX_UNLOCK_CHECK(rdb_sysvars_mutex); +} + static const char *index_type_names[] = {"kBinarySearch", "kHashSearch", NullS}; static TYPELIB index_type_typelib = {array_elements(index_type_names) - 1, @@ -508,6 +626,11 @@ static MYSQL_THDVAR_ULONG(max_row_locks, PLUGIN_VAR_RQCMDARG, /*min*/ 1, /*max*/ RDB_MAX_ROW_LOCKS, 0); +static MYSQL_THDVAR_ULONGLONG( + write_batch_max_bytes, PLUGIN_VAR_RQCMDARG, + "Maximum size of write batch in bytes. 
0 means no limit.", nullptr, nullptr, + /* default */ 0, /* min */ 0, /* max */ SIZE_T_MAX, 1); + static MYSQL_THDVAR_BOOL( lock_scanned_rows, PLUGIN_VAR_RQCMDARG, "Take and hold locks on rows that are scanned but not updated", nullptr, @@ -541,32 +664,32 @@ static MYSQL_THDVAR_ULONGLONG( static MYSQL_SYSVAR_BOOL( create_if_missing, - *reinterpret_cast(&rocksdb_db_options.create_if_missing), + *reinterpret_cast(&rocksdb_db_options->create_if_missing), PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, "DBOptions::create_if_missing for RocksDB", nullptr, nullptr, - rocksdb_db_options.create_if_missing); + rocksdb_db_options->create_if_missing); static MYSQL_SYSVAR_BOOL( create_missing_column_families, *reinterpret_cast( - &rocksdb_db_options.create_missing_column_families), + &rocksdb_db_options->create_missing_column_families), PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, "DBOptions::create_missing_column_families for RocksDB", nullptr, nullptr, - rocksdb_db_options.create_missing_column_families); + rocksdb_db_options->create_missing_column_families); static MYSQL_SYSVAR_BOOL( error_if_exists, - *reinterpret_cast(&rocksdb_db_options.error_if_exists), + *reinterpret_cast(&rocksdb_db_options->error_if_exists), PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, "DBOptions::error_if_exists for RocksDB", nullptr, nullptr, - rocksdb_db_options.error_if_exists); + rocksdb_db_options->error_if_exists); static MYSQL_SYSVAR_BOOL( paranoid_checks, - *reinterpret_cast(&rocksdb_db_options.paranoid_checks), + *reinterpret_cast(&rocksdb_db_options->paranoid_checks), PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, "DBOptions::paranoid_checks for RocksDB", nullptr, nullptr, - rocksdb_db_options.paranoid_checks); + rocksdb_db_options->paranoid_checks); static MYSQL_SYSVAR_ULONGLONG( rate_limiter_bytes_per_sec, rocksdb_rate_limiter_bytes_per_sec, @@ -574,11 +697,19 @@ static MYSQL_SYSVAR_ULONGLONG( nullptr, rocksdb_set_rate_limiter_bytes_per_sec, /* default */ 0L, /* min */ 0L, /* max */ 
MAX_RATE_LIMITER_BYTES_PER_SEC, 0); +static MYSQL_SYSVAR_ULONGLONG( + sst_mgr_rate_bytes_per_sec, rocksdb_sst_mgr_rate_bytes_per_sec, + PLUGIN_VAR_RQCMDARG, + "DBOptions::sst_file_manager rate_bytes_per_sec for RocksDB", nullptr, + rocksdb_set_sst_mgr_rate_bytes_per_sec, + /* default */ DEFAULT_SST_MGR_RATE_BYTES_PER_SEC, + /* min */ 0L, /* max */ UINT64_MAX, 0); + static MYSQL_SYSVAR_ULONGLONG(delayed_write_rate, rocksdb_delayed_write_rate, PLUGIN_VAR_RQCMDARG, "DBOptions::delayed_write_rate", nullptr, rocksdb_set_delayed_write_rate, - rocksdb_db_options.delayed_write_rate, 0, + rocksdb_db_options->delayed_write_rate, 0, UINT64_MAX, 0); static MYSQL_SYSVAR_ENUM( @@ -606,20 +737,20 @@ static MYSQL_SYSVAR_UINT( /* max */ (uint)rocksdb::WALRecoveryMode::kSkipAnyCorruptedRecords, 0); static MYSQL_SYSVAR_ULONG(compaction_readahead_size, - rocksdb_db_options.compaction_readahead_size, + rocksdb_db_options->compaction_readahead_size, PLUGIN_VAR_RQCMDARG, "DBOptions::compaction_readahead_size for RocksDB", nullptr, nullptr, - rocksdb_db_options.compaction_readahead_size, + rocksdb_db_options->compaction_readahead_size, /* min */ 0L, /* max */ ULONG_MAX, 0); static MYSQL_SYSVAR_BOOL( new_table_reader_for_compaction_inputs, *reinterpret_cast( - &rocksdb_db_options.new_table_reader_for_compaction_inputs), + &rocksdb_db_options->new_table_reader_for_compaction_inputs), PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, "DBOptions::new_table_reader_for_compaction_inputs for RocksDB", nullptr, - nullptr, rocksdb_db_options.new_table_reader_for_compaction_inputs); + nullptr, rocksdb_db_options->new_table_reader_for_compaction_inputs); static MYSQL_SYSVAR_UINT( access_hint_on_compaction_start, rocksdb_access_hint_on_compaction_start, @@ -632,42 +763,42 @@ static MYSQL_SYSVAR_UINT( static MYSQL_SYSVAR_BOOL( allow_concurrent_memtable_write, *reinterpret_cast( - &rocksdb_db_options.allow_concurrent_memtable_write), - PLUGIN_VAR_RQCMDARG, + 
&rocksdb_db_options->allow_concurrent_memtable_write), + PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, "DBOptions::allow_concurrent_memtable_write for RocksDB", nullptr, nullptr, false); static MYSQL_SYSVAR_BOOL( enable_write_thread_adaptive_yield, *reinterpret_cast( - &rocksdb_db_options.enable_write_thread_adaptive_yield), - PLUGIN_VAR_RQCMDARG, + &rocksdb_db_options->enable_write_thread_adaptive_yield), + PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, "DBOptions::enable_write_thread_adaptive_yield for RocksDB", nullptr, nullptr, false); -static MYSQL_SYSVAR_INT(max_open_files, rocksdb_db_options.max_open_files, +static MYSQL_SYSVAR_INT(max_open_files, rocksdb_db_options->max_open_files, PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, "DBOptions::max_open_files for RocksDB", nullptr, - nullptr, rocksdb_db_options.max_open_files, + nullptr, rocksdb_db_options->max_open_files, /* min */ -1, /* max */ INT_MAX, 0); static MYSQL_SYSVAR_ULONG(max_total_wal_size, - rocksdb_db_options.max_total_wal_size, + rocksdb_db_options->max_total_wal_size, PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, "DBOptions::max_total_wal_size for RocksDB", nullptr, - nullptr, rocksdb_db_options.max_total_wal_size, + nullptr, rocksdb_db_options->max_total_wal_size, /* min */ 0L, /* max */ LONG_MAX, 0); static MYSQL_SYSVAR_BOOL( - use_fsync, *reinterpret_cast(&rocksdb_db_options.use_fsync), + use_fsync, *reinterpret_cast(&rocksdb_db_options->use_fsync), PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, "DBOptions::use_fsync for RocksDB", nullptr, nullptr, - rocksdb_db_options.use_fsync); + rocksdb_db_options->use_fsync); static MYSQL_SYSVAR_STR(wal_dir, rocksdb_wal_dir, PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, "DBOptions::wal_dir for RocksDB", nullptr, nullptr, - rocksdb_db_options.wal_dir.c_str()); + rocksdb_db_options->wal_dir.c_str()); static MYSQL_SYSVAR_STR( persistent_cache_path, rocksdb_persistent_cache_path, @@ -684,186 +815,170 @@ static MYSQL_SYSVAR_ULONG( static MYSQL_SYSVAR_ULONG( 
delete_obsolete_files_period_micros, - rocksdb_db_options.delete_obsolete_files_period_micros, + rocksdb_db_options->delete_obsolete_files_period_micros, PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, "DBOptions::delete_obsolete_files_period_micros for RocksDB", nullptr, - nullptr, rocksdb_db_options.delete_obsolete_files_period_micros, + nullptr, rocksdb_db_options->delete_obsolete_files_period_micros, /* min */ 0L, /* max */ LONG_MAX, 0); -static MYSQL_SYSVAR_INT(base_background_compactions, - rocksdb_db_options.base_background_compactions, - PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, - "DBOptions::base_background_compactions for RocksDB", - nullptr, nullptr, - rocksdb_db_options.base_background_compactions, - /* min */ -1, /* max */ MAX_BACKGROUND_COMPACTIONS, 0); - -static MYSQL_SYSVAR_INT(max_background_compactions, - rocksdb_db_options.max_background_compactions, +static MYSQL_SYSVAR_INT(max_background_jobs, + rocksdb_db_options->max_background_jobs, PLUGIN_VAR_RQCMDARG, - "DBOptions::max_background_compactions for RocksDB", - nullptr, rocksdb_set_max_background_compactions, - rocksdb_db_options.max_background_compactions, - /* min */ 1, /* max */ MAX_BACKGROUND_COMPACTIONS, 0); - -static MYSQL_SYSVAR_INT(max_background_flushes, - rocksdb_db_options.max_background_flushes, - PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, - "DBOptions::max_background_flushes for RocksDB", - nullptr, nullptr, - rocksdb_db_options.max_background_flushes, - /* min */ 1, /* max */ MAX_BACKGROUND_FLUSHES, 0); + "DBOptions::max_background_jobs for RocksDB", nullptr, + rocksdb_set_max_background_jobs, + rocksdb_db_options->max_background_jobs, + /* min */ -1, /* max */ MAX_BACKGROUND_JOBS, 0); static MYSQL_SYSVAR_UINT(max_subcompactions, - rocksdb_db_options.max_subcompactions, + rocksdb_db_options->max_subcompactions, PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, "DBOptions::max_subcompactions for RocksDB", nullptr, - nullptr, rocksdb_db_options.max_subcompactions, + nullptr, 
rocksdb_db_options->max_subcompactions, /* min */ 1, /* max */ MAX_SUBCOMPACTIONS, 0); static MYSQL_SYSVAR_ULONG(max_log_file_size, - rocksdb_db_options.max_log_file_size, + rocksdb_db_options->max_log_file_size, PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, "DBOptions::max_log_file_size for RocksDB", nullptr, - nullptr, rocksdb_db_options.max_log_file_size, + nullptr, rocksdb_db_options->max_log_file_size, /* min */ 0L, /* max */ LONG_MAX, 0); static MYSQL_SYSVAR_ULONG(log_file_time_to_roll, - rocksdb_db_options.log_file_time_to_roll, + rocksdb_db_options->log_file_time_to_roll, PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, "DBOptions::log_file_time_to_roll for RocksDB", nullptr, nullptr, - rocksdb_db_options.log_file_time_to_roll, + rocksdb_db_options->log_file_time_to_roll, /* min */ 0L, /* max */ LONG_MAX, 0); static MYSQL_SYSVAR_ULONG(keep_log_file_num, - rocksdb_db_options.keep_log_file_num, + rocksdb_db_options->keep_log_file_num, PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, "DBOptions::keep_log_file_num for RocksDB", nullptr, - nullptr, rocksdb_db_options.keep_log_file_num, + nullptr, rocksdb_db_options->keep_log_file_num, /* min */ 0L, /* max */ LONG_MAX, 0); static MYSQL_SYSVAR_ULONG(max_manifest_file_size, - rocksdb_db_options.max_manifest_file_size, + rocksdb_db_options->max_manifest_file_size, PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, "DBOptions::max_manifest_file_size for RocksDB", nullptr, nullptr, - rocksdb_db_options.max_manifest_file_size, + rocksdb_db_options->max_manifest_file_size, /* min */ 0L, /* max */ ULONG_MAX, 0); static MYSQL_SYSVAR_INT(table_cache_numshardbits, - rocksdb_db_options.table_cache_numshardbits, + rocksdb_db_options->table_cache_numshardbits, PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, "DBOptions::table_cache_numshardbits for RocksDB", nullptr, nullptr, - rocksdb_db_options.table_cache_numshardbits, + rocksdb_db_options->table_cache_numshardbits, /* min */ 0, /* max */ INT_MAX, 0); -static MYSQL_SYSVAR_ULONG(wal_ttl_seconds, 
rocksdb_db_options.WAL_ttl_seconds, +static MYSQL_SYSVAR_ULONG(wal_ttl_seconds, rocksdb_db_options->WAL_ttl_seconds, PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, "DBOptions::WAL_ttl_seconds for RocksDB", nullptr, - nullptr, rocksdb_db_options.WAL_ttl_seconds, + nullptr, rocksdb_db_options->WAL_ttl_seconds, /* min */ 0L, /* max */ LONG_MAX, 0); static MYSQL_SYSVAR_ULONG(wal_size_limit_mb, - rocksdb_db_options.WAL_size_limit_MB, + rocksdb_db_options->WAL_size_limit_MB, PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, "DBOptions::WAL_size_limit_MB for RocksDB", nullptr, - nullptr, rocksdb_db_options.WAL_size_limit_MB, + nullptr, rocksdb_db_options->WAL_size_limit_MB, /* min */ 0L, /* max */ LONG_MAX, 0); static MYSQL_SYSVAR_ULONG(manifest_preallocation_size, - rocksdb_db_options.manifest_preallocation_size, + rocksdb_db_options->manifest_preallocation_size, PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, "DBOptions::manifest_preallocation_size for RocksDB", nullptr, nullptr, - rocksdb_db_options.manifest_preallocation_size, + rocksdb_db_options->manifest_preallocation_size, /* min */ 0L, /* max */ LONG_MAX, 0); static MYSQL_SYSVAR_BOOL( use_direct_reads, - *reinterpret_cast(&rocksdb_db_options.use_direct_reads), + *reinterpret_cast(&rocksdb_db_options->use_direct_reads), PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, "DBOptions::use_direct_reads for RocksDB", nullptr, nullptr, - rocksdb_db_options.use_direct_reads); + rocksdb_db_options->use_direct_reads); static MYSQL_SYSVAR_BOOL( - use_direct_writes, - *reinterpret_cast(&rocksdb_db_options.use_direct_writes), + use_direct_io_for_flush_and_compaction, + *reinterpret_cast( + &rocksdb_db_options->use_direct_io_for_flush_and_compaction), PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, - "DBOptions::use_direct_writes for RocksDB", nullptr, nullptr, - rocksdb_db_options.use_direct_writes); + "DBOptions::use_direct_io_for_flush_and_compaction for RocksDB", nullptr, + nullptr, rocksdb_db_options->use_direct_io_for_flush_and_compaction); static 
MYSQL_SYSVAR_BOOL( allow_mmap_reads, - *reinterpret_cast(&rocksdb_db_options.allow_mmap_reads), + *reinterpret_cast(&rocksdb_db_options->allow_mmap_reads), PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, "DBOptions::allow_mmap_reads for RocksDB", nullptr, nullptr, - rocksdb_db_options.allow_mmap_reads); + rocksdb_db_options->allow_mmap_reads); static MYSQL_SYSVAR_BOOL( allow_mmap_writes, - *reinterpret_cast(&rocksdb_db_options.allow_mmap_writes), + *reinterpret_cast(&rocksdb_db_options->allow_mmap_writes), PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, "DBOptions::allow_mmap_writes for RocksDB", nullptr, nullptr, - rocksdb_db_options.allow_mmap_writes); + rocksdb_db_options->allow_mmap_writes); static MYSQL_SYSVAR_BOOL( is_fd_close_on_exec, - *reinterpret_cast(&rocksdb_db_options.is_fd_close_on_exec), + *reinterpret_cast(&rocksdb_db_options->is_fd_close_on_exec), PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, "DBOptions::is_fd_close_on_exec for RocksDB", nullptr, nullptr, - rocksdb_db_options.is_fd_close_on_exec); + rocksdb_db_options->is_fd_close_on_exec); static MYSQL_SYSVAR_UINT(stats_dump_period_sec, - rocksdb_db_options.stats_dump_period_sec, + rocksdb_db_options->stats_dump_period_sec, PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, "DBOptions::stats_dump_period_sec for RocksDB", nullptr, nullptr, - rocksdb_db_options.stats_dump_period_sec, + rocksdb_db_options->stats_dump_period_sec, /* min */ 0, /* max */ INT_MAX, 0); static MYSQL_SYSVAR_BOOL( advise_random_on_open, - *reinterpret_cast(&rocksdb_db_options.advise_random_on_open), + *reinterpret_cast(&rocksdb_db_options->advise_random_on_open), PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, "DBOptions::advise_random_on_open for RocksDB", nullptr, nullptr, - rocksdb_db_options.advise_random_on_open); + rocksdb_db_options->advise_random_on_open); static MYSQL_SYSVAR_ULONG(db_write_buffer_size, - rocksdb_db_options.db_write_buffer_size, + rocksdb_db_options->db_write_buffer_size, PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, 
"DBOptions::db_write_buffer_size for RocksDB", nullptr, nullptr, - rocksdb_db_options.db_write_buffer_size, + rocksdb_db_options->db_write_buffer_size, /* min */ 0L, /* max */ LONG_MAX, 0); static MYSQL_SYSVAR_BOOL( use_adaptive_mutex, - *reinterpret_cast(&rocksdb_db_options.use_adaptive_mutex), + *reinterpret_cast(&rocksdb_db_options->use_adaptive_mutex), PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, "DBOptions::use_adaptive_mutex for RocksDB", nullptr, nullptr, - rocksdb_db_options.use_adaptive_mutex); + rocksdb_db_options->use_adaptive_mutex); -static MYSQL_SYSVAR_ULONG(bytes_per_sync, rocksdb_db_options.bytes_per_sync, +static MYSQL_SYSVAR_ULONG(bytes_per_sync, rocksdb_db_options->bytes_per_sync, PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, "DBOptions::bytes_per_sync for RocksDB", nullptr, - nullptr, rocksdb_db_options.bytes_per_sync, + nullptr, rocksdb_db_options->bytes_per_sync, /* min */ 0L, /* max */ LONG_MAX, 0); static MYSQL_SYSVAR_ULONG(wal_bytes_per_sync, - rocksdb_db_options.wal_bytes_per_sync, + rocksdb_db_options->wal_bytes_per_sync, PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, "DBOptions::wal_bytes_per_sync for RocksDB", nullptr, - nullptr, rocksdb_db_options.wal_bytes_per_sync, + nullptr, rocksdb_db_options->wal_bytes_per_sync, /* min */ 0L, /* max */ LONG_MAX, 0); static MYSQL_SYSVAR_BOOL( enable_thread_tracking, - *reinterpret_cast(&rocksdb_db_options.enable_thread_tracking), + *reinterpret_cast(&rocksdb_db_options->enable_thread_tracking), PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, - "DBOptions::enable_thread_tracking for RocksDB", nullptr, nullptr, - rocksdb_db_options.enable_thread_tracking); + "DBOptions::enable_thread_tracking for RocksDB", nullptr, nullptr, true); static MYSQL_SYSVAR_LONGLONG(block_cache_size, rocksdb_block_cache_size, PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, @@ -876,7 +991,7 @@ static MYSQL_SYSVAR_LONGLONG(block_cache_size, rocksdb_block_cache_size, static MYSQL_SYSVAR_BOOL( cache_index_and_filter_blocks, *reinterpret_cast( - 
&rocksdb_tbl_options.cache_index_and_filter_blocks), + &rocksdb_tbl_options->cache_index_and_filter_blocks), PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, "BlockBasedTableOptions::cache_index_and_filter_blocks for RocksDB", nullptr, nullptr, true); @@ -892,7 +1007,7 @@ static MYSQL_SYSVAR_BOOL( static MYSQL_SYSVAR_BOOL( pin_l0_filter_and_index_blocks_in_cache, *reinterpret_cast( - &rocksdb_tbl_options.pin_l0_filter_and_index_blocks_in_cache), + &rocksdb_tbl_options->pin_l0_filter_and_index_blocks_in_cache), PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, "pin_l0_filter_and_index_blocks_in_cache for RocksDB", nullptr, nullptr, true); @@ -901,50 +1016,50 @@ static MYSQL_SYSVAR_ENUM(index_type, rocksdb_index_type, PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, "BlockBasedTableOptions::index_type for RocksDB", nullptr, nullptr, - (uint64_t)rocksdb_tbl_options.index_type, + (uint64_t)rocksdb_tbl_options->index_type, &index_type_typelib); static MYSQL_SYSVAR_BOOL( hash_index_allow_collision, *reinterpret_cast( - &rocksdb_tbl_options.hash_index_allow_collision), + &rocksdb_tbl_options->hash_index_allow_collision), PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, "BlockBasedTableOptions::hash_index_allow_collision for RocksDB", nullptr, - nullptr, rocksdb_tbl_options.hash_index_allow_collision); + nullptr, rocksdb_tbl_options->hash_index_allow_collision); static MYSQL_SYSVAR_BOOL( no_block_cache, - *reinterpret_cast(&rocksdb_tbl_options.no_block_cache), + *reinterpret_cast(&rocksdb_tbl_options->no_block_cache), PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, "BlockBasedTableOptions::no_block_cache for RocksDB", nullptr, nullptr, - rocksdb_tbl_options.no_block_cache); + rocksdb_tbl_options->no_block_cache); -static MYSQL_SYSVAR_ULONG(block_size, rocksdb_tbl_options.block_size, +static MYSQL_SYSVAR_ULONG(block_size, rocksdb_tbl_options->block_size, PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, "BlockBasedTableOptions::block_size for RocksDB", - nullptr, nullptr, rocksdb_tbl_options.block_size, + 
nullptr, nullptr, rocksdb_tbl_options->block_size, /* min */ 1L, /* max */ LONG_MAX, 0); static MYSQL_SYSVAR_INT( - block_size_deviation, rocksdb_tbl_options.block_size_deviation, + block_size_deviation, rocksdb_tbl_options->block_size_deviation, PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, "BlockBasedTableOptions::block_size_deviation for RocksDB", nullptr, - nullptr, rocksdb_tbl_options.block_size_deviation, + nullptr, rocksdb_tbl_options->block_size_deviation, /* min */ 0, /* max */ INT_MAX, 0); static MYSQL_SYSVAR_INT( - block_restart_interval, rocksdb_tbl_options.block_restart_interval, + block_restart_interval, rocksdb_tbl_options->block_restart_interval, PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, "BlockBasedTableOptions::block_restart_interval for RocksDB", nullptr, - nullptr, rocksdb_tbl_options.block_restart_interval, + nullptr, rocksdb_tbl_options->block_restart_interval, /* min */ 1, /* max */ INT_MAX, 0); static MYSQL_SYSVAR_BOOL( whole_key_filtering, - *reinterpret_cast(&rocksdb_tbl_options.whole_key_filtering), + *reinterpret_cast(&rocksdb_tbl_options->whole_key_filtering), PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, "BlockBasedTableOptions::whole_key_filtering for RocksDB", nullptr, nullptr, - rocksdb_tbl_options.whole_key_filtering); + rocksdb_tbl_options->whole_key_filtering); static MYSQL_SYSVAR_STR(default_cf_options, rocksdb_default_cf_options, PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, @@ -955,12 +1070,14 @@ static MYSQL_SYSVAR_STR(override_cf_options, rocksdb_override_cf_options, "option overrides per cf for RocksDB", nullptr, nullptr, ""); -static MYSQL_SYSVAR_BOOL(background_sync, rocksdb_background_sync, - PLUGIN_VAR_RQCMDARG, - "turns on background syncs for RocksDB", nullptr, - nullptr, FALSE); +static MYSQL_SYSVAR_STR(update_cf_options, rocksdb_update_cf_options, + PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_MEMALLOC | + PLUGIN_VAR_ALLOCATED, + "Option updates per column family for RocksDB", nullptr, + rocksdb_set_update_cf_options, nullptr); -static 
MYSQL_THDVAR_UINT(flush_log_at_trx_commit, PLUGIN_VAR_RQCMDARG, +static MYSQL_SYSVAR_UINT(flush_log_at_trx_commit, + rocksdb_flush_log_at_trx_commit, PLUGIN_VAR_RQCMDARG, "Sync on transaction commit. Similar to " "innodb_flush_log_at_trx_commit. 1: sync on commit, " "0,2: not sync on commit", @@ -1034,6 +1151,58 @@ static MYSQL_SYSVAR_BOOL(pause_background_work, rocksdb_pause_background_work, "Disable all rocksdb background operations", nullptr, rocksdb_set_pause_background_work, FALSE); +static MYSQL_SYSVAR_BOOL( + enable_ttl, rocksdb_enable_ttl, PLUGIN_VAR_RQCMDARG, + "Enable expired TTL records to be dropped during compaction.", nullptr, + nullptr, TRUE); + +static MYSQL_SYSVAR_BOOL( + enable_ttl_read_filtering, rocksdb_enable_ttl_read_filtering, + PLUGIN_VAR_RQCMDARG, + "For tables with TTL, expired records are skipped/filtered out during " + "processing and in query results. Disabling this will allow these records " + "to be seen, but as a result rows may disappear in the middle of " + "transactions as they are dropped during compaction. Use with caution.", + nullptr, nullptr, TRUE); + +static MYSQL_SYSVAR_INT( + debug_ttl_rec_ts, rocksdb_debug_ttl_rec_ts, PLUGIN_VAR_RQCMDARG, + "For debugging purposes only. Overrides the TTL of records to " + "now() + debug_ttl_rec_ts. The value can be +/- to simulate " + "a record inserted in the past vs a record inserted in the 'future'. " + "A value of 0 denotes that the variable is not set. This variable is a " + "no-op in non-debug builds.", + nullptr, nullptr, 0, /* min */ -3600, /* max */ 3600, 0); + +static MYSQL_SYSVAR_INT( + debug_ttl_snapshot_ts, rocksdb_debug_ttl_snapshot_ts, PLUGIN_VAR_RQCMDARG, + "For debugging purposes only. Sets the snapshot during compaction to " + "now() + debug_set_ttl_snapshot_ts. The value can be +/- to simulate " + "a snapshot in the past vs a snapshot created in the 'future'. " + "A value of 0 denotes that the variable is not set. 
This variable is a " + "no-op in non-debug builds.", + nullptr, nullptr, 0, /* min */ -3600, /* max */ 3600, 0); + +static MYSQL_SYSVAR_INT( + debug_ttl_read_filter_ts, rocksdb_debug_ttl_read_filter_ts, + PLUGIN_VAR_RQCMDARG, + "For debugging purposes only. Overrides the TTL read filtering time to " + "time + debug_ttl_read_filter_ts. A value of 0 denotes that the variable " + "is not set. This variable is a no-op in non-debug builds.", + nullptr, nullptr, 0, /* min */ -3600, /* max */ 3600, 0); + +static MYSQL_SYSVAR_BOOL( + reset_stats, rocksdb_reset_stats, PLUGIN_VAR_RQCMDARG, + "Reset the RocksDB internal statistics without restarting the DB.", nullptr, + rocksdb_set_reset_stats, FALSE); + +static MYSQL_SYSVAR_UINT(io_write_timeout, rocksdb_io_write_timeout_secs, + PLUGIN_VAR_RQCMDARG, + "Timeout for experimental I/O watchdog.", nullptr, + rocksdb_set_io_write_timeout, /* default */ 0, + /* min */ 0L, + /* max */ UINT_MAX, 0); + static MYSQL_SYSVAR_BOOL(enable_2pc, rocksdb_enable_2pc, PLUGIN_VAR_RQCMDARG, "Enable two phase commit for MyRocks", nullptr, nullptr, TRUE); @@ -1062,6 +1231,13 @@ static MYSQL_SYSVAR_BOOL( rocksdb_force_flush_memtable_now, rocksdb_force_flush_memtable_now_stub, FALSE); +static MYSQL_SYSVAR_BOOL( + force_flush_memtable_and_lzero_now, + rocksdb_force_flush_memtable_and_lzero_now_var, PLUGIN_VAR_RQCMDARG, + "Acts similar to force_flush_memtable_now, but also compacts all L0 files.", + rocksdb_force_flush_memtable_and_lzero_now, + rocksdb_force_flush_memtable_and_lzero_now_stub, FALSE); + static MYSQL_THDVAR_BOOL( flush_memtable_on_analyze, PLUGIN_VAR_RQCMDARG, "Forces memtable flush on ANALZYE table to get accurate cardinality", @@ -1107,7 +1283,6 @@ static MYSQL_SYSVAR_BOOL( "Counting SingleDelete as rocksdb_compaction_sequential_deletes", nullptr, nullptr, rocksdb_compaction_sequential_deletes_count_sd); - static MYSQL_SYSVAR_BOOL( print_snapshot_conflict_queries, rocksdb_print_snapshot_conflict_queries, PLUGIN_VAR_RQCMDARG, @@ -1177,6 
+1352,7 @@ static struct st_mysql_sys_var *rocksdb_system_variables[] = { MYSQL_SYSVAR(lock_wait_timeout), MYSQL_SYSVAR(deadlock_detect), MYSQL_SYSVAR(max_row_locks), + MYSQL_SYSVAR(write_batch_max_bytes), MYSQL_SYSVAR(lock_scanned_rows), MYSQL_SYSVAR(bulk_load), MYSQL_SYSVAR(skip_unique_check_tables), @@ -1196,6 +1372,7 @@ static struct st_mysql_sys_var *rocksdb_system_variables[] = { MYSQL_SYSVAR(error_if_exists), MYSQL_SYSVAR(paranoid_checks), MYSQL_SYSVAR(rate_limiter_bytes_per_sec), + MYSQL_SYSVAR(sst_mgr_rate_bytes_per_sec), MYSQL_SYSVAR(delayed_write_rate), MYSQL_SYSVAR(info_log_level), MYSQL_SYSVAR(max_open_files), @@ -1205,9 +1382,7 @@ static struct st_mysql_sys_var *rocksdb_system_variables[] = { MYSQL_SYSVAR(persistent_cache_path), MYSQL_SYSVAR(persistent_cache_size_mb), MYSQL_SYSVAR(delete_obsolete_files_period_micros), - MYSQL_SYSVAR(base_background_compactions), - MYSQL_SYSVAR(max_background_compactions), - MYSQL_SYSVAR(max_background_flushes), + MYSQL_SYSVAR(max_background_jobs), MYSQL_SYSVAR(max_log_file_size), MYSQL_SYSVAR(max_subcompactions), MYSQL_SYSVAR(log_file_time_to_roll), @@ -1218,7 +1393,7 @@ static struct st_mysql_sys_var *rocksdb_system_variables[] = { MYSQL_SYSVAR(wal_size_limit_mb), MYSQL_SYSVAR(manifest_preallocation_size), MYSQL_SYSVAR(use_direct_reads), - MYSQL_SYSVAR(use_direct_writes), + MYSQL_SYSVAR(use_direct_io_for_flush_and_compaction), MYSQL_SYSVAR(allow_mmap_reads), MYSQL_SYSVAR(allow_mmap_writes), MYSQL_SYSVAR(is_fd_close_on_exec), @@ -1250,8 +1425,7 @@ static struct st_mysql_sys_var *rocksdb_system_variables[] = { MYSQL_SYSVAR(default_cf_options), MYSQL_SYSVAR(override_cf_options), - - MYSQL_SYSVAR(background_sync), + MYSQL_SYSVAR(update_cf_options), MYSQL_SYSVAR(flush_log_at_trx_commit), MYSQL_SYSVAR(write_disable_wal), @@ -1274,6 +1448,14 @@ static struct st_mysql_sys_var *rocksdb_system_variables[] = { MYSQL_SYSVAR(strict_collation_exceptions), MYSQL_SYSVAR(collect_sst_properties), 
MYSQL_SYSVAR(force_flush_memtable_now), + MYSQL_SYSVAR(force_flush_memtable_and_lzero_now), + MYSQL_SYSVAR(enable_ttl), + MYSQL_SYSVAR(enable_ttl_read_filtering), + MYSQL_SYSVAR(debug_ttl_rec_ts), + MYSQL_SYSVAR(debug_ttl_snapshot_ts), + MYSQL_SYSVAR(debug_ttl_read_filter_ts), + MYSQL_SYSVAR(reset_stats), + MYSQL_SYSVAR(io_write_timeout), MYSQL_SYSVAR(flush_memtable_on_analyze), MYSQL_SYSVAR(seconds_between_stat_computes), @@ -1299,7 +1481,7 @@ static rocksdb::WriteOptions rdb_get_rocksdb_write_options(my_core::THD *const thd) { rocksdb::WriteOptions opt; - opt.sync = THDVAR(thd, flush_log_at_trx_commit) == 1; + opt.sync = (rocksdb_flush_log_at_trx_commit == 1); opt.disableWAL = THDVAR(thd, write_disable_wal); opt.ignore_missing_column_families = THDVAR(thd, write_ignore_missing_column_families); @@ -1395,6 +1577,9 @@ class Rdb_snapshot_notifier : public rocksdb::TransactionNotifier { class Rdb_transaction { protected: ulonglong m_write_count = 0; + ulonglong m_insert_count = 0; + ulonglong m_update_count = 0; + ulonglong m_delete_count = 0; ulonglong m_lock_count = 0; bool m_is_delayed_snapshot = false; @@ -1437,6 +1622,12 @@ class Rdb_transaction { int64_t m_snapshot_timestamp = 0; bool m_ddl_transaction; + /* + Tracks the number of tables in use through external_lock. + This should not be reset during start_tx(). 
+ */ + int64_t m_n_mysql_tables_in_use = 0; + /* for distinction between rdb_transaction_impl and rdb_writebatch_impl when using walk tx list @@ -1464,7 +1655,8 @@ class Rdb_transaction { } int set_status_error(THD *const thd, const rocksdb::Status &s, - const Rdb_key_def &kd, Rdb_tbl_def *const tbl_def) { + const Rdb_key_def &kd, Rdb_tbl_def *const tbl_def, + Rdb_table_handler *const table_handler) { DBUG_ASSERT(!s.ok()); DBUG_ASSERT(tbl_def != nullptr); @@ -1480,6 +1672,7 @@ class Rdb_transaction { my_core::thd_mark_transaction_to_rollback(thd, false /*just statement*/); m_detailed_error.copy(timeout_message( "index", tbl_def->full_tablename().c_str(), kd.get_name().c_str())); + table_handler->m_lock_wait_timeout_counter.inc(); return HA_ERR_LOCK_WAIT_TIMEOUT; } @@ -1487,6 +1680,8 @@ class Rdb_transaction { if (s.IsDeadlock()) { my_core::thd_mark_transaction_to_rollback(thd, false /* just statement */); + m_detailed_error = String(); + table_handler->m_deadlock_counter.inc(); return HA_ERR_LOCK_DEADLOCK; } else if (s.IsBusy()) { rocksdb_snapshot_conflict_errors++; @@ -1498,18 +1693,16 @@ class Rdb_transaction { "Query: %s", user_host_buff, thd->query()); } + m_detailed_error = String(" (snapshot conflict)", system_charset_info); + table_handler->m_deadlock_counter.inc(); return HA_ERR_LOCK_DEADLOCK; } - if (s.IsLockLimit()) { - return HA_ERR_ROCKSDB_TOO_MANY_LOCKS; - } - if (s.IsIOError() || s.IsCorruption()) { rdb_handle_io_error(s, RDB_IO_ERROR_GENERAL); } - my_error(ER_INTERNAL_ERROR, MYF(0), s.ToString().c_str()); - return HA_ERR_INTERNAL_ERROR; + + return ha_rocksdb::rdb_error_to_mysql(s); } THD *get_thd() const { return m_thd; } @@ -1560,6 +1753,18 @@ class Rdb_transaction { ulonglong get_write_count() const { return m_write_count; } + ulonglong get_insert_count() const { return m_insert_count; } + + ulonglong get_update_count() const { return m_update_count; } + + ulonglong get_delete_count() const { return m_delete_count; } + + void incr_insert_count() { 
++m_insert_count; } + + void incr_update_count() { ++m_update_count; } + + void incr_delete_count() { ++m_delete_count; } + int get_timeout_sec() const { return m_timeout_sec; } ulonglong get_lock_count() const { return m_lock_count; } @@ -1595,7 +1800,7 @@ class Rdb_transaction { In both cases, rolling back transaction is safe. Nothing is written to binlog. */ - my_printf_error(ER_UNKNOWN_ERROR, ERRSTR_ROLLBACK_ONLY, MYF(0)); + my_error(ER_ROLLBACK_ONLY, MYF(0)); rollback(); return true; } else { @@ -1772,7 +1977,7 @@ class Rdb_transaction { bool can_prepare() const { if (m_rollback_only) { - my_printf_error(ER_UNKNOWN_ERROR, ERRSTR_ROLLBACK_ONLY, MYF(0)); + my_error(ER_ROLLBACK_ONLY, MYF(0)); return false; } return true; @@ -1780,10 +1985,7 @@ class Rdb_transaction { int rollback_to_savepoint(void *const savepoint) { if (has_modifications()) { - my_printf_error(ER_UNKNOWN_ERROR, - "MyRocks currently does not support ROLLBACK TO " - "SAVEPOINT if modifying rows.", - MYF(0)); + my_error(ER_ROLLBACK_TO_SAVEPOINT, MYF(0)); m_rollback_only = true; return HA_EXIT_FAILURE; } @@ -1886,6 +2088,9 @@ class Rdb_transaction_impl : public Rdb_transaction { release_tx(); m_write_count = 0; + m_insert_count = 0; + m_update_count = 0; + m_delete_count = 0; m_lock_count = 0; set_tx_read_only(false); m_rollback_only = false; @@ -1895,6 +2100,9 @@ class Rdb_transaction_impl : public Rdb_transaction { public: void rollback() override { m_write_count = 0; + m_insert_count = 0; + m_update_count = 0; + m_delete_count = 0; m_lock_count = 0; m_ddl_transaction = false; if (m_rocksdb_tx) { @@ -1998,6 +2206,7 @@ class Rdb_transaction_impl : public Rdb_transaction { rocksdb::Status get(rocksdb::ColumnFamilyHandle *const column_family, const rocksdb::Slice &key, std::string *value) const override { + global_stats.queries[QUERIES_POINT].inc(); return m_rocksdb_tx->Get(m_read_opts, column_family, key, value); } @@ -2015,6 +2224,7 @@ class Rdb_transaction_impl : public Rdb_transaction { 
rocksdb::Iterator * get_iterator(const rocksdb::ReadOptions &options, rocksdb::ColumnFamilyHandle *const column_family) override { + global_stats.queries[QUERIES_RANGE].inc(); return m_rocksdb_tx->GetIterator(options, column_family); } @@ -2028,8 +2238,9 @@ class Rdb_transaction_impl : public Rdb_transaction { tx_opts.set_snapshot = false; tx_opts.lock_timeout = rdb_convert_sec_to_ms(m_timeout_sec); tx_opts.deadlock_detect = THDVAR(m_thd, deadlock_detect); + tx_opts.max_write_batch_size = THDVAR(m_thd, write_batch_max_bytes); - write_opts.sync = THDVAR(m_thd, flush_log_at_trx_commit) == 1; + write_opts.sync = (rocksdb_flush_log_at_trx_commit == 1); write_opts.disableWAL = THDVAR(m_thd, write_disable_wal); write_opts.ignore_missing_column_families = THDVAR(m_thd, write_ignore_missing_column_families); @@ -2140,6 +2351,9 @@ class Rdb_writebatch_impl : public Rdb_transaction { reset(); m_write_count = 0; + m_insert_count = 0; + m_update_count = 0; + m_delete_count = 0; set_tx_read_only(false); m_rollback_only = false; return res; @@ -2161,6 +2375,9 @@ class Rdb_writebatch_impl : public Rdb_transaction { void rollback() override { m_write_count = 0; + m_insert_count = 0; + m_update_count = 0; + m_delete_count = 0; m_lock_count = 0; release_snapshot(); @@ -2242,7 +2459,7 @@ class Rdb_writebatch_impl : public Rdb_transaction { void start_tx() override { reset(); - write_opts.sync = THDVAR(m_thd, flush_log_at_trx_commit) == 1; + write_opts.sync = (rocksdb_flush_log_at_trx_commit == 1); write_opts.disableWAL = THDVAR(m_thd, write_disable_wal); write_opts.ignore_missing_column_families = THDVAR(m_thd, write_ignore_missing_column_families); @@ -2397,6 +2614,7 @@ static bool rocksdb_flush_wal(handlerton *const hton MY_ATTRIBUTE((__unused__)), rocksdb_wal_group_syncs++; const rocksdb::Status s = rdb->SyncWAL(); if (!s.ok()) { + rdb_log_status_error(s); return HA_EXIT_FAILURE; } return HA_EXIT_SUCCESS; @@ -2433,7 +2651,7 @@ static int rocksdb_prepare(handlerton *const hton, THD 
*const thd, return HA_EXIT_FAILURE; } if (thd->durability_property == HA_IGNORE_DURABILITY && - (THDVAR(thd, flush_log_at_trx_commit) == 1)) { + (rocksdb_flush_log_at_trx_commit == 1)) { /** we set the log sequence as '1' just to trigger hton->flush_logs */ @@ -2452,33 +2670,65 @@ static int rocksdb_prepare(handlerton *const hton, THD *const thd, this is needed to avoid crashes in XA scenarios */ static int rocksdb_commit_by_xid(handlerton *const hton, XID *const xid) { + DBUG_ENTER_FUNC(); + + DBUG_ASSERT(hton != nullptr); + DBUG_ASSERT(xid != nullptr); + DBUG_ASSERT(commit_latency_stats != nullptr); + + rocksdb::StopWatchNano timer(rocksdb::Env::Default(), true); + const auto name = rdb_xid_to_string(*xid); + DBUG_ASSERT(!name.empty()); + rocksdb::Transaction *const trx = rdb->GetTransactionByName(name); + if (trx == nullptr) { - return HA_EXIT_FAILURE; + DBUG_RETURN(HA_EXIT_FAILURE); } + const rocksdb::Status s = trx->Commit(); + if (!s.ok()) { - return HA_EXIT_FAILURE; + rdb_log_status_error(s); + DBUG_RETURN(HA_EXIT_FAILURE); } + delete trx; - return HA_EXIT_SUCCESS; + + // `Add()` is implemented in a thread-safe manner. 
+ commit_latency_stats->Add(timer.ElapsedNanos() / 1000); + + DBUG_RETURN(HA_EXIT_SUCCESS); } static int rocksdb_rollback_by_xid(handlerton *const hton MY_ATTRIBUTE((__unused__)), XID *const xid) { + DBUG_ENTER_FUNC(); + + DBUG_ASSERT(hton != nullptr); + DBUG_ASSERT(xid != nullptr); + DBUG_ASSERT(rdb != nullptr); + const auto name = rdb_xid_to_string(*xid); + rocksdb::Transaction *const trx = rdb->GetTransactionByName(name); + if (trx == nullptr) { - return HA_EXIT_FAILURE; + DBUG_RETURN(HA_EXIT_FAILURE); } + const rocksdb::Status s = trx->Rollback(); + if (!s.ok()) { - return HA_EXIT_FAILURE; + rdb_log_status_error(s); + DBUG_RETURN(HA_EXIT_FAILURE); } + delete trx; - return HA_EXIT_SUCCESS; + + DBUG_RETURN(HA_EXIT_SUCCESS); } /** @@ -2558,6 +2808,9 @@ static int rocksdb_commit(handlerton *const hton, THD *const thd, DBUG_ASSERT(hton != nullptr); DBUG_ASSERT(thd != nullptr); + DBUG_ASSERT(commit_latency_stats != nullptr); + + rocksdb::StopWatchNano timer(rocksdb::Env::Default(), true); /* this will trigger saving of perf_context information */ Rdb_perf_context_guard guard(thd); @@ -2573,8 +2826,9 @@ static int rocksdb_commit(handlerton *const hton, THD *const thd, - For a COMMIT statement that finishes a multi-statement transaction - For a statement that has its own transaction */ - if (tx->commit()) - DBUG_RETURN(HA_ERR_INTERNAL_ERROR); + if (tx->commit()) { + DBUG_RETURN(HA_ERR_ROCKSDB_COMMIT_FAILED); + } } else { /* We get here when committing a statement within a transaction. @@ -2592,6 +2846,9 @@ static int rocksdb_commit(handlerton *const hton, THD *const thd, } } + // `Add()` is implemented in a thread-safe manner. 
+ commit_latency_stats->Add(timer.ElapsedNanos() / 1000); + DBUG_RETURN(HA_EXIT_SUCCESS); } @@ -2740,11 +2997,14 @@ class Rdb_snapshot_status : public Rdb_tx_list_walker { THD *thd = tx->get_thd(); char buffer[1024]; thd_security_context(thd, buffer, sizeof buffer, 0); - m_data += format_string("---SNAPSHOT, ACTIVE %lld sec\n" + m_data += format_string( + "---SNAPSHOT, ACTIVE %lld sec\n" "%s\n" - "lock count %llu, write count %llu\n", - curr_time - snapshot_timestamp, buffer, - tx->get_lock_count(), tx->get_write_count()); + "lock count %llu, write count %llu\n" + "insert count %llu, update count %llu, delete count %llu\n", + curr_time - snapshot_timestamp, buffer, tx->get_lock_count(), + tx->get_write_count(), tx->get_insert_count(), tx->get_update_count(), + tx->get_delete_count()); } } }; @@ -2850,19 +3110,67 @@ static bool rocksdb_show_snapshot_status(handlerton *const hton, THD *const thd, } /* - This is called for SHOW ENGINE ROCKSDB STATUS|LOGS|etc. + This is called for SHOW ENGINE ROCKSDB STATUS | LOGS | etc. For now, produce info about live files (which gives an imprecise idea about - what column families are there) + what column families are there). */ - static bool rocksdb_show_status(handlerton *const hton, THD *const thd, stat_print_fn *const stat_print, enum ha_stat_type stat_type) { + DBUG_ASSERT(hton != nullptr); + DBUG_ASSERT(thd != nullptr); + DBUG_ASSERT(stat_print != nullptr); + bool res = false; + char buf[100] = {'\0'}; + if (stat_type == HA_ENGINE_STATUS) { + DBUG_ASSERT(rdb != nullptr); + std::string str; + /* Global DB Statistics */ + if (rocksdb_stats) { + str = rocksdb_stats->ToString(); + + // Use the same format as internal RocksDB statistics entries to make + // sure that output will look unified. 
+ DBUG_ASSERT(commit_latency_stats != nullptr); + + snprintf(buf, sizeof(buf), "rocksdb.commit_latency statistics " + "Percentiles :=> 50 : %.2f 95 : %.2f " + "99 : %.2f 100 : %.2f\n", + commit_latency_stats->Percentile(50), + commit_latency_stats->Percentile(95), + commit_latency_stats->Percentile(99), + commit_latency_stats->Percentile(100)); + str.append(buf); + + uint64_t v = 0; + + // Retrieve additional stalling related numbers from RocksDB and append + // them to the buffer meant for displaying detailed statistics. The intent + // here is to avoid adding another row to the query output because of + // just two numbers. + // + // NB! We're replacing hyphens with underscores in output to better match + // the existing naming convention. + if (rdb->GetIntProperty("rocksdb.is-write-stopped", &v)) { + snprintf(buf, sizeof(buf), "rocksdb.is_write_stopped COUNT : %lu\n", v); + str.append(buf); + } + + if (rdb->GetIntProperty("rocksdb.actual-delayed-write-rate", &v)) { + snprintf(buf, sizeof(buf), "rocksdb.actual_delayed_write_rate " + "COUNT : %lu\n", + v); + str.append(buf); + } + + res |= print_stats(thd, "STATISTICS", "rocksdb", str, stat_print); + } + /* Per DB stats */ if (rdb->GetProperty("rocksdb.dbstats", &str)) { res |= print_stats(thd, "DBSTATS", "rocksdb", str, stat_print); @@ -2870,19 +3178,14 @@ static bool rocksdb_show_status(handlerton *const hton, THD *const thd, /* Per column family stats */ for (const auto &cf_name : cf_manager.get_cf_names()) { - rocksdb::ColumnFamilyHandle *cfh; - bool is_automatic; - - /* - Only the cf name is important. Whether it was generated automatically - does not matter, so is_automatic is ignored. 
- */ - cfh = cf_manager.get_cf(cf_name.c_str(), "", nullptr, &is_automatic); - if (cfh == nullptr) + rocksdb::ColumnFamilyHandle *cfh = cf_manager.get_cf(cf_name); + if (cfh == nullptr) { continue; + } - if (!rdb->GetProperty(cfh, "rocksdb.cfstats", &str)) + if (!rdb->GetProperty(cfh, "rocksdb.cfstats", &str)) { continue; + } res |= print_stats(thd, "CF_COMPACTION", cf_name, str, stat_print); } @@ -2892,20 +3195,23 @@ static bool rocksdb_show_status(handlerton *const hton, THD *const thd, std::unordered_set cache_set; size_t internal_cache_count = 0; size_t kDefaultInternalCacheSize = 8 * 1024 * 1024; - char buf[100]; dbs.push_back(rdb); - cache_set.insert(rocksdb_tbl_options.block_cache.get()); + cache_set.insert(rocksdb_tbl_options->block_cache.get()); + for (const auto &cf_handle : cf_manager.get_all_cf()) { rocksdb::ColumnFamilyDescriptor cf_desc; cf_handle->GetDescriptor(&cf_desc); auto *const table_factory = cf_desc.options.table_factory.get(); + if (table_factory != nullptr) { std::string tf_name = table_factory->Name(); + if (tf_name.find("BlockBasedTable") != std::string::npos) { const rocksdb::BlockBasedTableOptions *const bbt_opt = reinterpret_cast( table_factory->GetOptions()); + if (bbt_opt != nullptr) { if (bbt_opt->block_cache.get() != nullptr) { cache_set.insert(bbt_opt->block_cache.get()); @@ -2922,6 +3228,7 @@ static bool rocksdb_show_status(handlerton *const hton, THD *const thd, str.clear(); rocksdb::MemoryUtil::GetApproximateMemoryUsageByType(dbs, cache_set, &temp_usage_by_type); + snprintf(buf, sizeof(buf), "\nMemTable Total: %lu", temp_usage_by_type[rocksdb::MemoryUtil::kMemTableTotal]); str.append(buf); @@ -2937,7 +3244,44 @@ static bool rocksdb_show_status(handlerton *const hton, THD *const thd, snprintf(buf, sizeof(buf), "\nDefault Cache Capacity: %lu", internal_cache_count * kDefaultInternalCacheSize); str.append(buf); - res |= print_stats(thd, "Memory_Stats", "rocksdb", str, stat_print); + res |= print_stats(thd, "MEMORY_STATS", 
"rocksdb", str, stat_print); + + /* Show the background thread status */ + std::vector thread_list; + rocksdb::Status s = rdb->GetEnv()->GetThreadList(&thread_list); + + if (!s.ok()) { + sql_print_error("RocksDB: Returned error (%s) from GetThreadList.\n", + s.ToString().c_str()); + res |= true; + } else { + /* For each background thread retrieved, print out its information */ + for (auto &it : thread_list) { + /* Only look at background threads. Ignore user threads, if any. */ + if (it.thread_type > rocksdb::ThreadStatus::LOW_PRIORITY) { + continue; + } + + str = "\nthread_type: " + it.GetThreadTypeName(it.thread_type) + + "\ncf_name: " + it.cf_name + + "\noperation_type: " + it.GetOperationName(it.operation_type) + + "\noperation_stage: " + + it.GetOperationStageName(it.operation_stage) + + "\nelapsed_time_ms: " + + it.MicrosToString(it.op_elapsed_micros); + + for (auto &it_props : + it.InterpretOperationProperties(it.operation_type, + it.op_properties)) { + str += "\n" + it_props.first + ": " + std::to_string(it_props.second); + } + + str += "\nstate_type: " + it.GetStateName(it.state_type); + + res |= print_stats(thd, "BG_THREADS", std::to_string(it.thread_id), + str, stat_print); + } + } } else if (stat_type == HA_ENGINE_TRX) { /* Handle the SHOW ENGINE ROCKSDB TRANSACTION STATUS command */ res |= rocksdb_show_snapshot_status(hton, thd, stat_print); @@ -2993,11 +3337,7 @@ static int rocksdb_start_tx_and_assign_read_view( ulong const tx_isolation = my_core::thd_tx_isolation(thd); if (tx_isolation != ISO_REPEATABLE_READ) { - my_printf_error(ER_UNKNOWN_ERROR, - "Only REPEATABLE READ isolation level is supported " - "for START TRANSACTION WITH CONSISTENT SNAPSHOT " - "in RocksDB Storage Engine.", - MYF(0)); + my_error(ER_ISOLATION_LEVEL_WITH_CONSISTENT_SNAPSHOT, MYF(0)); return HA_EXIT_FAILURE; } @@ -3052,11 +3392,14 @@ static void rocksdb_update_table_stats( my_io_perf_t *r, my_io_perf_t *w, my_io_perf_t *r_blob, my_io_perf_t *r_primary, my_io_perf_t 
*r_secondary, page_stats_t *page_stats, comp_stats_t *comp_stats, - int n_lock_wait, int n_lock_wait_timeout, const char *engine)) { + int n_lock_wait, int n_lock_wait_timeout, int n_lock_deadlock, + const char *engine)) { my_io_perf_t io_perf_read; my_io_perf_t io_perf; page_stats_t page_stats; comp_stats_t comp_stats; + uint lock_wait_timeout_stats; + uint deadlock_stats; std::vector tablenames; /* @@ -3076,7 +3419,7 @@ static void rocksdb_update_table_stats( char tablename_sys[NAME_LEN + 1]; bool is_partition; - if (rdb_normalize_tablename(it, &str)) { + if (rdb_normalize_tablename(it, &str) != HA_EXIT_SUCCESS) { /* Function needs to return void because of the interface and we've * detected an error which shouldn't happen. There's no way to let * caller know that something failed. @@ -3098,6 +3441,8 @@ static void rocksdb_update_table_stats( io_perf_read.bytes = table_handler->m_io_perf_read.bytes.load(); io_perf_read.requests = table_handler->m_io_perf_read.requests.load(); + lock_wait_timeout_stats = table_handler->m_lock_wait_timeout_counter.load(); + deadlock_stats = table_handler->m_deadlock_counter.load(); /* Convert from rocksdb timer to mysql timer. 
RocksDB values are @@ -3124,8 +3469,8 @@ static void rocksdb_update_table_stats( my_core::filename_to_tablename(tablename.c_str(), tablename_sys, sizeof(tablename_sys)); (*cb)(dbname_sys, tablename_sys, is_partition, &io_perf_read, &io_perf, - &io_perf, &io_perf, &io_perf, &page_stats, &comp_stats, 0, 0, - rocksdb_hton_name); + &io_perf, &io_perf, &io_perf, &page_stats, &comp_stats, 0, + lock_wait_timeout_stats, deadlock_stats, rocksdb_hton_name); } } @@ -3247,37 +3592,37 @@ static int rocksdb_init_func(void *const p) { DBUG_ASSERT(!mysqld_embedded); rocksdb_stats = rocksdb::CreateDBStatistics(); - rocksdb_db_options.statistics = rocksdb_stats; + rocksdb_db_options->statistics = rocksdb_stats; if (rocksdb_rate_limiter_bytes_per_sec != 0) { rocksdb_rate_limiter.reset( rocksdb::NewGenericRateLimiter(rocksdb_rate_limiter_bytes_per_sec)); - rocksdb_db_options.rate_limiter = rocksdb_rate_limiter; + rocksdb_db_options->rate_limiter = rocksdb_rate_limiter; } - rocksdb_db_options.delayed_write_rate = rocksdb_delayed_write_rate; + rocksdb_db_options->delayed_write_rate = rocksdb_delayed_write_rate; std::shared_ptr myrocks_logger = std::make_shared(); rocksdb::Status s = rocksdb::CreateLoggerFromOptions( - rocksdb_datadir, rocksdb_db_options, &rocksdb_db_options.info_log); + rocksdb_datadir, *rocksdb_db_options, &rocksdb_db_options->info_log); if (s.ok()) { - myrocks_logger->SetRocksDBLogger(rocksdb_db_options.info_log); + myrocks_logger->SetRocksDBLogger(rocksdb_db_options->info_log); } - rocksdb_db_options.info_log = myrocks_logger; + rocksdb_db_options->info_log = myrocks_logger; myrocks_logger->SetInfoLogLevel( static_cast(rocksdb_info_log_level)); - rocksdb_db_options.wal_dir = rocksdb_wal_dir; + rocksdb_db_options->wal_dir = rocksdb_wal_dir; - rocksdb_db_options.wal_recovery_mode = + rocksdb_db_options->wal_recovery_mode = static_cast(rocksdb_wal_recovery_mode); - rocksdb_db_options.access_hint_on_compaction_start = + rocksdb_db_options->access_hint_on_compaction_start 
= static_cast( rocksdb_access_hint_on_compaction_start); - if (rocksdb_db_options.allow_mmap_reads && - rocksdb_db_options.use_direct_reads) { + if (rocksdb_db_options->allow_mmap_reads && + rocksdb_db_options->use_direct_reads) { // allow_mmap_reads implies !use_direct_reads and RocksDB will not open if // mmap_reads and direct_reads are both on. (NO_LINT_DEBUG) sql_print_error("RocksDB: Can't enable both use_direct_reads " @@ -3286,18 +3631,28 @@ static int rocksdb_init_func(void *const p) { DBUG_RETURN(HA_EXIT_FAILURE); } - if (rocksdb_db_options.allow_mmap_writes && - rocksdb_db_options.use_direct_writes) { + if (rocksdb_db_options->allow_mmap_writes && + rocksdb_db_options->use_direct_io_for_flush_and_compaction) { // See above comment for allow_mmap_reads. (NO_LINT_DEBUG) - sql_print_error("RocksDB: Can't enable both use_direct_writes " - "and allow_mmap_writes\n"); + sql_print_error("RocksDB: Can't enable both " + "use_direct_io_for_flush_and_compaction and " + "allow_mmap_writes\n"); rdb_open_tables.free_hash(); DBUG_RETURN(HA_EXIT_FAILURE); } + // sst_file_manager will move deleted rocksdb sst files to trash_dir + // to be deleted in a background thread. 
+ std::string trash_dir = std::string(rocksdb_datadir) + "/trash"; + rocksdb_db_options->sst_file_manager.reset( + NewSstFileManager(rocksdb_db_options->env, myrocks_logger, trash_dir)); + + rocksdb_db_options->sst_file_manager->SetDeleteRateBytesPerSecond( + rocksdb_sst_mgr_rate_bytes_per_sec); + std::vector cf_names; rocksdb::Status status; - status = rocksdb::DB::ListColumnFamilies(rocksdb_db_options, rocksdb_datadir, + status = rocksdb::DB::ListColumnFamilies(*rocksdb_db_options, rocksdb_datadir, &cf_names); if (!status.ok()) { /* @@ -3310,10 +3665,8 @@ static int rocksdb_init_func(void *const p) { sql_print_information( "RocksDB: assuming that we're creating a new database"); } else { - std::string err_text = status.ToString(); - sql_print_error("RocksDB: Error listing column families: %s", - err_text.c_str()); rdb_open_tables.free_hash(); + rdb_log_status_error(status, "Error listing column families"); DBUG_RETURN(HA_EXIT_FAILURE); } } else @@ -3323,18 +3676,18 @@ static int rocksdb_init_func(void *const p) { std::vector cf_descr; std::vector cf_handles; - rocksdb_tbl_options.index_type = + rocksdb_tbl_options->index_type = (rocksdb::BlockBasedTableOptions::IndexType)rocksdb_index_type; - if (!rocksdb_tbl_options.no_block_cache) { - rocksdb_tbl_options.block_cache = + if (!rocksdb_tbl_options->no_block_cache) { + rocksdb_tbl_options->block_cache = rocksdb::NewLRUCache(rocksdb_block_cache_size); } // Using newer BlockBasedTable format version for better compression // and better memory allocation. 
// See: // https://github.com/facebook/rocksdb/commit/9ab5adfc59a621d12357580c94451d9f7320c2dd - rocksdb_tbl_options.format_version = 2; + rocksdb_tbl_options->format_version = 2; if (rocksdb_collect_sst_properties) { properties_collector_factory = @@ -3358,15 +3711,16 @@ static int rocksdb_init_func(void *const p) { rocksdb::NewPersistentCache( rocksdb::Env::Default(), std::string(rocksdb_persistent_cache_path), cache_size_bytes, myrocks_logger, true, &pcache); - rocksdb_tbl_options.persistent_cache = pcache; + rocksdb_tbl_options->persistent_cache = pcache; } else if (strlen(rocksdb_persistent_cache_path)) { sql_print_error("RocksDB: Must specify rocksdb_persistent_cache_size_mb"); - DBUG_RETURN(1); + DBUG_RETURN(HA_EXIT_FAILURE); } - if (!rocksdb_cf_options_map.init( - rocksdb_tbl_options, properties_collector_factory, - rocksdb_default_cf_options, rocksdb_override_cf_options)) { + std::unique_ptr cf_options_map(new Rdb_cf_options()); + if (!cf_options_map->init(*rocksdb_tbl_options, properties_collector_factory, + rocksdb_default_cf_options, + rocksdb_override_cf_options)) { // NO_LINT_DEBUG sql_print_error("RocksDB: Failed to initialize CF options map."); rdb_open_tables.free_hash(); @@ -3384,7 +3738,7 @@ static int rocksdb_init_func(void *const p) { sql_print_information("RocksDB: Column Families at start:"); for (size_t i = 0; i < cf_names.size(); ++i) { rocksdb::ColumnFamilyOptions opts; - rocksdb_cf_options_map.get_cf_options(cf_names[i], &opts); + cf_options_map->get_cf_options(cf_names[i], &opts); sql_print_information(" cf=%s", cf_names[i].c_str()); sql_print_information(" write_buffer_size=%ld", opts.write_buffer_size); @@ -3402,13 +3756,9 @@ static int rocksdb_init_func(void *const p) { cf_descr.push_back(rocksdb::ColumnFamilyDescriptor(cf_names[i], opts)); } - rocksdb::Options main_opts(rocksdb_db_options, - rocksdb_cf_options_map.get_defaults()); + rocksdb::Options main_opts(*rocksdb_db_options, + cf_options_map->get_defaults()); - 
main_opts.env->SetBackgroundThreads(main_opts.max_background_flushes, - rocksdb::Env::Priority::HIGH); - main_opts.env->SetBackgroundThreads(main_opts.max_background_compactions, - rocksdb::Env::Priority::LOW); rocksdb::TransactionDBOptions tx_db_options; tx_db_options.transaction_lock_timeout = 2; // 2 seconds tx_db_options.custom_mutex_factory = std::make_shared(); @@ -3419,10 +3769,8 @@ static int rocksdb_init_func(void *const p) { // We won't start if we'll determine that there's a chance of data corruption // because of incompatible options. if (!status.ok()) { - // NO_LINT_DEBUG - sql_print_error("RocksDB: compatibility check against existing database " - "options failed. %s", - status.ToString().c_str()); + rdb_log_status_error( + status, "Compatibility check against existing database options failed"); rdb_open_tables.free_hash(); DBUG_RETURN(HA_EXIT_FAILURE); } @@ -3431,12 +3779,11 @@ static int rocksdb_init_func(void *const p) { main_opts, tx_db_options, rocksdb_datadir, cf_descr, &cf_handles, &rdb); if (!status.ok()) { - std::string err_text = status.ToString(); - sql_print_error("RocksDB: Error opening instance: %s", err_text.c_str()); + rdb_log_status_error(status, "Error opening instance"); rdb_open_tables.free_hash(); DBUG_RETURN(HA_EXIT_FAILURE); } - cf_manager.init(&rocksdb_cf_options_map, &cf_handles); + cf_manager.init(std::move(cf_options_map), &cf_handles); if (dict_manager.init(rdb->GetBaseDB(), &cf_manager)) { // NO_LINT_DEBUG @@ -3474,9 +3821,7 @@ static int rocksdb_init_func(void *const p) { status = rdb->EnableAutoCompaction(compaction_enabled_cf_handles); if (!status.ok()) { - const std::string err_text = status.ToString(); - // NO_LINT_DEBUG - sql_print_error("RocksDB: Error enabling compaction: %s", err_text.c_str()); + rdb_log_status_error(status, "Error enabling compaction"); rdb_open_tables.free_hash(); DBUG_RETURN(HA_EXIT_FAILURE); } @@ -3525,7 +3870,38 @@ static int rocksdb_init_func(void *const p) { } #endif - 
sql_print_information("RocksDB instance opened"); + err = my_error_register(rdb_get_error_messages, HA_ERR_ROCKSDB_FIRST, + HA_ERR_ROCKSDB_LAST); + if (err != 0) { + // NO_LINT_DEBUG + sql_print_error("RocksDB: Couldn't initialize error messages"); + rdb_open_tables.free_hash(); + DBUG_RETURN(HA_EXIT_FAILURE); + } + + // Creating an instance of HistogramImpl should only happen after RocksDB + // has been successfully initialized. + commit_latency_stats = new rocksdb::HistogramImpl(); + + // Construct a list of directories which will be monitored by I/O watchdog + // to make sure that we won't lose write access to them. + std::vector directories; + + // 1. Data directory. + directories.push_back(mysql_real_data_home); + + // 2. Transaction logs. + if (myrocks::rocksdb_wal_dir && *myrocks::rocksdb_wal_dir) { + directories.push_back(myrocks::rocksdb_wal_dir); + } + + io_watchdog = new Rdb_io_watchdog(directories); + io_watchdog->reset_timeout(rocksdb_io_write_timeout_secs); + + // NO_LINT_DEBUG + sql_print_information("MyRocks storage engine plugin has been successfully " + "initialized."); + DBUG_RETURN(HA_EXIT_SUCCESS); } @@ -3600,15 +3976,27 @@ static int rocksdb_done_func(void *const p) { delete rdb; rdb = nullptr; + delete commit_latency_stats; + commit_latency_stats = nullptr; + + delete io_watchdog; + io_watchdog = nullptr; + // Disown the cache data since we're shutting down. // This results in memory leaks but it improved the shutdown time. 
// Don't disown when running under valgrind #ifndef HAVE_purify - if (rocksdb_tbl_options.block_cache) { - rocksdb_tbl_options.block_cache->DisownData(); + if (rocksdb_tbl_options->block_cache) { + rocksdb_tbl_options->block_cache->DisownData(); } #endif /* HAVE_purify */ + rocksdb_db_options = nullptr; + rocksdb_tbl_options = nullptr; + rocksdb_stats = nullptr; + + my_error_unregister(HA_ERR_ROCKSDB_FIRST, HA_ERR_ROCKSDB_LAST); + DBUG_RETURN(error); } @@ -3622,6 +4010,15 @@ static inline void rocksdb_smart_seek(bool seek_backward, } } +static inline void rocksdb_smart_next(bool seek_backward, + rocksdb::Iterator *const iter) { + if (seek_backward) { + iter->Prev(); + } else { + iter->Next(); + } +} + /** @brief Example of simple lock controls. The "table_handler" it creates is a @@ -3750,7 +4147,12 @@ void ha_rocksdb::update_auto_incr_val() { my_bitmap_map *const old_map = dbug_tmp_use_all_columns(table, table->read_set); - new_val = field->val_int() + 1; + new_val = field->val_int(); + // don't increment if we would wrap around + if (new_val != std::numeric_limits::max()) { + new_val++; + } + dbug_tmp_restore_column_map(table->read_set, old_map); longlong auto_incr_val = m_tbl_def->m_auto_incr_val; @@ -3776,11 +4178,12 @@ int ha_rocksdb::load_hidden_pk_value() { Decode PK field from the key */ longlong hidden_pk_id = 0; - if (read_hidden_pk_id_from_rowkey(&hidden_pk_id)) { + auto err = read_hidden_pk_id_from_rowkey(&hidden_pk_id); + if (err) { if (is_new_snapshot) { tx->release_snapshot(); } - return HA_ERR_INTERNAL_ERROR; + return err; } hidden_pk_id++; @@ -3821,12 +4224,13 @@ int ha_rocksdb::read_hidden_pk_id_from_rowkey(longlong *const hidden_pk_id) { // Get hidden primary key from old key slice Rdb_string_reader reader(&rowkey_slice); if ((!reader.read(Rdb_key_def::INDEX_NUMBER_SIZE))) - return HA_EXIT_FAILURE; + return HA_ERR_ROCKSDB_CORRUPT_DATA; const int length = Field_longlong::PACK_LENGTH; const uchar *from = reinterpret_cast(reader.read(length)); if 
(from == nullptr) { - return HA_EXIT_FAILURE; /* Mem-comparable image doesn't have enough bytes */ + /* Mem-comparable image doesn't have enough bytes */ + return HA_ERR_ROCKSDB_CORRUPT_DATA; } *hidden_pk_id = rdb_netbuf_read_uint64(&from); @@ -3895,6 +4299,11 @@ bool ha_rocksdb::same_table(const ha_rocksdb &other) const { return m_tbl_def->base_tablename() == other.m_tbl_def->base_tablename(); } +/** + @return + false OK + other Error inpacking the data +*/ bool ha_rocksdb::init_with_fields() { DBUG_ENTER_FUNC(); @@ -3910,6 +4319,73 @@ bool ha_rocksdb::init_with_fields() { DBUG_RETURN(false); /* Ok */ } +/* + If the key is a TTL key, we may need to filter it out. + + The purpose of read filtering for tables with TTL is to ensure that + during a transaction a key which has expired already but not removed by + compaction yet is not returned to the user. + + Without this the user might be hit with problems such as disappearing + rows within a transaction, etc, because the compaction filter ignores + snapshots when filtering keys. +*/ +bool ha_rocksdb::should_hide_ttl_rec(const rocksdb::Slice &ttl_rec_val, + const int64_t curr_ts) { + DBUG_ASSERT(m_pk_descr != nullptr); + DBUG_ASSERT(m_pk_descr->has_ttl()); + DBUG_ASSERT(m_pk_descr->m_ttl_rec_offset != UINT_MAX); + + /* + Curr_ts can only be 0 if there are no snapshots open. + should_hide_ttl_rec can only be called when there is >=1 snapshots, unless + we are filtering on the write path (single INSERT/UPDATE) in which case + we are passed in the current time as curr_ts. + + In the event curr_ts is 0, we always decide not to filter the record. We + also log a warning and increment a diagnostic counter. + */ + if (curr_ts == 0) { + update_row_stats(ROWS_HIDDEN_NO_SNAPSHOT); + return false; + } + + if (!rdb_is_ttl_read_filtering_enabled() || !rdb_is_ttl_enabled()) { + return false; + } + + Rdb_string_reader reader(&ttl_rec_val); + + /* + Find where the 8-byte ttl is for each record in this index. 
+ */ + uint64 ts; + if (!reader.read(m_pk_descr->m_ttl_rec_offset) || reader.read_uint64(&ts)) { + /* + This condition should never be reached since all TTL records have an + 8 byte ttl field in front. Don't filter the record out, and log an error. + */ + std::string buf; + buf = rdb_hexdump(ttl_rec_val.data(), ttl_rec_val.size(), + RDB_MAX_HEXDUMP_LEN); + const GL_INDEX_ID gl_index_id = m_pk_descr->get_gl_index_id(); + // NO_LINT_DEBUG + sql_print_error("Decoding ttl from PK value failed, " + "for index (%u,%u), val: %s", + gl_index_id.cf_id, gl_index_id.index_id, buf.c_str()); + DBUG_ASSERT(0); + return false; + } + + /* Hide record if it has expired before the current snapshot time. */ + uint64 read_filter_ts = 0; +#ifndef NDEBUG + read_filter_ts += rdb_dbug_set_ttl_read_filter_ts(); +#endif + return ts + m_pk_descr->m_ttl_duration + read_filter_ts <= + static_cast(curr_ts); +} + /** Convert record from table->record[0] form into a form that can be written into rocksdb. @@ -3919,19 +4395,81 @@ bool ha_rocksdb::init_with_fields() { @param packed_rec OUT Data slice with record data. 
*/ -void ha_rocksdb::convert_record_to_storage_format( - const rocksdb::Slice &pk_packed_slice, - Rdb_string_writer *const pk_unpack_info, rocksdb::Slice *const packed_rec) { - DBUG_ASSERT_IMP(m_maybe_unpack_info, pk_unpack_info); +int ha_rocksdb::convert_record_to_storage_format( + const struct update_row_info &row_info, rocksdb::Slice *const packed_rec) { + DBUG_ASSERT_IMP(m_maybe_unpack_info, row_info.new_pk_unpack_info); + DBUG_ASSERT(m_pk_descr != nullptr); + + const rocksdb::Slice &pk_packed_slice = row_info.new_pk_slice; + Rdb_string_writer *const pk_unpack_info = row_info.new_pk_unpack_info; + bool has_ttl = m_pk_descr->has_ttl(); + bool has_ttl_column = !m_pk_descr->m_ttl_column.empty(); + bool ttl_in_pk = has_ttl_column && (row_info.ttl_pk_offset != UINT_MAX); + m_storage_record.length(0); - /* All NULL bits are initially 0 */ - m_storage_record.fill(m_null_bytes_in_rec, 0); + if (has_ttl) { + /* If it's a TTL record, reserve space for 8 byte TTL value in front. */ + m_storage_record.fill(ROCKSDB_SIZEOF_TTL_RECORD + m_null_bytes_in_rec, 0); - // If a primary key may have non-empty unpack_info for certain values, - // (m_maybe_unpack_info=TRUE), we write the unpack_info block. The block - // itself was prepared in Rdb_key_def::pack_record. - if (m_maybe_unpack_info) { + /* + If the TTL is contained within the key, we use the offset to find the + TTL value and place it in the beginning of the value record. 
+ */ + if (ttl_in_pk) { + Rdb_string_reader reader(&pk_packed_slice); + const char *ts; + if (!reader.read(row_info.ttl_pk_offset) || + !(ts = reader.read(ROCKSDB_SIZEOF_TTL_RECORD))) { + std::string buf; + buf = rdb_hexdump(pk_packed_slice.data(), pk_packed_slice.size(), + RDB_MAX_HEXDUMP_LEN); + const GL_INDEX_ID gl_index_id = m_pk_descr->get_gl_index_id(); + // NO_LINT_DEBUG + sql_print_error("Decoding ttl from PK failed during insert, " + "for index (%u,%u), key: %s", + gl_index_id.cf_id, gl_index_id.index_id, buf.c_str()); + return HA_EXIT_FAILURE; + } + + char *const data = const_cast(m_storage_record.ptr()); + memcpy(data, ts, ROCKSDB_SIZEOF_TTL_RECORD); +#ifndef NDEBUG + // Adjust for test case if needed + rdb_netbuf_store_uint64( + reinterpret_cast(data), + rdb_netbuf_to_uint64(reinterpret_cast(data)) + + rdb_dbug_set_ttl_rec_ts()); +#endif + } else if (!has_ttl_column) { + /* + For implicitly generated TTL records we need to copy over the old + TTL value from the old record in the event of an update. It was stored + in m_ttl_bytes. + + Otherwise, generate a timestamp using the current time. + */ + if (!row_info.old_pk_slice.empty()) { + char *const data = const_cast(m_storage_record.ptr()); + memcpy(data, m_ttl_bytes, sizeof(uint64)); + } else { + uint64 ts = static_cast(std::time(nullptr)); +#ifndef NDEBUG + ts += rdb_dbug_set_ttl_rec_ts(); +#endif + char *const data = const_cast(m_storage_record.ptr()); + rdb_netbuf_store_uint64(reinterpret_cast(data), ts); + } + } + } else { + /* All NULL bits are initially 0 */ + m_storage_record.fill(m_null_bytes_in_rec, 0); + } + + // If a primary key may have non-empty unpack_info for certain values, + // (m_maybe_unpack_info=TRUE), we write the unpack_info block. The block + // itself was prepared in Rdb_key_def::pack_record. 
+ if (m_maybe_unpack_info) { m_storage_record.append(reinterpret_cast(pk_unpack_info->ptr()), pk_unpack_info->get_current_pos()); } @@ -3944,7 +4482,11 @@ void ha_rocksdb::convert_record_to_storage_format( Field *const field = table->field[i]; if (m_encoder_arr[i].maybe_null()) { - char *const data = (char *)m_storage_record.ptr(); + char *data = const_cast(m_storage_record.ptr()); + if (has_ttl) { + data += ROCKSDB_SIZEOF_TTL_RECORD; + } + if (field->is_null()) { data[m_encoder_arr[i].m_null_offset] |= m_encoder_arr[i].m_null_mask; /* Don't write anything for NULL values */ @@ -3981,6 +4523,24 @@ void ha_rocksdb::convert_record_to_storage_format( /* Copy the field data */ const uint len = field->pack_length_in_rec(); m_storage_record.append(reinterpret_cast(field->ptr), len); + + /* + Check if this is the TTL field within the table, if so store the TTL + in the front of the record as well here. + */ + if (has_ttl && has_ttl_column && + i == m_pk_descr->get_ttl_field_offset()) { + DBUG_ASSERT(len == ROCKSDB_SIZEOF_TTL_RECORD); + DBUG_ASSERT(field->real_type() == MYSQL_TYPE_LONGLONG); + DBUG_ASSERT(m_pk_descr->get_ttl_field_offset() != UINT_MAX); + + char *const data = const_cast(m_storage_record.ptr()); + uint64 ts = uint8korr(field->ptr); +#ifndef NDEBUG + ts += rdb_dbug_set_ttl_rec_ts(); +#endif + rdb_netbuf_store_uint64(reinterpret_cast(data), ts); + } } } @@ -4001,6 +4561,8 @@ void ha_rocksdb::convert_record_to_storage_format( *packed_rec = rocksdb::Slice(m_storage_record.ptr(), m_storage_record.length()); + + return HA_EXIT_SUCCESS; } /* @@ -4031,7 +4593,9 @@ void ha_rocksdb::setup_read_decoders() { continue; } + // bitmap is cleared on index merge, but it still needs to decode columns if (m_lock_rows == RDB_LOCK_WRITE || m_verify_row_debug_checksums || + bitmap_is_clear_all(table->read_set) || bitmap_is_set(table->read_set, table->field[i]->field_index)) { // We will need to decode this field m_decoders_vect.push_back({&m_encoder_arr[i], true, skip_size}); @@ 
-4117,7 +4681,7 @@ int ha_rocksdb::convert_blob_from_storage_format( const char *data_len_str; if (!(data_len_str = reader->read(length_bytes))) { - return HA_ERR_INTERNAL_ERROR; + return HA_ERR_ROCKSDB_CORRUPT_DATA; } memcpy(blob->ptr, data_len_str, length_bytes); @@ -4127,7 +4691,7 @@ int ha_rocksdb::convert_blob_from_storage_format( table->s->db_low_byte_first); const char *blob_ptr; if (!(blob_ptr = reader->read(data_len))) { - return HA_ERR_INTERNAL_ERROR; + return HA_ERR_ROCKSDB_CORRUPT_DATA; } if (decode) { @@ -4147,7 +4711,7 @@ int ha_rocksdb::convert_varchar_from_storage_format( { const char *data_len_str; if (!(data_len_str = reader->read(field_var->length_bytes))) - return HA_ERR_INTERNAL_ERROR; + return HA_ERR_ROCKSDB_CORRUPT_DATA; uint data_len; /* field_var->length_bytes is 1 or 2 */ @@ -4160,11 +4724,11 @@ int ha_rocksdb::convert_varchar_from_storage_format( if (data_len > field_var->field_length) { /* The data on disk is longer than table DDL allows? */ - return HA_ERR_INTERNAL_ERROR; + return HA_ERR_ROCKSDB_CORRUPT_DATA; } if (!reader->read(data_len)) { - return HA_ERR_INTERNAL_ERROR; + return HA_ERR_ROCKSDB_CORRUPT_DATA; } if (decode) { @@ -4183,7 +4747,7 @@ int ha_rocksdb::convert_field_from_storage_format( const char *data_bytes; if (len > 0) { if ((data_bytes = reader->read(len)) == nullptr) { - return HA_ERR_INTERNAL_ERROR; + return HA_ERR_ROCKSDB_CORRUPT_DATA; } if (decode) @@ -4237,17 +4801,26 @@ int ha_rocksdb::convert_record_from_storage_format( uint16 unpack_info_len = 0; rocksdb::Slice unpack_slice; + /* If it's a TTL record, skip the 8 byte TTL value */ + const char *ttl_bytes; + if (m_pk_descr->has_ttl()) { + if ((ttl_bytes = reader.read(ROCKSDB_SIZEOF_TTL_RECORD))) { + memcpy(m_ttl_bytes, ttl_bytes, ROCKSDB_SIZEOF_TTL_RECORD); + } else { + return HA_ERR_ROCKSDB_CORRUPT_DATA; + } + } + /* Other fields are decoded from the value */ const char *null_bytes = nullptr; if (m_null_bytes_in_rec && !(null_bytes = 
reader.read(m_null_bytes_in_rec))) { - return HA_ERR_INTERNAL_ERROR; + return HA_ERR_ROCKSDB_CORRUPT_DATA; } if (m_maybe_unpack_info) { unpack_info = reader.read(RDB_UNPACK_HEADER_SIZE); - if (!unpack_info || unpack_info[0] != RDB_UNPACK_DATA_TAG) { - return HA_ERR_INTERNAL_ERROR; + return HA_ERR_ROCKSDB_CORRUPT_DATA; } unpack_info_len = @@ -4257,13 +4830,13 @@ int ha_rocksdb::convert_record_from_storage_format( reader.read(unpack_info_len - RDB_UNPACK_HEADER_SIZE); } - if (m_pk_descr->unpack_record(table, buf, &rowkey_slice, - unpack_info ? &unpack_slice : nullptr, - false /* verify_checksum */)) { - return HA_ERR_INTERNAL_ERROR; + int err = m_pk_descr->unpack_record(table, buf, &rowkey_slice, + unpack_info ? &unpack_slice : nullptr, + false /* verify_checksum */); + if (err != HA_EXIT_SUCCESS) { + return err; } - int err = HA_EXIT_SUCCESS; for (auto it = m_decoders_vect.begin(); it != m_decoders_vect.end(); it++) { const Rdb_field_encoder *const field_dec = it->m_field_enc; const bool decode = it->m_decode; @@ -4274,8 +4847,9 @@ int ha_rocksdb::convert_record_from_storage_format( Field *const field = table->field[field_dec->m_field_index]; /* Skip the bytes we need to skip */ - if (it->m_skip && !reader.read(it->m_skip)) - return HA_ERR_INTERNAL_ERROR; + if (it->m_skip && !reader.read(it->m_skip)) { + return HA_ERR_ROCKSDB_CORRUPT_DATA; + } uint field_offset = field->ptr - table->record[0]; uint null_offset = field->null_offset(); @@ -4342,7 +4916,7 @@ int ha_rocksdb::convert_record_from_storage_format( if (stored_key_chksum != computed_key_chksum) { m_pk_descr->report_checksum_mismatch(true, key->data(), key->size()); - return HA_ERR_INTERNAL_ERROR; + return HA_ERR_ROCKSDB_CHECKSUM_MISMATCH; } DBUG_EXECUTE_IF("myrocks_simulate_bad_pk_checksum2", @@ -4350,13 +4924,13 @@ int ha_rocksdb::convert_record_from_storage_format( if (stored_val_chksum != computed_val_chksum) { m_pk_descr->report_checksum_mismatch(false, value->data(), value->size()); - return 
HA_ERR_INTERNAL_ERROR; + return HA_ERR_ROCKSDB_CHECKSUM_MISMATCH; } m_row_checksums_checked++; } if (reader.remaining_bytes()) - return HA_ERR_INTERNAL_ERROR; + return HA_ERR_ROCKSDB_CORRUPT_DATA; } return HA_EXIT_SUCCESS; @@ -4472,16 +5046,10 @@ int ha_rocksdb::alloc_key_buffers(const TABLE *const table_arg, m_pk_descr->setup(table_arg, tbl_def_arg); m_pk_tuple = reinterpret_cast(my_malloc(key_len, MYF(0))); - if (m_pk_tuple == nullptr) { - goto error; - } pack_key_len = m_pk_descr->max_storage_fmt_length(); m_pk_packed_tuple = reinterpret_cast(my_malloc(pack_key_len, MYF(0))); - if (m_pk_packed_tuple == nullptr) { - goto error; - } /* Sometimes, we may use m_sk_packed_tuple for storing packed PK */ max_packed_sk_len = pack_key_len; @@ -4498,40 +5066,40 @@ int ha_rocksdb::alloc_key_buffers(const TABLE *const table_arg, } } - if (!(m_sk_packed_tuple = - reinterpret_cast(my_malloc(max_packed_sk_len, MYF(0)))) || - !(m_sk_match_prefix_buf = - reinterpret_cast(my_malloc(max_packed_sk_len, MYF(0)))) || - !(m_sk_packed_tuple_old = - reinterpret_cast(my_malloc(max_packed_sk_len, MYF(0)))) || - !(m_end_key_packed_tuple = - reinterpret_cast(my_malloc(max_packed_sk_len, MYF(0)))) || - !((m_pack_buffer = reinterpret_cast( - my_malloc(max_packed_sk_len, MYF(0)))))) { - goto error; - } + m_sk_packed_tuple = + reinterpret_cast(my_malloc(max_packed_sk_len, MYF(0))); + m_sk_match_prefix_buf = + reinterpret_cast(my_malloc(max_packed_sk_len, MYF(0))); + m_sk_packed_tuple_old = + reinterpret_cast(my_malloc(max_packed_sk_len, MYF(0))); + m_end_key_packed_tuple = + reinterpret_cast(my_malloc(max_packed_sk_len, MYF(0))); + m_pack_buffer = + reinterpret_cast(my_malloc(max_packed_sk_len, MYF(0))); /* If inplace alter is happening, allocate special buffers for unique secondary index duplicate checking. 
*/ - if (alloc_alter_buffers && - (!(m_dup_sk_packed_tuple = - reinterpret_cast(my_malloc(max_packed_sk_len, MYF(0)))) || - !(m_dup_sk_packed_tuple_old = reinterpret_cast( - my_malloc(max_packed_sk_len, MYF(0)))))) { - goto error; + if (alloc_alter_buffers) { + m_dup_sk_packed_tuple = + reinterpret_cast(my_malloc(max_packed_sk_len, MYF(0))); + m_dup_sk_packed_tuple_old = + reinterpret_cast(my_malloc(max_packed_sk_len, MYF(0))); + } + + if (m_pk_tuple == nullptr || m_pk_packed_tuple == nullptr || + m_sk_packed_tuple == nullptr || m_sk_packed_tuple_old == nullptr || + m_end_key_packed_tuple == nullptr || m_pack_buffer == nullptr || + (alloc_alter_buffers && (m_dup_sk_packed_tuple == nullptr || + m_dup_sk_packed_tuple_old == nullptr))) { + // One or more of the above allocations failed. Clean up and exit + free_key_buffers(); + + DBUG_RETURN(HA_ERR_OUT_OF_MEM); } DBUG_RETURN(HA_EXIT_SUCCESS); - -error: - // If we're here then this means that at some point above an allocation may - // have failed. To avoid any resource leaks and maintain a clear contract - // we'll clean up before returning the error code. 
- free_key_buffers(); - - DBUG_RETURN(HA_ERR_INTERNAL_ERROR); } void ha_rocksdb::free_key_buffers() { @@ -4595,17 +5163,23 @@ void ha_rocksdb::set_skip_unique_check_tables(const char *const whitelist) { m_skip_unique_check = regex_handler.matches(m_tbl_def->base_tablename()); } +/** + @return + HA_EXIT_SUCCESS OK + other HA_ERR error code (can be SE-specific) +*/ int ha_rocksdb::open(const char *const name, int mode, uint test_if_locked) { DBUG_ENTER_FUNC(); - if (close()) { - DBUG_RETURN(HA_ERR_INTERNAL_ERROR); + int err = close(); + if (err) { + DBUG_RETURN(err); } m_table_handler = rdb_open_tables.get_table_handler(name); if (m_table_handler == nullptr) { - DBUG_RETURN(HA_ERR_INTERNAL_ERROR); + DBUG_RETURN(HA_ERR_OUT_OF_MEM); } my_core::thr_lock_data_init(&m_table_handler->m_thr_lock, &m_db_lock, @@ -4614,8 +5188,9 @@ int ha_rocksdb::open(const char *const name, int mode, uint test_if_locked) { &m_table_handler->m_io_perf_read, &stats); std::string fullname; - if (rdb_normalize_tablename(name, &fullname)) { - DBUG_RETURN(HA_ERR_INTERNAL_ERROR); + err = rdb_normalize_tablename(name, &fullname); + if (err != HA_EXIT_SUCCESS) { + DBUG_RETURN(err); } m_tbl_def = ddl_manager.find(fullname); @@ -4623,7 +5198,7 @@ int ha_rocksdb::open(const char *const name, int mode, uint test_if_locked) { my_error(ER_INTERNAL_ERROR, MYF(0), "Attempt to open a table that is not present in RocksDB-SE data " "dictionary"); - DBUG_RETURN(HA_ERR_INTERNAL_ERROR); + DBUG_RETURN(HA_ERR_ROCKSDB_INVALID_TABLE); } m_lock_rows = RDB_LOCK_NONE; @@ -4637,7 +5212,7 @@ int ha_rocksdb::open(const char *const name, int mode, uint test_if_locked) { key_used_on_scan = table->s->primary_key; // close() above has already called free_key_buffers(). No need to do it here. 
- int err = alloc_key_buffers(table, m_tbl_def); + err = alloc_key_buffers(table, m_tbl_def); if (err) { DBUG_RETURN(err); @@ -4673,13 +5248,14 @@ int ha_rocksdb::open(const char *const name, int mode, uint test_if_locked) { if (table->found_next_number_field) load_auto_incr_value(); - if (has_hidden_pk(table) && load_hidden_pk_value()) { + if (has_hidden_pk(table) && + (err = load_hidden_pk_value()) != HA_EXIT_SUCCESS) { free_key_buffers(); - DBUG_RETURN(HA_ERR_INTERNAL_ERROR); + DBUG_RETURN(err); } /* Index block size in MyRocks: used by MySQL in query optimization */ - stats.block_size = rocksdb_tbl_options.block_size; + stats.block_size = rocksdb_tbl_options->block_size; /* Determine at open whether we can use Read Free Replication or not */ set_use_read_free_rpl(THDVAR(ha_thd(), read_free_rpl_tables)); @@ -4709,6 +5285,40 @@ int ha_rocksdb::close(void) { DBUG_RETURN(HA_EXIT_SUCCESS); } +static const char *rdb_error_messages[] = { + "Table must have a PRIMARY KEY.", + "Specifying DATA DIRECTORY for an individual table is not supported.", + "Specifying INDEX DIRECTORY for an individual table is not supported.", + "RocksDB commit failed.", + "Failure during bulk load operation.", + "Found data corruption.", + "CRC checksum mismatch.", + "Invalid table.", + "Could not access RocksDB properties.", + "File I/O error during merge/sort operation.", + "RocksDB status: not found.", + "RocksDB status: corruption.", + "RocksDB status: invalid argument.", + "RocksDB status: io error.", + "RocksDB status: no space.", + "RocksDB status: merge in progress.", + "RocksDB status: incomplete.", + "RocksDB status: shutdown in progress.", + "RocksDB status: timed out.", + "RocksDB status: aborted.", + "RocksDB status: lock limit reached.", + "RocksDB status: busy.", + "RocksDB status: deadlock.", + "RocksDB status: expired.", + "RocksDB status: try again.", +}; + +static_assert((sizeof(rdb_error_messages) / sizeof(rdb_error_messages[0])) == + ((HA_ERR_ROCKSDB_LAST - 
HA_ERR_ROCKSDB_FIRST) + 1), + "Number of error messages doesn't match number of error codes"); + +static const char **rdb_get_error_messages() { return rdb_error_messages; } + bool ha_rocksdb::get_error_message(const int error, String *const buf) { DBUG_ENTER_FUNC(); @@ -4717,44 +5327,97 @@ bool ha_rocksdb::get_error_message(const int error, String *const buf) { static_assert(HA_ERR_ROCKSDB_LAST > HA_ERR_LAST, "HA_ERR_ROCKSDB_LAST > HA_ERR_LAST"); - DBUG_ASSERT(error > 0); - DBUG_ASSERT(error <= HA_ERR_ROCKSDB_LAST); DBUG_ASSERT(buf != nullptr); - Rdb_transaction *const tx = get_tx_from_thd(ha_thd()); - bool temp_error = false; + if (error == HA_ERR_LOCK_WAIT_TIMEOUT || error == HA_ERR_LOCK_DEADLOCK) { + Rdb_transaction *const tx = get_tx_from_thd(ha_thd()); + DBUG_ASSERT(tx != nullptr); + buf->append(tx->m_detailed_error); + DBUG_RETURN(true); + } + + if (error >= HA_ERR_ROCKSDB_FIRST && error <= HA_ERR_ROCKSDB_LAST) { + buf->append(rdb_error_messages[error - HA_ERR_ROCKSDB_FIRST]); + } + + // We can be called with the values which are < HA_ERR_FIRST because most + // MySQL internal functions will just return HA_EXIT_FAILURE in case of + // an error. + + DBUG_RETURN(false); +} + +/* + Generalized way to convert RocksDB status errors into MySQL error code, and + print error message. 
- switch (error) { - case HA_ERR_ROCKSDB_PK_REQUIRED: - buf->append("Table must have a PRIMARY KEY."); + Each error code below maps to a RocksDB status code found in: + rocksdb/include/rocksdb/status.h +*/ +int ha_rocksdb::rdb_error_to_mysql(const rocksdb::Status &s, + const char *opt_msg) { + DBUG_ASSERT(!s.ok()); + + int err; + switch (s.code()) { + case rocksdb::Status::Code::kOk: + err = HA_EXIT_SUCCESS; break; - case HA_ERR_ROCKSDB_UNIQUE_NOT_SUPPORTED: - buf->append("Unique indexes are not supported."); + case rocksdb::Status::Code::kNotFound: + err = HA_ERR_ROCKSDB_STATUS_NOT_FOUND; break; - case HA_ERR_ROCKSDB_TOO_MANY_LOCKS: - buf->append("Number of locks held reached @@rocksdb_max_row_locks."); + case rocksdb::Status::Code::kCorruption: + err = HA_ERR_ROCKSDB_STATUS_CORRUPTION; break; - case HA_ERR_LOCK_WAIT_TIMEOUT: - DBUG_ASSERT(tx != nullptr); - buf->append(tx->m_detailed_error); - temp_error = true; + case rocksdb::Status::Code::kNotSupported: + err = HA_ERR_ROCKSDB_STATUS_NOT_SUPPORTED; break; - case HA_ERR_ROCKSDB_TABLE_DATA_DIRECTORY_NOT_SUPPORTED: - buf->append("Specifying DATA DIRECTORY for an individual table is not " - "supported."); + case rocksdb::Status::Code::kInvalidArgument: + err = HA_ERR_ROCKSDB_STATUS_INVALID_ARGUMENT; break; - case HA_ERR_ROCKSDB_TABLE_INDEX_DIRECTORY_NOT_SUPPORTED: - buf->append("Specifying INDEX DIRECTORY for an individual table is not " - "supported."); + case rocksdb::Status::Code::kIOError: + err = (s.IsNoSpace()) ? HA_ERR_ROCKSDB_STATUS_NO_SPACE + : HA_ERR_ROCKSDB_STATUS_IO_ERROR; break; - default: - // We can be called with the values which are < HA_ERR_FIRST because most - // MySQL internal functions will just return HA_EXIT_FAILURE in case of - // an error. 
+ case rocksdb::Status::Code::kMergeInProgress: + err = HA_ERR_ROCKSDB_STATUS_MERGE_IN_PROGRESS; + break; + case rocksdb::Status::Code::kIncomplete: + err = HA_ERR_ROCKSDB_STATUS_INCOMPLETE; break; + case rocksdb::Status::Code::kShutdownInProgress: + err = HA_ERR_ROCKSDB_STATUS_SHUTDOWN_IN_PROGRESS; + break; + case rocksdb::Status::Code::kTimedOut: + err = HA_ERR_ROCKSDB_STATUS_TIMED_OUT; + break; + case rocksdb::Status::Code::kAborted: + err = (s.IsLockLimit()) ? HA_ERR_ROCKSDB_STATUS_LOCK_LIMIT + : HA_ERR_ROCKSDB_STATUS_ABORTED; + break; + case rocksdb::Status::Code::kBusy: + err = (s.IsDeadlock()) ? HA_ERR_ROCKSDB_STATUS_DEADLOCK + : HA_ERR_ROCKSDB_STATUS_BUSY; + break; + case rocksdb::Status::Code::kExpired: + err = HA_ERR_ROCKSDB_STATUS_EXPIRED; + break; + case rocksdb::Status::Code::kTryAgain: + err = HA_ERR_ROCKSDB_STATUS_TRY_AGAIN; + break; + default: + DBUG_ASSERT(0); + return -1; } - DBUG_RETURN(temp_error); + if (opt_msg) { + my_error(ER_RDB_STATUS_MSG, MYF(0), opt_msg, s.code(), + s.ToString().c_str()); + } else { + my_error(ER_RDB_STATUS_GENERAL, MYF(0), s.code(), s.ToString().c_str()); + } + + return err; } /* MyRocks supports only the following collations for indexed columns */ @@ -4898,12 +5561,10 @@ int ha_rocksdb::create_cfs( } collation_err += coll->name; } - my_printf_error( - ER_UNKNOWN_ERROR, "Unsupported collation on string indexed " - "column %s.%s Use binary collation (%s).", - MYF(0), tbl_def_arg->full_tablename().c_str(), - table_arg->key_info[i].key_part[part].field->field_name, - collation_err.c_str()); + my_error(ER_UNSUPPORTED_COLLATION, MYF(0), + tbl_def_arg->full_tablename().c_str(), + table_arg->key_info[i].key_part[part].field->field_name, + collation_err.c_str()); DBUG_RETURN(HA_EXIT_FAILURE); } } @@ -4919,32 +5580,17 @@ int ha_rocksdb::create_cfs( std::string cf_name = generate_cf_name(i, table_arg, tbl_def_arg, &per_part_match_found); - const char *const key_name = get_key_name(i, table_arg, tbl_def_arg); - - if 
(looks_like_per_index_cf_typo(cf_name.c_str())) { - my_error(ER_NOT_SUPPORTED_YET, MYF(0), - "column family name looks like a typo of $per_index_cf."); - DBUG_RETURN(HA_EXIT_FAILURE); - } - // Prevent create from using the system column family. - if (!cf_name.empty() && strcmp(DEFAULT_SYSTEM_CF_NAME, - cf_name.c_str()) == 0) { + if (cf_name == DEFAULT_SYSTEM_CF_NAME) { my_error(ER_WRONG_ARGUMENTS, MYF(0), "column family not valid for storing index data."); DBUG_RETURN(HA_EXIT_FAILURE); } - bool is_auto_cf_flag; - // Here's how `get_or_create_cf` will use the input parameters: // // `cf_name` - will be used as a CF name. - // `key_name` - will be only used in case of "$per_index_cf". - cf_handle = - cf_manager.get_or_create_cf(rdb, cf_name.c_str(), - tbl_def_arg->full_tablename(), key_name, - &is_auto_cf_flag); + cf_handle = cf_manager.get_or_create_cf(rdb, cf_name); if (!cf_handle) { DBUG_RETURN(HA_EXIT_FAILURE); @@ -4954,7 +5600,6 @@ int ha_rocksdb::create_cfs( cf.cf_handle = cf_handle; cf.is_reverse_cf = Rdb_cf_manager::is_cf_name_reverse(cf_name.c_str()); - cf.is_auto_cf = is_auto_cf_flag; cf.is_per_partition_cf = per_part_match_found; } @@ -4996,6 +5641,7 @@ int ha_rocksdb::create_inplace_key_defs( uint i; for (i = 0; i < tbl_def_arg->m_key_count; i++) { const auto &it = old_key_pos.find(get_key_name(i, table_arg, tbl_def_arg)); + if (it != old_key_pos.end()) { /* Found matching index in old table definition, so copy it over to the @@ -5003,12 +5649,9 @@ int ha_rocksdb::create_inplace_key_defs( */ const Rdb_key_def &okd = *old_key_descr[it->second]; - uint16 index_dict_version = 0; - uchar index_type = 0; - uint16 kv_version = 0; const GL_INDEX_ID gl_index_id = okd.get_gl_index_id(); - if (!dict_manager.get_index_info(gl_index_id, &index_dict_version, - &index_type, &kv_version)) { + struct Rdb_index_info index_info; + if (!dict_manager.get_index_info(gl_index_id, &index_info)) { // NO_LINT_DEBUG sql_print_error("RocksDB: Could not get index information " "for 
Index Number (%u,%u), table %s", @@ -5017,16 +5660,25 @@ int ha_rocksdb::create_inplace_key_defs( DBUG_RETURN(HA_EXIT_FAILURE); } + uint32 ttl_rec_offset = + Rdb_key_def::has_index_flag(index_info.m_index_flags, + Rdb_key_def::TTL_FLAG) + ? Rdb_key_def::calculate_index_flag_offset( + index_info.m_index_flags, Rdb_key_def::TTL_FLAG) + : UINT_MAX; + /* We can't use the copy constructor because we need to update the keynr within the pack_info for each field and the keyno of the keydef itself. */ new_key_descr[i] = std::make_shared( - okd.get_index_number(), i, okd.get_cf(), index_dict_version, - index_type, kv_version, okd.m_is_reverse_cf, okd.m_is_auto_cf, + okd.get_index_number(), i, okd.get_cf(), + index_info.m_index_dict_version, index_info.m_index_type, + index_info.m_kv_version, okd.m_is_reverse_cf, okd.m_is_per_partition_cf, okd.m_name.c_str(), - dict_manager.get_stats(gl_index_id)); + dict_manager.get_stats(gl_index_id), index_info.m_index_flags, + ttl_rec_offset, index_info.m_ttl_duration); } else if (create_key_def(table_arg, i, tbl_def_arg, &new_key_descr[i], cfs[i])) { DBUG_RETURN(HA_EXIT_FAILURE); @@ -5083,14 +5735,66 @@ std::unordered_map ha_rocksdb::get_old_key_positions( KEY *const new_key = &table_arg->key_info[it->second]; - if (!compare_key_parts(old_key, new_key)) { - old_key_pos[old_key->name] = i; + /* + Check that the key is identical between old and new tables. + If not, we still need to create a new index. + + The exception is if there is an index changed from unique to non-unique, + in these cases we don't need to rebuild as they are stored the same way in + RocksDB. + */ + bool unique_to_non_unique = + ((old_key->flags ^ new_key->flags) == HA_NOSAME) && + (old_key->flags & HA_NOSAME); + + if (compare_keys(old_key, new_key) && !unique_to_non_unique) { + continue; + } + + /* Check to make sure key parts match. 
*/ + if (compare_key_parts(old_key, new_key)) { + continue; } + + old_key_pos[old_key->name] = i; } DBUG_RETURN(old_key_pos); } +/* Check to see if two keys are identical. */ +int ha_rocksdb::compare_keys(const KEY *const old_key, + const KEY *const new_key) const { + DBUG_ENTER_FUNC(); + + DBUG_ASSERT(old_key != nullptr); + DBUG_ASSERT(new_key != nullptr); + + /* Check index name. */ + if (strcmp(old_key->name, new_key->name) != 0) { + DBUG_RETURN(HA_EXIT_FAILURE); + } + + /* If index algorithms are different then keys are different. */ + if (old_key->algorithm != new_key->algorithm) { + DBUG_RETURN(HA_EXIT_FAILURE); + } + + /* Check that the key is identical between old and new tables. */ + if ((old_key->flags ^ new_key->flags) & HA_KEYFLAG_MASK) { + DBUG_RETURN(HA_EXIT_FAILURE); + } + + /* Check index comment. (for column family changes) */ + std::string old_comment(old_key->comment.str, old_key->comment.length); + std::string new_comment(new_key->comment.str, new_key->comment.length); + if (old_comment.compare(new_comment) != 0) { + DBUG_RETURN(HA_EXIT_FAILURE); + } + + DBUG_RETURN(HA_EXIT_SUCCESS); +} + /* Check two keys to ensure that key parts within keys match */ int ha_rocksdb::compare_key_parts(const KEY *const old_key, const KEY *const new_key) const { @@ -5110,6 +5814,11 @@ int ha_rocksdb::compare_key_parts(const KEY *const old_key, new_key->key_part[i].field->field_name) != 0) { DBUG_RETURN(HA_EXIT_FAILURE); } + + /* Check if prefix index key part length has changed */ + if (old_key->key_part[i].length != new_key->key_part[i].length) { + DBUG_RETURN(HA_EXIT_FAILURE); + } } DBUG_RETURN(HA_EXIT_SUCCESS); @@ -5141,6 +5850,37 @@ int ha_rocksdb::create_key_def(const TABLE *const table_arg, const uint &i, DBUG_ASSERT(new_key_def != nullptr); DBUG_ASSERT(*new_key_def == nullptr); + uint64 ttl_duration = 0; + std::string ttl_column; + uint ttl_field_offset; + + uint err; + if ((err = Rdb_key_def::extract_ttl_duration(table_arg, tbl_def_arg, + &ttl_duration))) 
{ + DBUG_RETURN(err); + } + + if ((err = Rdb_key_def::extract_ttl_col(table_arg, tbl_def_arg, &ttl_column, + &ttl_field_offset))) { + DBUG_RETURN(err); + } + + /* We don't currently support TTL on tables with secondary keys. */ + if (ttl_duration > 0 && + (table_arg->s->keys > 1 || is_hidden_pk(i, table_arg, tbl_def_arg))) { + my_error(ER_RDB_TTL_UNSUPPORTED, MYF(0)); + DBUG_RETURN(HA_EXIT_FAILURE); + } + + /* + If TTL duration is not specified but TTL column was specified, throw an + error because TTL column requires duration. + */ + if (ttl_duration == 0 && !ttl_column.empty()) { + my_error(ER_RDB_TTL_COL_FORMAT, MYF(0), ttl_column.c_str()); + DBUG_RETURN(HA_EXIT_FAILURE); + } + const uint index_id = ddl_manager.get_and_update_next_number(&dict_manager); const uint16_t index_dict_version = Rdb_key_def::INDEX_INFO_VERSION_LATEST; uchar index_type; @@ -5159,11 +5899,30 @@ int ha_rocksdb::create_key_def(const TABLE *const table_arg, const uint &i, kv_version = sk_latest_version; } + // Use PRIMARY_FORMAT_VERSION_UPDATE1 here since it is the same value as + // SECONDARY_FORMAT_VERSION_UPDATE1 so it doesn't matter if this is a + // primary key or secondary key. + DBUG_EXECUTE_IF("MYROCKS_LEGACY_VARBINARY_FORMAT", { + kv_version = Rdb_key_def::PRIMARY_FORMAT_VERSION_UPDATE1; + }); + + uint32 index_flags = (ttl_duration > 0 ? Rdb_key_def::TTL_FLAG : 0); + + uint32 ttl_rec_offset = + Rdb_key_def::has_index_flag(index_flags, Rdb_key_def::TTL_FLAG) + ? 
Rdb_key_def::calculate_index_flag_offset(index_flags, + Rdb_key_def::TTL_FLAG) + : UINT_MAX; + const char *const key_name = get_key_name(i, table_arg, m_tbl_def); *new_key_def = std::make_shared( index_id, i, cf_info.cf_handle, index_dict_version, index_type, - kv_version, cf_info.is_reverse_cf, cf_info.is_auto_cf, - cf_info.is_per_partition_cf, key_name); + kv_version, cf_info.is_reverse_cf, cf_info.is_per_partition_cf, key_name, + Rdb_index_stats(), index_flags, ttl_rec_offset, ttl_duration); + + if (!ttl_column.empty()) { + (*new_key_def)->m_ttl_column = ttl_column; + } DBUG_RETURN(HA_EXIT_SUCCESS); } @@ -5174,13 +5933,13 @@ int rdb_normalize_tablename(const std::string &tablename, if (tablename.size() < 2 || tablename[0] != '.' || tablename[1] != '/') { DBUG_ASSERT(0); // We were not passed table name? - return HA_ERR_INTERNAL_ERROR; + return HA_ERR_ROCKSDB_INVALID_TABLE; } size_t pos = tablename.find_first_of('/', 2); if (pos == std::string::npos) { DBUG_ASSERT(0); // We were not passed table name? - return HA_ERR_INTERNAL_ERROR; + return HA_ERR_ROCKSDB_INVALID_TABLE; } *strbuf = tablename.substr(2, pos - 2) + "." + tablename.substr(pos + 1); @@ -5261,7 +6020,7 @@ int rdb_split_normalized_tablename(const std::string &fullname, /* Invalid table name? */ if (dotpos == std::string::npos) { - return HA_ERR_INTERNAL_ERROR; + return HA_ERR_ROCKSDB_INVALID_TABLE; } // Table must have a database name associated with it. @@ -5308,6 +6067,10 @@ int rdb_split_normalized_tablename(const std::string &fullname, Called from handle.cc by ha_create_table(). 
+ @return + HA_EXIT_SUCCESS OK + other HA_ERR error code (can be SE-specific) + @see ha_create_table() in handle.cc */ @@ -5332,27 +6095,24 @@ int ha_rocksdb::create(const char *const name, TABLE *const table_arg, DBUG_RETURN(HA_ERR_ROCKSDB_TABLE_INDEX_DIRECTORY_NOT_SUPPORTED); } - int res; std::string str; + int err; THD *const thd = my_core::thd_get_current_thd(); if (get_table_if_exists(name)) { if (thd->lex->sql_command == SQLCOM_TRUNCATE) { - if (delete_table(name)) { - DBUG_RETURN(HA_ERR_INTERNAL_ERROR); + err = delete_table(name); + if (err != HA_EXIT_SUCCESS) { + DBUG_RETURN(err); } } else { - if (rdb_normalize_tablename(name, &str)) { - DBUG_RETURN(HA_ERR_INTERNAL_ERROR); + err = rdb_normalize_tablename(name, &str); + if (err != HA_EXIT_SUCCESS) { + DBUG_RETURN(err); } - my_printf_error( - ER_UNKNOWN_ERROR, - "Table '%s' does not exist, but metadata information exists inside " - "MyRocks. This is a sign of data inconsistency. Please check if " - "'%s.frm' exists, and try to restore it if it does not exist.", - MYF(0), str.c_str(), name); - DBUG_RETURN(HA_ERR_INTERNAL_ERROR); + my_error(ER_METADATA_INCONSISTENCY, MYF(0), str.c_str(), name); + DBUG_RETURN(HA_ERR_ROCKSDB_CORRUPT_DATA); } } @@ -5361,32 +6121,17 @@ int ha_rocksdb::create(const char *const name, TABLE *const table_arg, passes strings like "./test/t14#P#p0" for individual partitions, while table_arg->s->table_name has none of that. 
*/ - if (rdb_normalize_tablename(name, &str)) { - DBUG_RETURN(HA_ERR_INTERNAL_ERROR); + err = rdb_normalize_tablename(name, &str); + if (err != HA_EXIT_SUCCESS) { + DBUG_RETURN(err); } if (contains_foreign_key(thd)) { - my_printf_error(ER_NOT_SUPPORTED_YET, "MyRocks does not currently support " - "foreign key constraints", - MYF(0)); - DBUG_RETURN(HA_ERR_INTERNAL_ERROR); - } - - /* - TODO(alexyang): Temporarily disable unique indexes support when there is no - Primary Key - */ - for (uint keyno = 0; keyno < table_arg->s->keys; keyno++) { - if ((table_arg->key_info[keyno].flags & HA_NOSAME) && - has_hidden_pk(table_arg)) { - my_printf_error(ER_UNKNOWN_ERROR, "Unique index support is disabled when " - "the table has no primary key. ", - MYF(0)); - DBUG_RETURN(HA_ERR_INTERNAL_ERROR); - } + my_error(ER_NOT_SUPPORTED_YET, MYF(0), + "FOREIGN KEY for the RocksDB storage engine"); + DBUG_RETURN(HA_ERR_UNSUPPORTED); } - bool write_err = false; const std::unique_ptr wb = dict_manager.begin(); rocksdb::WriteBatch *const batch = wb.get(); @@ -5407,21 +6152,28 @@ int ha_rocksdb::create(const char *const name, TABLE *const table_arg, m_tbl_def->m_key_count = n_keys; m_tbl_def->m_key_descr_arr = m_key_descr_arr; - if ((res = create_key_defs(table_arg, m_tbl_def))) { + err = create_key_defs(table_arg, m_tbl_def); + if (err != HA_EXIT_SUCCESS) { goto error; } m_pk_descr = m_key_descr_arr[pk_index(table_arg, m_tbl_def)]; dict_manager.lock(); - write_err = - ddl_manager.put_and_write(m_tbl_def, batch) || dict_manager.commit(batch); - dict_manager.unlock(); + err = ddl_manager.put_and_write(m_tbl_def, batch); + if (err != HA_EXIT_SUCCESS) { + dict_manager.unlock(); + goto error; + } - if (write_err) { + err = dict_manager.commit(batch); + if (err != HA_EXIT_SUCCESS) { + dict_manager.unlock(); goto error; } + dict_manager.unlock(); + if (create_info->auto_increment_value) m_tbl_def->m_auto_incr_val = create_info->auto_increment_value; @@ -5447,7 +6199,7 @@ int ha_rocksdb::create(const 
char *const name, TABLE *const table_arg, m_tbl_def = nullptr; m_key_descr_arr = nullptr; - DBUG_RETURN(HA_ERR_INTERNAL_ERROR); + DBUG_RETURN(err); } /** @@ -5494,7 +6246,8 @@ bool ha_rocksdb::check_keyread_allowed(uint inx, uint part, int ha_rocksdb::read_key_exact(const Rdb_key_def &kd, rocksdb::Iterator *const iter, const bool &full_key_match, - const rocksdb::Slice &key_slice) const { + const rocksdb::Slice &key_slice, + const int64_t ttl_filter_ts) { DBUG_ASSERT(iter != nullptr); /* @@ -5503,38 +6256,63 @@ int ha_rocksdb::read_key_exact(const Rdb_key_def &kd, lookup_tuple may be a prefix of the index. */ rocksdb_smart_seek(kd.m_is_reverse_cf, iter, key_slice); - if (!iter->Valid() || !kd.value_matches_prefix(iter->key(), key_slice)) { + + while (iter->Valid() && kd.value_matches_prefix(iter->key(), key_slice)) { /* - Got a record that is not equal to the lookup value, or even a record - from another table.index. + If TTL is enabled we need to check if the given key has already expired + from the POV of the current transaction. If it has, try going to the next + key. */ - return HA_ERR_KEY_NOT_FOUND; + if (kd.has_ttl() && should_hide_ttl_rec(iter->value(), ttl_filter_ts)) { + rocksdb_smart_next(kd.m_is_reverse_cf, iter); + continue; + } + + return HA_EXIT_SUCCESS; } - return HA_EXIT_SUCCESS; + + /* + Got a record that is not equal to the lookup value, or even a record + from another table.index. + */ + return HA_ERR_KEY_NOT_FOUND; } int ha_rocksdb::read_before_key(const Rdb_key_def &kd, const bool &full_key_match, - const rocksdb::Slice &key_slice) { + const rocksdb::Slice &key_slice, + const int64_t ttl_filter_ts) { /* We are looking for record with the biggest t.key such that t.key < lookup_tuple. 
*/ rocksdb_smart_seek(!kd.m_is_reverse_cf, m_scan_it, key_slice); - if (m_scan_it->Valid() && full_key_match && - kd.value_matches_prefix(m_scan_it->key(), key_slice)) { - /* We are using full key and we've hit an exact match */ - if (kd.m_is_reverse_cf) { - m_scan_it->Next(); - } else { - m_scan_it->Prev(); + + while (m_scan_it->Valid()) { + /* + We are using full key and we've hit an exact match, or... + + If TTL is enabled we need to check if the given key has already expired + from the POV of the current transaction. If it has, try going to the next + key. + */ + if ((full_key_match && + kd.value_matches_prefix(m_scan_it->key(), key_slice)) || + (kd.has_ttl() && + should_hide_ttl_rec(m_scan_it->value(), ttl_filter_ts))) { + rocksdb_smart_next(!kd.m_is_reverse_cf, m_scan_it); + continue; } + + return HA_EXIT_SUCCESS; } - return m_scan_it->Valid() ? HA_EXIT_SUCCESS : HA_ERR_KEY_NOT_FOUND; + + return HA_ERR_KEY_NOT_FOUND; } int ha_rocksdb::read_after_key(const Rdb_key_def &kd, - const rocksdb::Slice &key_slice) { + const rocksdb::Slice &key_slice, + const int64_t ttl_filter_ts) { /* We are looking for the first record such that @@ -5544,27 +6322,37 @@ int ha_rocksdb::read_after_key(const Rdb_key_def &kd, with HA_READ_KEY_OR_NEXT, $GT = '>=' */ rocksdb_smart_seek(kd.m_is_reverse_cf, m_scan_it, key_slice); + + /* + If TTL is enabled we need to check if the given key has already expired + from the POV of the current transaction. If it has, try going to the next + key. + */ + while (m_scan_it->Valid() && kd.has_ttl() && + should_hide_ttl_rec(m_scan_it->value(), ttl_filter_ts)) { + rocksdb_smart_next(kd.m_is_reverse_cf, m_scan_it); + } + return m_scan_it->Valid() ? 
HA_EXIT_SUCCESS : HA_ERR_KEY_NOT_FOUND; } -int ha_rocksdb::position_to_correct_key(const Rdb_key_def &kd, - const enum ha_rkey_function &find_flag, - const bool &full_key_match, - const uchar *const key, - const key_part_map &keypart_map, - const rocksdb::Slice &key_slice, - bool *const move_forward) { +int ha_rocksdb::position_to_correct_key( + const Rdb_key_def &kd, const enum ha_rkey_function &find_flag, + const bool &full_key_match, const uchar *const key, + const key_part_map &keypart_map, const rocksdb::Slice &key_slice, + bool *const move_forward, const int64_t ttl_filter_ts) { int rc = 0; *move_forward = true; switch (find_flag) { case HA_READ_KEY_EXACT: - rc = read_key_exact(kd, m_scan_it, full_key_match, key_slice); + rc = + read_key_exact(kd, m_scan_it, full_key_match, key_slice, ttl_filter_ts); break; case HA_READ_BEFORE_KEY: *move_forward = false; - rc = read_before_key(kd, full_key_match, key_slice); + rc = read_before_key(kd, full_key_match, key_slice, ttl_filter_ts); if (rc == 0 && !kd.covers_key(m_scan_it->key())) { /* The record we've got is not from this index */ rc = HA_ERR_KEY_NOT_FOUND; @@ -5572,7 +6360,7 @@ int ha_rocksdb::position_to_correct_key(const Rdb_key_def &kd, break; case HA_READ_AFTER_KEY: case HA_READ_KEY_OR_NEXT: - rc = read_after_key(kd, key_slice); + rc = read_after_key(kd, key_slice, ttl_filter_ts); if (rc == 0 && !kd.covers_key(m_scan_it->key())) { /* The record we've got is not from this index */ rc = HA_ERR_KEY_NOT_FOUND; @@ -5595,7 +6383,7 @@ int ha_rocksdb::position_to_correct_key(const Rdb_key_def &kd, records with prefix=lookup, we should return the last record before that. 
*/ - rc = read_before_key(kd, full_key_match, key_slice); + rc = read_before_key(kd, full_key_match, key_slice, ttl_filter_ts); if (rc == 0) { const rocksdb::Slice &rkey = m_scan_it->key(); if (!kd.covers_key(rkey)) { @@ -5705,10 +6493,11 @@ int ha_rocksdb::read_row_from_secondary_key(uchar *const buf, pk_size = kd.get_primary_key_tuple(table, *m_pk_descr, &rkey, m_pk_packed_tuple); const rocksdb::Slice &value = m_scan_it->value(); - if (pk_size == RDB_INVALID_KEY_LEN || - kd.unpack_record(table, buf, &rkey, &value, - m_verify_row_debug_checksums)) { - rc = HA_ERR_INTERNAL_ERROR; + if (pk_size == RDB_INVALID_KEY_LEN) { + rc = HA_ERR_ROCKSDB_CORRUPT_DATA; + } else { + rc = kd.unpack_record(table, buf, &rkey, &value, + m_verify_row_debug_checksums); } } else { if (kd.m_is_reverse_cf) @@ -5720,7 +6509,7 @@ int ha_rocksdb::read_row_from_secondary_key(uchar *const buf, pk_size = kd.get_primary_key_tuple(table, *m_pk_descr, &rkey, m_pk_packed_tuple); if (pk_size == RDB_INVALID_KEY_LEN) { - rc = HA_ERR_INTERNAL_ERROR; + rc = HA_ERR_ROCKSDB_CORRUPT_DATA; } else { rc = get_row_by_rowid(buf, m_pk_packed_tuple, pk_size); } @@ -5812,7 +6601,7 @@ int ha_rocksdb::secondary_index_read(const int keyno, uchar *const buf) { const uint size = m_key_descr_arr[keyno]->get_primary_key_tuple( table, *m_pk_descr, &key, m_pk_packed_tuple); if (size == RDB_INVALID_KEY_LEN) { - return HA_ERR_INTERNAL_ERROR; + return HA_ERR_ROCKSDB_CORRUPT_DATA; } m_last_rowkey.copy((const char *)m_pk_packed_tuple, size, @@ -5821,10 +6610,8 @@ int ha_rocksdb::secondary_index_read(const int keyno, uchar *const buf) { if (m_keyread_only && m_lock_rows == RDB_LOCK_NONE && !has_hidden_pk(table)) { rocksdb::Slice value = m_scan_it->value(); - if (m_key_descr_arr[keyno]->unpack_record( - table, buf, &key, &value, m_verify_row_debug_checksums)) { - return HA_ERR_INTERNAL_ERROR; - } + rc = m_key_descr_arr[keyno]->unpack_record( + table, buf, &key, &value, m_verify_row_debug_checksums); } else { rc = 
get_row_by_rowid(buf, m_pk_packed_tuple, size); } @@ -5846,6 +6633,10 @@ int ha_rocksdb::secondary_index_read(const int keyno, uchar *const buf) { The only difference from handler::read_range_first is that ha_rocksdb::read_range_first passes end_key to ha_rocksdb::index_read_map_impl function. + + @return + HA_EXIT_SUCCESS OK + other HA_ERR error code (can be SE-specific) */ int ha_rocksdb::read_range_first(const key_range *const start_key, const key_range *const end_key, @@ -5890,6 +6681,11 @@ int ha_rocksdb::read_range_first(const key_range *const start_key, } } +/** + @return + HA_EXIT_SUCCESS OK + other HA_ERR error code (can be SE-specific) +*/ int ha_rocksdb::index_read_map(uchar *const buf, const uchar *const key, key_part_map keypart_map, enum ha_rkey_function find_flag) { @@ -5910,6 +6706,10 @@ int ha_rocksdb::index_read_map(uchar *const buf, const uchar *const key, condition length is the same as rocksdb::Slice.size() of the start key. On range scan, equal condition length is MIN(start_key, end_key) of the rocksdb::Slice expression. + + @return + HA_EXIT_SUCCESS OK + other HA_ERR error code (can be SE-specific) */ int ha_rocksdb::index_read_map_impl(uchar *const buf, const uchar *const key, key_part_map keypart_map, @@ -5940,8 +6740,10 @@ int ha_rocksdb::index_read_map_impl(uchar *const buf, const uchar *const key, const uint size = kd.pack_index_tuple(table, m_pack_buffer, m_pk_packed_tuple, key, keypart_map); bool skip_lookup = is_blind_delete_enabled(); + rc = get_row_by_rowid(buf, m_pk_packed_tuple, size, - skip_lookup); + skip_lookup, false); + if (!rc && !skip_lookup) { stats.rows_read++; stats.rows_index_first++; @@ -6018,16 +6820,16 @@ int ha_rocksdb::index_read_map_impl(uchar *const buf, const uchar *const key, This will open the iterator and position it at a record that's equal or greater than the lookup tuple. 
*/ - setup_scan_iterator(kd, &slice, use_all_keys, is_ascending(kd, find_flag), - eq_cond_len); + setup_scan_iterator(kd, &slice, use_all_keys, eq_cond_len); /* Once we are positioned on from above, move to the position we really want: See storage/rocksdb/rocksdb-range-access.txt */ bool move_forward; - rc = position_to_correct_key(kd, find_flag, using_full_key, key, - keypart_map, slice, &move_forward); + rc = + position_to_correct_key(kd, find_flag, using_full_key, key, keypart_map, + slice, &move_forward, tx->m_snapshot_timestamp); if (rc) { /* This status is returned on any error */ @@ -6117,9 +6919,10 @@ int ha_rocksdb::find_icp_matching_index_rec(const bool &move_forward, } const rocksdb::Slice value = m_scan_it->value(); - if (kd.unpack_record(table, buf, &rkey, &value, - m_verify_row_debug_checksums)) { - return HA_ERR_INTERNAL_ERROR; + int err = kd.unpack_record(table, buf, &rkey, &value, + m_verify_row_debug_checksums); + if (err != HA_EXIT_SUCCESS) { + return err; } const enum icp_result icp_status = check_index_cond(); @@ -6152,6 +6955,11 @@ int ha_rocksdb::find_icp_matching_index_rec(const bool &move_forward, return HA_EXIT_SUCCESS; } +/** + @return + HA_EXIT_SUCCESS OK + other HA_ERR error code (can be SE-specific) +*/ int ha_rocksdb::index_read_last_map(uchar *const buf, const uchar *const key, key_part_map keypart_map) { DBUG_ENTER_FUNC(); @@ -6159,6 +6967,11 @@ int ha_rocksdb::index_read_last_map(uchar *const buf, const uchar *const key, DBUG_RETURN(index_read_map(buf, key, keypart_map, HA_READ_PREFIX_LAST)); } +/** + @return + HA_ADMIN_OK OK + other HA_ADMIN error code +*/ int ha_rocksdb::check(THD *const thd, HA_CHECK_OPT *const check_opt) { DBUG_ENTER_FUNC(); @@ -6390,13 +7203,13 @@ bool ha_rocksdb::is_blind_delete_enabled() Given a rowid (i.e. packed PK) as a parameter, get the record. @return - 0 - Ok - other - HA_ERR error code. 
+ HA_EXIT_SUCCESS OK + other HA_ERR error code (can be SE-specific) */ int ha_rocksdb::get_row_by_rowid(uchar *const buf, const char *const rowid, - const uint rowid_size, - const bool skip_lookup) { + const uint rowid_size, const bool skip_lookup, + const bool skip_ttl_check) { DBUG_ENTER_FUNC(); DBUG_ASSERT(buf != nullptr); @@ -6441,16 +7254,27 @@ int ha_rocksdb::get_row_by_rowid(uchar *const buf, const char *const rowid, } if (!s.IsNotFound() && !s.ok()) { - DBUG_RETURN(tx->set_status_error(table->in_use, s, *m_pk_descr, m_tbl_def)); + DBUG_RETURN(tx->set_status_error(table->in_use, s, *m_pk_descr, m_tbl_def, + m_table_handler)); } found = !s.IsNotFound(); table->status = STATUS_NOT_FOUND; if (found) { + /* If we found the record, but it's expired, pretend we didn't find it. */ + if (!skip_ttl_check && m_pk_descr->has_ttl() && + should_hide_ttl_rec(rocksdb::Slice(&m_retrieved_record.front(), + m_retrieved_record.size()), + tx->m_snapshot_timestamp)) { + DBUG_RETURN(HA_ERR_KEY_NOT_FOUND); + } + m_last_rowkey.copy((const char *)rowid, rowid_size, &my_charset_bin); rc = convert_record_from_storage_format(&key_slice, buf); - if (!rc) + + if (!rc) { table->status = 0; + } } else { /* Note: we don't need to unlock the row. 
It is intentional that we keep @@ -6462,6 +7286,11 @@ int ha_rocksdb::get_row_by_rowid(uchar *const buf, const char *const rowid, DBUG_RETURN(rc); } +/** + @return + HA_EXIT_SUCCESS OK + other HA_ERR error code (can be SE-specific) +*/ int ha_rocksdb::index_next(uchar *const buf) { DBUG_ENTER_FUNC(); @@ -6478,6 +7307,11 @@ int ha_rocksdb::index_next(uchar *const buf) { DBUG_RETURN(rc); } +/** + @return + HA_EXIT_SUCCESS OK + other HA_ERR error code (can be SE-specific) +*/ int ha_rocksdb::index_prev(uchar *const buf) { DBUG_ENTER_FUNC(); @@ -6518,6 +7352,11 @@ int ha_rocksdb::index_next_with_direction(uchar *const buf, bool move_forward) { DBUG_RETURN(rc); } +/** + @return + HA_EXIT_SUCCESS OK + other HA_ERR error code (can be SE-specific) +*/ int ha_rocksdb::index_first(uchar *const buf) { DBUG_ENTER_FUNC(); @@ -6532,6 +7371,11 @@ int ha_rocksdb::index_first(uchar *const buf) { DBUG_RETURN(rc); } +/** + @return + HA_EXIT_SUCCESS OK + other HA_ERR error code (can be SE-specific) +*/ int ha_rocksdb::index_last(uchar *const buf) { DBUG_ENTER_FUNC(); @@ -6610,8 +7454,7 @@ int ha_rocksdb::index_first_intern(uchar *const buf) { // Loop as long as we get a deadlock error AND we end up creating the // snapshot here (i.e. it did not exist prior to this) for (;;) { - setup_scan_iterator(kd, &index_key, false, !kd.m_is_reverse_cf, - Rdb_key_def::INDEX_NUMBER_SIZE); + setup_scan_iterator(kd, &index_key, false, Rdb_key_def::INDEX_NUMBER_SIZE); m_scan_it->Seek(index_key); m_skip_scan_it_next_call = true; @@ -6653,13 +7496,13 @@ int ha_rocksdb::index_first_intern(uchar *const buf) { (n+1) <---- Doesn't exist, but would be here. (n+1)-smth, or no value at all - RocksDB's Iterator::Seek($val) seeks to "at $val or first value that's - greater". We can't see to "(n)-ccc" directly, because we don't know what + RocksDB's Iterator::SeekForPrev($val) seeks to "at $val or last value that's + smaller". 
We can't seek to "(n)-ccc" directly, because we don't know what is the value of 'ccc' (the biggest record with prefix (n)). Instead, we seek to "(n+1)", which is the least possible value that's greater than any value - in index #n. Then we step one record back. + in index #n. - So, need to: it->Seek(n+1) || it->SeekToLast(); it->Prev(); + So, need to: it->SeekForPrev(n+1) A backward-ordered keyspace: @@ -6670,7 +7513,7 @@ int ha_rocksdb::index_first_intern(uchar *const buf) { ( n ) <--- Doesn't exist, but would be here. (n-1)-smth, or no value at all - So, need to: it->Seek(n) || it->SeekToLast(); it->Prev(); + So, need to: it->SeekForPrev(n) */ int ha_rocksdb::index_last_intern(uchar *const buf) { @@ -6706,8 +7549,7 @@ int ha_rocksdb::index_last_intern(uchar *const buf) { // Loop as long as we get a deadlock error AND we end up creating the // snapshot here (i.e. it did not exist prior to this) for (;;) { - setup_scan_iterator(kd, &index_key, false, kd.m_is_reverse_cf, - Rdb_key_def::INDEX_NUMBER_SIZE); + setup_scan_iterator(kd, &index_key, false, Rdb_key_def::INDEX_NUMBER_SIZE); m_scan_it->SeekForPrev(index_key); m_skip_scan_it_next_call = false; @@ -6854,18 +7696,6 @@ bool ha_rocksdb::is_pk(const uint index, const TABLE *const table_arg, is_hidden_pk(index, table_arg, tbl_def_arg); } -/* - Formats the string and returns the column family name assignment part for a - specific partition. -*/ -const std::string ha_rocksdb::gen_cf_name_qualifier_for_partition( - const std::string& prefix) { - DBUG_ASSERT(!prefix.empty()); - - return prefix + RDB_PER_PARTITION_QUALIFIER_NAME_SEP + RDB_CF_NAME_QUALIFIER - + RDB_PER_PARTITION_QUALIFIER_VALUE_SEP; -} - const char *ha_rocksdb::get_key_name(const uint index, const TABLE *const table_arg, const Rdb_tbl_def *const tbl_def_arg) { @@ -6917,53 +7747,39 @@ const std::string ha_rocksdb::generate_cf_name(const uint index, // `get_key_comment` can return `nullptr`, that's why this. std::string key_comment = comment ? 
comment : ""; - // If table has partitions then we need to check if user has requested to - // create a column family with a specific name on a per partition basis. - if (table_arg->part_info != nullptr) { - std::string partition_name = tbl_def_arg->base_partition(); - DBUG_ASSERT(!partition_name.empty()); - - // Let's fetch the comment for a index and check if there's a custom key - // name specified for a partition we are handling. - std::vector v = myrocks::parse_into_tokens(key_comment, - RDB_QUALIFIER_SEP); - std::string part_to_search = gen_cf_name_qualifier_for_partition( - partition_name); - DBUG_ASSERT(!part_to_search.empty()); - - // Basic O(N) search for a matching assignment. At most we expect maybe - // ten or so elements here. - for (const auto &it : v) { - if (it.substr(0, part_to_search.length()) == part_to_search) { - // We found a prefix match. Try to parse it as an assignment. - std::vector tokens = myrocks::parse_into_tokens(it, - RDB_PER_PARTITION_QUALIFIER_VALUE_SEP); - - // We found a custom name, it was in the form we expected it to be. - // Return that instead of whatever we initially wanted to return. In - // a case below the `foo` part will be returned to the caller. - // - // p3_cfname=foo - // - // If no value was specified then we'll return an empty string which - // later gets translated into using a default CF. - if (tokens.size() == 2) { - *per_part_match_found = true; - return tokens[1]; - } else { - return ""; - } - } - } + std::string cf_name = Rdb_key_def::parse_comment_for_qualifier( + key_comment, table_arg, tbl_def_arg, per_part_match_found, + RDB_CF_NAME_QUALIFIER); + if (table_arg->part_info != nullptr && !*per_part_match_found) { // At this point we tried to search for a custom CF name for a partition, // but none was specified. Therefore default one will be used. return ""; } - return key_comment; + // If we didn't find any partitioned/non-partitioned qualifiers, return the + // comment itself. 
NOTE: this currently handles returning the cf name + // specified in the index comment in the case of no partitions, which doesn't + // use any qualifiers at the moment. (aka its a special case) + if (cf_name.empty() && !key_comment.empty()) { + return key_comment; + } + + return cf_name; +} + +const std::string ha_rocksdb::get_table_comment(const TABLE *const table_arg) { + DBUG_ASSERT(table_arg != nullptr); + DBUG_ASSERT(table_arg->s != nullptr); + + return table_arg->s->comment.str; } +/** + @return + HA_EXIT_SUCCESS OK + other HA_ERR error code (can be SE-specific) +*/ int ha_rocksdb::write_row(uchar *const buf) { DBUG_ENTER_FUNC(); @@ -7023,9 +7839,10 @@ int ha_rocksdb::get_pk_for_update(struct update_row_info *const row_info) { row_info->new_pk_unpack_info = &m_pk_unpack_info; - size = m_pk_descr->pack_record(table, m_pack_buffer, row_info->new_data, - m_pk_packed_tuple, - row_info->new_pk_unpack_info, false); + size = + m_pk_descr->pack_record(table, m_pack_buffer, row_info->new_data, + m_pk_packed_tuple, row_info->new_pk_unpack_info, + false, 0, 0, nullptr, &row_info->ttl_pk_offset); } else if (row_info->old_data == nullptr) { row_info->hidden_pk_id = update_hidden_pk_val(); size = @@ -7037,8 +7854,9 @@ int ha_rocksdb::get_pk_for_update(struct update_row_info *const row_info) { */ size = row_info->old_pk_slice.size(); memcpy(m_pk_packed_tuple, row_info->old_pk_slice.data(), size); - if (read_hidden_pk_id_from_rowkey(&row_info->hidden_pk_id)) { - return HA_ERR_INTERNAL_ERROR; + int err = read_hidden_pk_id_from_rowkey(&row_info->hidden_pk_id); + if (err) { + return err; } } @@ -7097,8 +7915,8 @@ int ha_rocksdb::check_and_lock_unique_pk(const uint &key_id, get_for_update(row_info.tx, m_pk_descr->get_cf(), row_info.new_pk_slice, &m_retrieved_record); if (!s.ok() && !s.IsNotFound()) { - return row_info.tx->set_status_error(table->in_use, s, - *m_key_descr_arr[key_id], m_tbl_def); + return row_info.tx->set_status_error( + table->in_use, s, 
*m_key_descr_arr[key_id], m_tbl_def, m_table_handler); } *found = !s.IsNotFound(); @@ -7107,7 +7925,7 @@ int ha_rocksdb::check_and_lock_unique_pk(const uint &key_id, int ha_rocksdb::check_and_lock_sk(const uint &key_id, const struct update_row_info &row_info, - bool *const found) const { + bool *const found) { DBUG_ASSERT(found != nullptr); *found = false; @@ -7160,8 +7978,8 @@ int ha_rocksdb::check_and_lock_sk(const uint &key_id, */ if (row_info.old_data != nullptr) { size = kd.pack_record(table, m_pack_buffer, row_info.old_data, - m_sk_packed_tuple_old, nullptr, false, - row_info.hidden_pk_id, user_defined_key_parts); + m_sk_packed_tuple_old, nullptr, false, 0, + user_defined_key_parts); const rocksdb::Slice old_slice = rocksdb::Slice((const char *)m_sk_packed_tuple_old, size); @@ -7199,8 +8017,7 @@ int ha_rocksdb::check_and_lock_sk(const uint &key_id, The bloom filter may need to be disabled for this lookup. */ const bool total_order_seek = !can_use_bloom_filter( - ha_thd(), kd, new_slice, all_parts_used, - is_ascending(*m_key_descr_arr[key_id], HA_READ_KEY_EXACT)); + ha_thd(), kd, new_slice, all_parts_used); const bool fill_cache = !THDVAR(ha_thd(), skip_fill_cache); /* @@ -7210,7 +8027,8 @@ int ha_rocksdb::check_and_lock_sk(const uint &key_id, const rocksdb::Status s = get_for_update(row_info.tx, kd.get_cf(), new_slice, &dummy_value); if (!s.ok() && !s.IsNotFound()) { - return row_info.tx->set_status_error(table->in_use, s, kd, m_tbl_def); + return row_info.tx->set_status_error(table->in_use, s, kd, m_tbl_def, + m_table_handler); } rocksdb::Iterator *const iter = row_info.tx->get_iterator( @@ -7222,7 +8040,8 @@ int ha_rocksdb::check_and_lock_sk(const uint &key_id, in the transaction. 
*/ iter->Seek(new_slice); - *found = !read_key_exact(kd, iter, all_parts_used, new_slice); + *found = !read_key_exact(kd, iter, all_parts_used, new_slice, + row_info.tx->m_snapshot_timestamp); delete iter; return HA_EXIT_SUCCESS; @@ -7246,10 +8065,24 @@ int ha_rocksdb::check_uniqueness_and_lock( rc = check_and_lock_sk(key_id, row_info, &found); } - if (rc != 0) { + if (rc != HA_EXIT_SUCCESS) { return rc; } + /* + If the pk key has ttl, we may need to pretend the row wasn't + found if it is already expired. The pk record is read into + m_retrieved_record by check_and_lock_unique_pk(). + */ + if (is_pk(key_id, table, m_tbl_def) && found && m_pk_descr->has_ttl() && + should_hide_ttl_rec(rocksdb::Slice(&m_retrieved_record.front(), + m_retrieved_record.size()), + (row_info.tx->m_snapshot_timestamp + ? row_info.tx->m_snapshot_timestamp + : static_cast(std::time(nullptr))))) { + found = false; + } + if (found) { /* There is a row with this key already, so error out. */ errkey = key_id; @@ -7297,22 +8130,14 @@ int ha_rocksdb::bulk_load_key(Rdb_transaction *const tx, const Rdb_key_def &kd, if (m_sst_info == nullptr) { m_sst_info = std::make_shared( rdb, m_table_handler->m_table_name, kd.get_name(), cf, - rocksdb_db_options, THDVAR(ha_thd(), trace_sst_api)); + *rocksdb_db_options, THDVAR(ha_thd(), trace_sst_api)); tx->start_bulk_load(this); m_bulk_load_tx = tx; } DBUG_ASSERT(m_sst_info != nullptr); - int rc = m_sst_info->put(key, value); - if (rc != 0) { - my_printf_error(ER_UNKNOWN_ERROR, - "Failed to add a key to sst file writer(%s)", MYF(0), - m_sst_info->error_message().c_str()); - rc = HA_ERR_INTERNAL_ERROR; - } - - return rc; + return m_sst_info->put(key, value); } int ha_rocksdb::finalize_bulk_load() { @@ -7332,22 +8157,6 @@ int ha_rocksdb::finalize_bulk_load() { */ if (m_sst_info != nullptr) { rc = m_sst_info->commit(); - if (rc != 0) { - /* - Log the error immediately here in case the server crashes before - mysql prints via my_printf_error. 
- */ - sql_print_error("Failed to commit bulk loaded sst file to the " - "data store (%s)", - m_sst_info->error_message().c_str()); - - my_printf_error(ER_UNKNOWN_ERROR, - "Failed to commit bulk loaded sst file to the " - "data store (%s)", - MYF(0), m_sst_info->error_message().c_str()); - rc = HA_ERR_INTERNAL_ERROR; - } - m_sst_info = nullptr; m_bulk_load_tx->end_bulk_load(this); m_bulk_load_tx = nullptr; @@ -7363,14 +8172,20 @@ int ha_rocksdb::update_pk(const Rdb_key_def &kd, const bool &pk_changed) { const uint key_id = kd.get_keyno(); const bool hidden_pk = is_hidden_pk(key_id, table, m_tbl_def); - if (!hidden_pk && pk_changed) { - /* - The old key needs to be deleted. - */ + + /* + If the PK has changed, or if this PK uses single deletes and this is an + update, the old key needs to be deleted. In the single delete case, it + might be possible to have this sequence of keys: PUT(X), PUT(X), SD(X), + resulting in the first PUT(X) showing up. + */ + if (!hidden_pk && (pk_changed || ((row_info.old_pk_slice.size() > 0) && + can_use_single_delete(key_id)))) { const rocksdb::Status s = delete_or_singledelete( key_id, row_info.tx, kd.get_cf(), row_info.old_pk_slice); if (!s.ok()) { - return row_info.tx->set_status_error(table->in_use, s, kd, m_tbl_def); + return row_info.tx->set_status_error(table->in_use, s, kd, m_tbl_def, + m_table_handler); } } @@ -7378,11 +8193,13 @@ int ha_rocksdb::update_pk(const Rdb_key_def &kd, update_auto_incr_val(); } + int rc = HA_EXIT_SUCCESS; rocksdb::Slice value_slice; - convert_record_to_storage_format(row_info.new_pk_slice, - row_info.new_pk_unpack_info, &value_slice); + /* Prepare the new record to be written into RocksDB */ + if ((rc = convert_record_to_storage_format(row_info, &value_slice))) { + return rc; + } - int rc = 0; const auto cf = m_pk_descr->get_cf(); if (rocksdb_enable_bulk_load_api && THDVAR(table->in_use, bulk_load) && !hidden_pk) { @@ -7390,18 +8207,11 @@ int ha_rocksdb::update_pk(const Rdb_key_def &kd, Write the 
primary key directly to an SST file using an SstFileWriter */ rc = bulk_load_key(row_info.tx, kd, row_info.new_pk_slice, value_slice); - } else if (row_info.skip_unique_check) { + } else if (row_info.skip_unique_check || row_info.tx->m_ddl_transaction) { /* It is responsibility of the user to make sure that the data being inserted doesn't violate any unique keys. */ - row_info.tx->get_blind_write_batch()->Put(cf, row_info.new_pk_slice, - value_slice); - } else if (row_info.tx->m_ddl_transaction) { - /* - DDL statement must check for unique key conflicts. For example: - ALTER TABLE tbl DROP PRIMARY KEY, ADD PRIMARY KEY(non_unique_column) - */ row_info.tx->get_indexed_write_batch()->Put(cf, row_info.new_pk_slice, value_slice); } else { @@ -7413,7 +8223,7 @@ int ha_rocksdb::update_pk(const Rdb_key_def &kd, rc = HA_ERR_FOUND_DUPP_KEY; } else { rc = row_info.tx->set_status_error(table->in_use, s, *m_pk_descr, - m_tbl_def); + m_tbl_def, m_table_handler); } } } @@ -7486,30 +8296,14 @@ int ha_rocksdb::update_sk(const TABLE *const table_arg, const Rdb_key_def &kd, old_key_slice); } - /* - We're writing a new entry for secondary key. 
We can skip locking; we - should write to - - WriteBatchWithIndex normally (so that transaction sees the new row) - - non-indexed WriteBatch, when we don't need to see the new row: - = when doing a DDL operation and writing to a non-unique index, or - = when doing a bulk load - */ - rocksdb::WriteBatchBase *write_batch; - if ((row_info.tx->m_ddl_transaction && - !(table_arg->key_info[key_id].flags & HA_NOSAME)) || - row_info.skip_unique_check) { - write_batch = row_info.tx->get_blind_write_batch(); - } else { - write_batch = row_info.tx->get_indexed_write_batch(); - } - new_key_slice = rocksdb::Slice( reinterpret_cast(m_sk_packed_tuple), new_packed_size); new_value_slice = rocksdb::Slice(reinterpret_cast(m_sk_tails.ptr()), m_sk_tails.get_current_pos()); - write_batch->Put(kd.get_cf(), new_key_slice, new_value_slice); + row_info.tx->get_indexed_write_batch()->Put(kd.get_cf(), new_key_slice, + new_value_slice); return HA_EXIT_SUCCESS; } @@ -7526,7 +8320,7 @@ int ha_rocksdb::update_indexes(const struct update_row_info &row_info, rc = update_sk(table, kd, row_info); } - if (rc != 0) { + if (rc != HA_EXIT_SUCCESS) { return rc; } } @@ -7563,7 +8357,7 @@ int ha_rocksdb::update_write_row(const uchar *const old_data, Get the new row key into row_info.new_pk_slice */ int rc = get_pk_for_update(&row_info); - if (rc != 0) { + if (rc != HA_EXIT_SUCCESS) { DBUG_RETURN(rc); } @@ -7573,7 +8367,7 @@ int ha_rocksdb::update_write_row(const uchar *const old_data, keys. Also lock the appropriate key values. */ rc = check_uniqueness_and_lock(row_info, &pk_changed); - if (rc != 0) { + if (rc != HA_EXIT_SUCCESS) { DBUG_RETURN(rc); } } @@ -7587,12 +8381,18 @@ int ha_rocksdb::update_write_row(const uchar *const old_data, cannot be easily removed without rolling back the entire transaction. 
*/ rc = update_indexes(row_info, pk_changed); - if (rc != 0) { + if (rc != HA_EXIT_SUCCESS) { DBUG_RETURN(rc); } + if (old_data != nullptr) { + row_info.tx->incr_update_count(); + } else { + row_info.tx->incr_insert_count(); + } + if (do_bulk_commit(row_info.tx)) { - DBUG_RETURN(HA_ERR_INTERNAL_ERROR); + DBUG_RETURN(HA_ERR_ROCKSDB_BULK_LOAD); } DBUG_RETURN(HA_EXIT_SUCCESS); @@ -7605,7 +8405,6 @@ int ha_rocksdb::update_write_row(const uchar *const old_data, void ha_rocksdb::setup_scan_iterator(const Rdb_key_def &kd, rocksdb::Slice *const slice, const bool use_all_keys, - const bool is_ascending, const uint eq_cond_len) { DBUG_ASSERT(slice != nullptr); DBUG_ASSERT(slice->size() >= eq_cond_len); @@ -7615,7 +8414,7 @@ void ha_rocksdb::setup_scan_iterator(const Rdb_key_def &kd, bool skip_bloom = true; const rocksdb::Slice eq_cond(slice->data(), eq_cond_len); - if (can_use_bloom_filter(ha_thd(), kd, eq_cond, use_all_keys, is_ascending)) { + if (can_use_bloom_filter(ha_thd(), kd, eq_cond, use_all_keys)) { skip_bloom = false; } @@ -7686,6 +8485,11 @@ void ha_rocksdb::setup_iterator_for_rnd_scan() { m_skip_scan_it_next_call = true; } +/** + @return + HA_EXIT_SUCCESS OK + other HA_ERR error code (can be SE-specific) +*/ int ha_rocksdb::rnd_init(bool scan) { DBUG_ENTER_FUNC(); @@ -7707,6 +8511,11 @@ int ha_rocksdb::rnd_init(bool scan) { DBUG_RETURN(HA_EXIT_SUCCESS); } +/** + @return + HA_EXIT_SUCCESS OK + other HA_ERR error code (can be SE-specific) +*/ int ha_rocksdb::rnd_next(uchar *const buf) { DBUG_ENTER_FUNC(); @@ -7786,6 +8595,12 @@ int ha_rocksdb::rnd_next_with_direction(uchar *const buf, bool move_forward) { */ Rdb_transaction *const tx = get_or_create_tx(table->in_use); DEBUG_SYNC(ha_thd(), "rocksdb_concurrent_delete"); + + if (m_pk_descr->has_ttl() && + should_hide_ttl_rec(m_scan_it->value(), tx->m_snapshot_timestamp)) { + continue; + } + const rocksdb::Status s = get_for_update(tx, m_pk_descr->get_cf(), key, &m_retrieved_record); if (s.IsNotFound() && @@ -7798,8 
+8613,8 @@ int ha_rocksdb::rnd_next_with_direction(uchar *const buf, bool move_forward) { } if (!s.ok()) { - DBUG_RETURN( - tx->set_status_error(table->in_use, s, *m_pk_descr, m_tbl_def)); + DBUG_RETURN(tx->set_status_error(table->in_use, s, *m_pk_descr, + m_tbl_def, m_table_handler)); } // If we called get_for_update() use the value from that call not from @@ -7810,6 +8625,13 @@ int ha_rocksdb::rnd_next_with_direction(uchar *const buf, bool move_forward) { } else { // Use the value from the iterator rocksdb::Slice value = m_scan_it->value(); + + if (m_pk_descr->has_ttl() && + should_hide_ttl_rec( + value, get_or_create_tx(table->in_use)->m_snapshot_timestamp)) { + continue; + } + m_last_rowkey.copy(key.data(), key.size(), &my_charset_bin); rc = convert_record_from_storage_format(&key, &value, buf); } @@ -7835,6 +8657,11 @@ int ha_rocksdb::rnd_end() { DBUG_RETURN(HA_EXIT_SUCCESS); } +/** + @return + HA_EXIT_SUCCESS OK + other HA_ERR error code (can be SE-specific) +*/ int ha_rocksdb::index_init(uint idx, bool sorted) { DBUG_ENTER_FUNC(); @@ -7853,6 +8680,10 @@ int ha_rocksdb::index_init(uint idx, bool sorted) { DBUG_RETURN(HA_EXIT_SUCCESS); } +/** + @return + HA_EXIT_SUCCESS OK +*/ int ha_rocksdb::index_end() { DBUG_ENTER_FUNC(); @@ -7864,6 +8695,11 @@ int ha_rocksdb::index_end() { DBUG_RETURN(HA_EXIT_SUCCESS); } +/** + @return + HA_EXIT_SUCCESS OK + other HA_ERR error code (can be SE-specific) +*/ int ha_rocksdb::truncate() { DBUG_ENTER_FUNC(); @@ -7872,9 +8708,7 @@ int ha_rocksdb::truncate() { m_tbl_def->m_auto_incr_val = 1; m_tbl_def->m_hidden_pk_val = 1; - remove_rows(m_tbl_def); - - DBUG_RETURN(HA_EXIT_SUCCESS); + DBUG_RETURN(remove_rows(m_tbl_def)); } /* @@ -7884,6 +8718,10 @@ int ha_rocksdb::truncate() { The caller guarantees table buf points to the row that was just read. The row is either table->record[0] or table->record[1]. 
(Check out InnoDB: row_update_for_mysql() has "UT_NOT_USED(mysql_rec)" + + @return + HA_EXIT_SUCCESS OK + other HA_ERR error code (can be SE-specific) */ int ha_rocksdb::delete_row(const uchar *const buf) { DBUG_ENTER_FUNC(); @@ -7900,13 +8738,17 @@ int ha_rocksdb::delete_row(const uchar *const buf) { rocksdb::Status s = delete_or_singledelete(index, tx, m_pk_descr->get_cf(), key_slice); if (!s.ok()) { - DBUG_RETURN(tx->set_status_error(table->in_use, s, *m_pk_descr, m_tbl_def)); + DBUG_RETURN(tx->set_status_error(table->in_use, s, *m_pk_descr, m_tbl_def, + m_table_handler)); } longlong hidden_pk_id = 0; - if (m_tbl_def->m_key_count > 1 && has_hidden_pk(table) && - read_hidden_pk_id_from_rowkey(&hidden_pk_id)) - DBUG_RETURN(HA_ERR_INTERNAL_ERROR); + if (m_tbl_def->m_key_count > 1 && has_hidden_pk(table)) { + int err = read_hidden_pk_id_from_rowkey(&hidden_pk_id); + if (err) { + DBUG_RETURN(err); + } + } // Delete the record for every secondary index for (uint i = 0; i < m_tbl_def->m_key_count; i++) { @@ -7923,8 +8765,10 @@ int ha_rocksdb::delete_row(const uchar *const buf) { } } + tx->incr_delete_count(); + if (do_bulk_commit(tx)) { - DBUG_RETURN(HA_ERR_INTERNAL_ERROR); + DBUG_RETURN(HA_ERR_ROCKSDB_BULK_LOAD); } stats.rows_deleted++; update_row_stats(ROWS_DELETED); @@ -7961,11 +8805,16 @@ void ha_rocksdb::update_stats(void) { DBUG_VOID_RETURN; } +/** + @return + HA_EXIT_SUCCESS OK + HA_EXIT_FAILURE Error +*/ int ha_rocksdb::info(uint flag) { DBUG_ENTER_FUNC(); if (!table) - return HA_EXIT_FAILURE; + DBUG_RETURN(HA_EXIT_FAILURE); if (flag & HA_STATUS_VARIABLE) { /* @@ -8004,7 +8853,7 @@ int ha_rocksdb::info(uint flag) { uint8_t include_flags = rocksdb::DB::INCLUDE_FILES; // recompute SST files stats only if records count is 0 - if (stats.records == 0) { + if (stats.records == 0) { rdb->GetApproximateSizes(m_pk_descr->get_cf(), &r, 1, &sz, include_flags); stats.records+= sz/ROCKSDB_ASSUMED_KEY_VALUE_DISK_SIZE; @@ -8106,6 +8955,11 @@ void ha_rocksdb::position(const 
uchar *const record) { DBUG_VOID_RETURN; } +/** + @return + HA_EXIT_SUCCESS OK + other HA_ERR error code (can be SE-specific) +*/ int ha_rocksdb::rnd_pos(uchar *const buf, uchar *const pos) { DBUG_ENTER_FUNC(); @@ -8117,7 +8971,7 @@ int ha_rocksdb::rnd_pos(uchar *const buf, uchar *const pos) { len = m_pk_descr->key_length(table, rocksdb::Slice((const char *)pos, ref_length)); if (len == size_t(-1)) { - DBUG_RETURN(HA_ERR_INTERNAL_ERROR); /* Data corruption? */ + DBUG_RETURN(HA_ERR_ROCKSDB_CORRUPT_DATA); /* Data corruption? */ } rc = get_row_by_rowid(buf, pos, len); @@ -8164,6 +9018,11 @@ void ha_rocksdb::calc_updated_indexes() { } } +/** + @return + HA_EXIT_SUCCESS OK + other HA_ERR error code (can be SE-specific) +*/ int ha_rocksdb::update_row(const uchar *const old_data, uchar *const new_data) { DBUG_ENTER_FUNC(); @@ -8286,29 +9145,34 @@ const char *ha_rocksdb::thd_rocksdb_tmpdir() { return (tmp_dir); } +/** + @return + HA_EXIT_SUCCESS OK + other HA_ERR error code (cannot be SE-specific) +*/ int ha_rocksdb::external_lock(THD *const thd, int lock_type) { DBUG_ENTER_FUNC(); DBUG_ASSERT(thd != nullptr); - int res = 0; + int res = HA_EXIT_SUCCESS; int binlog_format = my_core::thd_binlog_format(thd); bool unsafe_for_binlog = THDVAR(ha_thd(), unsafe_for_binlog); if (lock_type == F_WRLCK && !thd->rli_slave && !unsafe_for_binlog && binlog_format != BINLOG_FORMAT_ROW && binlog_format != BINLOG_FORMAT_UNSPEC && my_core::thd_binlog_filter_ok(thd)) { - my_printf_error( - ER_UNKNOWN_ERROR, - "Can't execute updates on master with binlog_format != ROW.", MYF(0)); - DBUG_RETURN(HA_ERR_INTERNAL_ERROR); + my_error(ER_REQUIRE_ROW_BINLOG_FORMAT, MYF(0)); + DBUG_RETURN(HA_ERR_UNSUPPORTED); } if (lock_type == F_UNLCK) { Rdb_transaction *const tx = get_or_create_tx(thd); tx->io_perf_end_and_record(&m_io_perf); - if (!my_core::thd_test_options(thd, OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN)) { + tx->m_n_mysql_tables_in_use--; + if (tx->m_n_mysql_tables_in_use == 0 && + 
!my_core::thd_test_options(thd, OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN)) { /* Do like InnoDB: when we get here, it's time to commit a single-statement transaction. @@ -8317,18 +9181,16 @@ int ha_rocksdb::external_lock(THD *const thd, int lock_type) { for each of them, but that's ok because non-first tx->commit() calls will be no-ops. */ - res = tx->commit_or_rollback(); + if (tx->commit_or_rollback()) { + res = HA_ERR_INTERNAL_ERROR; + } } } else { if (my_core::thd_tx_isolation(thd) < ISO_READ_COMMITTED || my_core::thd_tx_isolation(thd) > ISO_REPEATABLE_READ) { - my_printf_error( - ER_UNKNOWN_ERROR, "MyRocks supports only READ COMMITTED and " - "REPEATABLE READ isolation levels. " - "Please change from current isolation " - "level %s", - MYF(0), tx_isolation_names[my_core::thd_tx_isolation(thd)]); - DBUG_RETURN(HA_ERR_INTERNAL_ERROR); + my_error(ER_ISOLATION_MODE_NOT_SUPPORTED, MYF(0), + tx_isolation_names[my_core::thd_tx_isolation(thd)]); + DBUG_RETURN(HA_ERR_UNSUPPORTED); } /* It's nice to do the following on start of every statement. The problem @@ -8346,25 +9208,15 @@ int ha_rocksdb::external_lock(THD *const thd, int lock_type) { thd->lex->sql_command == SQLCOM_REPLACE) && (thd->lex->duplicates == DUP_REPLACE || thd->lex->duplicates == DUP_UPDATE)) { - my_printf_error(ER_UNKNOWN_ERROR, - "When unique checking is disabled in MyRocks, INSERT," - "UPDATE,LOAD statements with clauses that update or " - "replace the key (i.e. " - "INSERT ON DUPLICATE KEY UPDATE, REPLACE) are not " - "allowed. 
Query: %s", - MYF(0), thd->query()); - DBUG_RETURN(HA_ERR_INTERNAL_ERROR); + my_error(ER_ON_DUPLICATE_DISABLED, MYF(0), thd->query()); + DBUG_RETURN(HA_ERR_UNSUPPORTED); } } if (lock_type == F_WRLCK) { if (tx->is_tx_read_only()) { - my_printf_error(ER_UNKNOWN_ERROR, - "Can't execute updates when you started a transaction " - "with START TRANSACTION WITH CONSISTENT [ROCKSDB] " - "SNAPSHOT.", - MYF(0)); - DBUG_RETURN(HA_ERR_INTERNAL_ERROR); + my_error(ER_UPDATES_WITH_CONSISTENT_SNAPSHOT, MYF(0)); + DBUG_RETURN(HA_ERR_UNSUPPORTED); } /* @@ -8382,6 +9234,7 @@ int ha_rocksdb::external_lock(THD *const thd, int lock_type) { tx->m_ddl_transaction = true; } } + tx->m_n_mysql_tables_in_use++; rocksdb_register_tx(rocksdb_hton, thd, tx); tx->io_perf_start(&m_io_perf); } @@ -8397,6 +9250,9 @@ int ha_rocksdb::external_lock(THD *const thd, int lock_type) { TABLES. Inside LOCK TABLES the ::external_lock method does not work to mark SQL statement borders. + + @return + HA_EXIT_SUCCESS OK */ int ha_rocksdb::start_stmt(THD *const thd, thr_lock_type lock_type) { @@ -8455,7 +9311,7 @@ static bool is_myrocks_index_empty( uchar key_buf[Rdb_key_def::INDEX_NUMBER_SIZE] = {0}; rdb_netbuf_store_uint32(key_buf, index_id); const rocksdb::Slice key = - rocksdb::Slice(reinterpret_cast(key_buf), sizeof(key_buf)); + rocksdb::Slice(reinterpret_cast(key_buf), sizeof(key_buf)); std::unique_ptr it(rdb->NewIterator(read_opts, cfh)); rocksdb_smart_seek(is_reverse_cf, it.get(), key); if (!it->Valid()) { @@ -8570,7 +9426,7 @@ Rdb_tbl_def *ha_rocksdb::get_table_if_exists(const char *const tablename) { DBUG_ASSERT(tablename != nullptr); std::string str; - if (rdb_normalize_tablename(tablename, &str)) { + if (rdb_normalize_tablename(tablename, &str) != HA_EXIT_SUCCESS) { // We were not passed table name? DBUG_ASSERT(0); return nullptr; @@ -8584,6 +9440,10 @@ Rdb_tbl_def *ha_rocksdb::get_table_if_exists(const char *const tablename) { this->table==nullptr, pk_key_descr==nullptr, etc. 
tablename points to line in form "./dbname/tablename". + + @return + HA_EXIT_SUCCESS OK + other HA_ERR error code (can be SE-specific) */ int ha_rocksdb::delete_table(const char *const tablename) { @@ -8605,21 +9465,23 @@ int ha_rocksdb::delete_table(const char *const tablename) { the persistent data dictionary). */ ddl_manager.remove(tbl, batch, true); - if (dict_manager.commit(batch)) - DBUG_RETURN(HA_ERR_INTERNAL_ERROR); + int err = dict_manager.commit(batch); + if (err) { + DBUG_RETURN(err); + } rdb_drop_idx_thread.signal(); DBUG_RETURN(HA_EXIT_SUCCESS); } -void ha_rocksdb::remove_rows(Rdb_tbl_def *const tbl) { +int ha_rocksdb::remove_rows(Rdb_tbl_def *const tbl) { const rocksdb::WriteOptions wo = rdb_get_rocksdb_write_options(handler::ha_thd()); rocksdb::ReadOptions opts; opts.total_order_seek = true; - rocksdb::Iterator *const it = rdb->NewIterator(opts); + Rdb_transaction *const tx = get_or_create_tx(table->in_use); char key_buf[MAX_KEY_LENGTH]; uint key_len; @@ -8631,6 +9493,9 @@ void ha_rocksdb::remove_rows(Rdb_tbl_def *const tbl) { for (uint i = 0; i < tbl->m_key_count; i++) { const Rdb_key_def &kd = *tbl->m_key_descr_arr[i]; kd.get_infimum_key(reinterpret_cast(key_buf), &key_len); + rocksdb::ColumnFamilyHandle *cf = kd.get_cf(); + + std::unique_ptr it(rdb->NewIterator(opts, cf)); const rocksdb::Slice table_key(key_buf, key_len); it->Seek(table_key); @@ -8640,78 +9505,81 @@ void ha_rocksdb::remove_rows(Rdb_tbl_def *const tbl) { break; } - if (can_use_single_delete(i)) - rdb->SingleDelete(wo, key); - else - rdb->Delete(wo, key); + rocksdb::Status s; + if (can_use_single_delete(i)) { + s = rdb->SingleDelete(wo, cf, key); + } else { + s = rdb->Delete(wo, cf, key); + } + + if (!s.ok()) { + return tx->set_status_error(table->in_use, s, *m_pk_descr, m_tbl_def, + m_table_handler); + } + it->Next(); } } - delete it; + + return HA_EXIT_SUCCESS; } +/** + @return + HA_EXIT_SUCCESS OK + other HA_ERR error code (cannot be SE-specific) +*/ int 
ha_rocksdb::rename_table(const char *const from, const char *const to) { DBUG_ENTER_FUNC(); DBUG_ASSERT(from != nullptr); DBUG_ASSERT(to != nullptr); - bool write_err = false; std::string from_str; std::string to_str; std::string from_db; std::string to_db; - if (rdb_normalize_tablename(from, &from_str) || - rdb_split_normalized_tablename(from_str, &from_db)) { - DBUG_RETURN(HA_ERR_INTERNAL_ERROR); + int rc = rdb_normalize_tablename(from, &from_str); + if (rc != HA_EXIT_SUCCESS) { + DBUG_RETURN(rc); } - if (rdb_normalize_tablename(to, &to_str) || - rdb_split_normalized_tablename(to_str, &to_db)) { - DBUG_RETURN(HA_ERR_INTERNAL_ERROR); + rc = rdb_split_normalized_tablename(from_str, &from_db); + if (rc != HA_EXIT_SUCCESS) { + DBUG_RETURN(rc); } - // If the user changed the database part of the name then validate that the - // 'to' database exists. - if (from_db != to_db && !rdb_database_exists(to_db)) { - DBUG_RETURN(HA_ERR_INTERNAL_ERROR); + rc = rdb_normalize_tablename(to, &to_str); + if (rc != HA_EXIT_SUCCESS) { + DBUG_RETURN(rc); } - /* Check if any index has a per-index column family */ - { - /* - We can't check this directly, because - 1. the object that referred to the old table has been close()d. - 2. this ha_rocksdb object has no connection to any table at all, it has - been just created with (TABLE*)nullptr. - - So, we have to find the old table in the ddl_manager (it's there because - it has been opened by this statement), and check is_auto_cf there. 
- */ - Rdb_tbl_def *tdef; - if (!(tdef = ddl_manager.find(from_str))) - DBUG_RETURN(HA_ERR_INTERNAL_ERROR); - - for (uint i = 0; i < tdef->m_key_count; i++) { - DBUG_ASSERT(tdef->m_key_descr_arr != nullptr); + rc = rdb_split_normalized_tablename(to_str, &to_db); + if (rc != HA_EXIT_SUCCESS) { + DBUG_RETURN(rc); + } - if (tdef->m_key_descr_arr[i]->m_is_auto_cf) { - my_error(ER_NOT_SUPPORTED_YET, MYF(0), - "ALTER TABLE on table with per-index CF"); - DBUG_RETURN(HA_ERR_INTERNAL_ERROR); - } - } + // If the user changed the database part of the name then validate that the + // 'to' database exists. + if (from_db != to_db && !rdb_database_exists(to_db)) { + // If we return a RocksDB specific error code here we get + // "error: 206 - Unknown error 206". InnoDB gets + // "error -1 - Unknown error -1" so let's match them. + DBUG_RETURN(-1); } const std::unique_ptr wb = dict_manager.begin(); rocksdb::WriteBatch *const batch = wb.get(); dict_manager.lock(); - write_err = - ddl_manager.rename(from_str, to_str, batch) || dict_manager.commit(batch); + if (ddl_manager.rename(from_str, to_str, batch)) { + rc = HA_ERR_NO_SUCH_TABLE; + } else { + rc = dict_manager.commit(batch); + } dict_manager.unlock(); - DBUG_RETURN(write_err ? HA_ERR_INTERNAL_ERROR : 0); + DBUG_RETURN(rc); } /** @@ -8735,6 +9603,10 @@ bool ha_rocksdb::check_if_incompatible_data(HA_CREATE_INFO *const info, DBUG_RETURN(COMPATIBLE_DATA_NO); } +/** + @return + HA_EXIT_SUCCESS OK +*/ int ha_rocksdb::extra(enum ha_extra_function operation) { DBUG_ENTER_FUNC(); @@ -8887,6 +9759,10 @@ void ha_rocksdb::update_create_info(HA_CREATE_INFO *const create_info) { the first row of the next index id. When using reverse order column family, the first row of the next index id should be the last row of the previous index id. 
+ + @return + HA_ADMIN_OK OK + other HA_ADMIN error code */ int ha_rocksdb::optimize(THD *const thd, HA_CHECK_OPT *const check_opt) { DBUG_ENTER_FUNC(); @@ -8894,20 +9770,18 @@ int ha_rocksdb::optimize(THD *const thd, HA_CHECK_OPT *const check_opt) { DBUG_ASSERT(thd != nullptr); DBUG_ASSERT(check_opt != nullptr); - int rc = 0; for (uint i = 0; i < table->s->keys; i++) { uchar buf[Rdb_key_def::INDEX_NUMBER_SIZE * 2]; auto range = get_range(i, buf); - if (!rdb->CompactRange(rocksdb::CompactRangeOptions(), - m_key_descr_arr[i]->get_cf(), &range.start, - &range.limit) - .ok()) { - rc = 1; - break; + const rocksdb::Status s = rdb->CompactRange(rocksdb::CompactRangeOptions(), + m_key_descr_arr[i]->get_cf(), + &range.start, &range.limit); + if (!s.ok()) { + DBUG_RETURN(rdb_error_to_mysql(s)); } } - DBUG_RETURN(rc); + DBUG_RETURN(HA_EXIT_SUCCESS); } int ha_rocksdb::calculate_stats(const TABLE *const table_arg, THD *const thd, @@ -8945,8 +9819,10 @@ int ha_rocksdb::calculate_stats(const TABLE *const table_arg, THD *const thd, const auto status = rdb->GetPropertiesOfTablesInRange( it.first, &it.second[0], it.second.size(), &props); DBUG_ASSERT(props.size() >= old_size); - if (!status.ok()) - DBUG_RETURN(HA_ERR_INTERNAL_ERROR); + if (!status.ok()) { + DBUG_RETURN( + rdb_error_to_mysql(status, "Could not access RocksDB properties")); + } } int num_sst = 0; @@ -8992,14 +9868,19 @@ int ha_rocksdb::calculate_stats(const TABLE *const table_arg, THD *const thd, DBUG_RETURN(HA_EXIT_SUCCESS); } +/* + @return + HA_ADMIN_OK OK + other HA_ADMIN error code +*/ int ha_rocksdb::analyze(THD *const thd, HA_CHECK_OPT *const check_opt) { DBUG_ENTER_FUNC(); - if (!table) { - DBUG_RETURN(HA_EXIT_FAILURE); + if (table && calculate_stats(table, thd, check_opt) != HA_EXIT_SUCCESS) { + DBUG_RETURN(HA_ADMIN_FAILED); } - DBUG_RETURN(calculate_stats(table, thd, check_opt)); + DBUG_RETURN(HA_ADMIN_OK); } void ha_rocksdb::get_auto_increment(ulonglong off, ulonglong inc, @@ -9027,27 +9908,61 @@ void 
ha_rocksdb::get_auto_increment(ulonglong off, ulonglong inc, off = 1; } + longlong new_val; + // Local variable reference to simplify code below std::atomic &auto_incr = m_tbl_def->m_auto_incr_val; if (inc == 1 && off == 1) { // Optimization for the standard case where we are always simply // incrementing from the last position - *first_value = auto_incr++; + + // Use CAS operation in a loop to make sure automically get the next auto + // increment value while ensuring tha we don't wrap around to a negative + // number. + new_val = auto_incr; + while (new_val != std::numeric_limits::max()) { + if (auto_incr.compare_exchange_weak(new_val, new_val + 1)) { + break; + } + } } else { // The next value can be more complicated if either `inc` or 'off' is not 1 - longlong new_val; longlong last_val = auto_incr; // Loop until we can correctly update the atomic value do { + if (((last_val - off) / inc) == + (std::numeric_limits::max() - off) / inc) { + // The 'last_val' value is already equal to or larger than the largest + // value in the sequence. Continuing would wrap around (technically + // the behavior would be undefined). What should we do? + // We could: + // 1) set the new value to the last possible number in our sequence + // as described above. The problem with this is that this + // number could be smaller than a value in an existing row. + // 2) set the new value to the largest possible number. This number + // may not be in our sequence, but it is guaranteed to be equal + // to or larger than any other value already inserted. + // + // For now I'm going to take option @2. + new_val = std::numeric_limits::max(); + auto_incr = new_val; // Store the largest value into auto_incr + break; + } + // Calculate the next value in the auto increment series: // offset + N * increment // where N is 0, 1, 2, ... 
// // For further information please visit: // http://dev.mysql.com/doc/refman/5.7/en/replication-options-master.html - new_val = ((last_val + (inc - off) - 1) / inc) * inc + off; + // + // The following is confusing so here is an explanation: + // To get the next number in the sequence above you subtract out + // the offset, calculate the next sequence (N * increment) and then add + // the offset back in. + new_val = (((last_val - off) + (inc - 1)) / inc) * inc + off; // Attempt to store the new value (plus 1 since m_auto_incr_val contains // the next available value) into the atomic value. If the current @@ -9055,10 +9970,9 @@ void ha_rocksdb::get_auto_increment(ulonglong off, ulonglong inc, // we will repeat the loop (`last_val` will automatically get updated // with the current value). } while (!auto_incr.compare_exchange_weak(last_val, new_val + 1)); - - *first_value = new_val; } + *first_value = new_val; *nb_reserved_values = 1; } @@ -9147,47 +10061,6 @@ enum icp_result ha_rocksdb::check_index_cond() const { return pushed_idx_cond->val_int() ? ICP_MATCH : ICP_NO_MATCH; } -/** - Checking if an index is used for ascending scan or not - - @detail - Currently RocksDB does not support bloom filter for - prefix lookup + descending scan, but supports bloom filter for - prefix lookup + ascending scan. This function returns true if - the scan pattern is absolutely ascending. 
- @param kd - @param find_flag -*/ -bool ha_rocksdb::is_ascending(const Rdb_key_def &kd, - enum ha_rkey_function find_flag) const { - bool is_ascending; - switch (find_flag) { - case HA_READ_KEY_EXACT: { - is_ascending = !kd.m_is_reverse_cf; - break; - } - case HA_READ_PREFIX: { - is_ascending = true; - break; - } - case HA_READ_KEY_OR_NEXT: - case HA_READ_AFTER_KEY: { - is_ascending = !kd.m_is_reverse_cf; - break; - } - case HA_READ_KEY_OR_PREV: - case HA_READ_BEFORE_KEY: - case HA_READ_PREFIX_LAST: - case HA_READ_PREFIX_LAST_OR_PREV: { - is_ascending = kd.m_is_reverse_cf; - break; - } - default: - is_ascending = false; - } - return is_ascending; -} - /* Checks if inplace alter is supported for a given operation. */ @@ -9252,8 +10125,6 @@ bool ha_rocksdb::prepare_inplace_alter_table( DBUG_ASSERT(altered_table != nullptr); DBUG_ASSERT(ha_alter_info != nullptr); - const uint n_dropped_keys = ha_alter_info->index_drop_count; - const uint n_added_keys = ha_alter_info->index_add_count; const uint old_n_keys = m_tbl_def->m_key_count; uint new_n_keys = altered_table->s->keys; @@ -9261,6 +10132,7 @@ bool ha_rocksdb::prepare_inplace_alter_table( new_n_keys += 1; } + const TABLE *const old_table = table; std::shared_ptr *const old_key_descr = m_tbl_def->m_key_descr_arr; std::shared_ptr *const new_key_descr = @@ -9290,9 +10162,7 @@ bool ha_rocksdb::prepare_inplace_alter_table( new_tdef->m_key_descr_arr = nullptr; delete new_tdef; - my_printf_error(ER_UNKNOWN_ERROR, - "MyRocks failed creating new key definitions during alter.", - MYF(0)); + my_error(ER_KEY_CREATE_DURING_ALTER, MYF(0)); DBUG_RETURN(HA_EXIT_FAILURE); } @@ -9301,33 +10171,60 @@ bool ha_rocksdb::prepare_inplace_alter_table( uint i; uint j; - const KEY *key; /* Determine which(if any) key definition(s) need to be dropped */ for (i = 0; i < ha_alter_info->index_drop_count; i++) { - const KEY *const key = ha_alter_info->index_drop_buffer[i]; + const KEY *const dropped_key = ha_alter_info->index_drop_buffer[i]; for 
(j = 0; j < old_n_keys; j++) { - if (!old_key_descr[j]->m_name.compare(key->name)) { + const KEY *const old_key = + &old_table->key_info[old_key_descr[j]->get_keyno()]; + + if (!compare_keys(old_key, dropped_key)) { dropped_index_ids.insert(old_key_descr[j]->get_gl_index_id()); break; } } } - DBUG_ASSERT(dropped_index_ids.size() == ha_alter_info->index_drop_count); - /* Determine which(if any) key definitions(s) need to be added */ + int identical_indexes_found = 0; for (i = 0; i < ha_alter_info->index_add_count; i++) { - key = &ha_alter_info->key_info_buffer[ha_alter_info->index_add_buffer[i]]; + const KEY *const added_key = + &ha_alter_info->key_info_buffer[ha_alter_info->index_add_buffer[i]]; for (j = 0; j < new_n_keys; j++) { - if (!new_key_descr[j]->m_name.compare(key->name)) { - added_indexes.insert(new_key_descr[j]); + const KEY *const new_key = + &altered_table->key_info[new_key_descr[j]->get_keyno()]; + if (!compare_keys(new_key, added_key)) { + /* + Check for cases where an 'identical' index is being dropped and + re-added in a single ALTER statement. Turn this into a no-op as the + index has not changed. + + E.G. Unique index -> non-unique index requires no change + + Note that cases where the index name remains the same but the + key-parts are changed is already handled in create_inplace_key_defs. + In these cases the index needs to be rebuilt. 
+ */ + if (dropped_index_ids.count(new_key_descr[j]->get_gl_index_id())) { + dropped_index_ids.erase(new_key_descr[j]->get_gl_index_id()); + identical_indexes_found++; + } else { + added_indexes.insert(new_key_descr[j]); + } + break; } } } - DBUG_ASSERT(added_indexes.size() == ha_alter_info->index_add_count); + const uint n_dropped_keys = + ha_alter_info->index_drop_count - identical_indexes_found; + const uint n_added_keys = + ha_alter_info->index_add_count - identical_indexes_found; + DBUG_ASSERT(dropped_index_ids.size() == n_dropped_keys); + DBUG_ASSERT(added_indexes.size() == n_added_keys); + DBUG_ASSERT(new_n_keys == (old_n_keys - n_dropped_keys + n_added_keys)); ha_alter_info->handler_ctx = new Rdb_inplace_alter_ctx( new_tdef, old_key_descr, new_key_descr, old_n_keys, new_n_keys, @@ -9387,17 +10284,13 @@ bool ha_rocksdb::inplace_alter_table( altered_table, ctx->m_new_tdef, ha_alter_info->handler_flags & my_core::Alter_inplace_info::ADD_UNIQUE_INDEX))) { - my_printf_error(ER_UNKNOWN_ERROR, - "MyRocks failed allocating key buffers during alter.", - MYF(0)); + my_error(ER_OUT_OF_RESOURCES, MYF(0)); DBUG_RETURN(err); } /* Populate all new secondary keys by scanning the primary key. 
*/ if ((err = inplace_populate_sk(altered_table, ctx->m_added_indexes))) { - my_printf_error(ER_UNKNOWN_ERROR, - "MyRocks failed populating secondary key during alter.", - MYF(0)); + my_error(ER_SK_POPULATE_DURING_ALTER, MYF(0)); DBUG_RETURN(HA_EXIT_FAILURE); } } @@ -9417,6 +10310,7 @@ int ha_rocksdb::inplace_populate_sk( TABLE *const new_table_arg, const std::unordered_set> &indexes) { DBUG_ENTER_FUNC(); + int res = HA_EXIT_SUCCESS; const std::unique_ptr wb = dict_manager.begin(); rocksdb::WriteBatch *const batch = wb.get(); @@ -9427,7 +10321,10 @@ int ha_rocksdb::inplace_populate_sk( create_index_ids.insert(index->get_gl_index_id()); } dict_manager.add_create_index(create_index_ids, batch); - dict_manager.commit(batch); + res = dict_manager.commit(batch); + if (res != HA_EXIT_SUCCESS) { + return res; + } /* Add uncommitted key definitons to ddl_manager. We need to do this @@ -9440,7 +10337,6 @@ int ha_rocksdb::inplace_populate_sk( const bool hidden_pk_exists = has_hidden_pk(table); - int res = 0; Rdb_transaction *tx = get_or_create_tx(table->in_use); /* @@ -9506,11 +10402,12 @@ int ha_rocksdb::inplace_populate_sk( for (res = index_first(table->record[0]); res == 0; res = index_next(table->record[0])) { longlong hidden_pk_id = 0; - if (hidden_pk_exists && read_hidden_pk_id_from_rowkey(&hidden_pk_id)) { + if (hidden_pk_exists && + (res = read_hidden_pk_id_from_rowkey(&hidden_pk_id))) { // NO_LINT_DEBUG sql_print_error("Error retrieving hidden pk id."); ha_index_end(); - DBUG_RETURN(HA_ERR_INTERNAL_ERROR); + DBUG_RETURN(res); } /* Create new secondary index entry */ @@ -9573,7 +10470,7 @@ int ha_rocksdb::inplace_populate_sk( print_keydup_error(new_table_arg, &new_table_arg->key_info[index->get_keyno()], - MYF(0)); + MYF(0), ha_thd()); DBUG_RETURN(ER_DUP_ENTRY); } } @@ -9837,9 +10734,7 @@ struct rocksdb_status_counters_t { uint64_t no_file_closes; uint64_t no_file_opens; uint64_t no_file_errors; - uint64_t l0_slowdown_micros; - uint64_t memtable_compaction_micros; - 
uint64_t l0_num_files_stall_micros; + uint64_t stall_micros; uint64_t rate_limit_delay_millis; uint64_t num_iterators; uint64_t number_multiget_get; @@ -9893,9 +10788,7 @@ DEF_SHOW_FUNC(bytes_read, BYTES_READ) DEF_SHOW_FUNC(no_file_closes, NO_FILE_CLOSES) DEF_SHOW_FUNC(no_file_opens, NO_FILE_OPENS) DEF_SHOW_FUNC(no_file_errors, NO_FILE_ERRORS) -DEF_SHOW_FUNC(l0_slowdown_micros, STALL_L0_SLOWDOWN_MICROS) -DEF_SHOW_FUNC(memtable_compaction_micros, STALL_MEMTABLE_COMPACTION_MICROS) -DEF_SHOW_FUNC(l0_num_files_stall_micros, STALL_L0_NUM_FILES_MICROS) +DEF_SHOW_FUNC(stall_micros, STALL_MICROS) DEF_SHOW_FUNC(rate_limit_delay_millis, RATE_LIMIT_DELAY_MILLIS) DEF_SHOW_FUNC(num_iterators, NO_ITERATORS) DEF_SHOW_FUNC(number_multiget_get, NUMBER_MULTIGET_CALLS) @@ -9929,11 +10822,28 @@ static void myrocks_update_status() { export_stats.rows_read = global_stats.rows[ROWS_READ]; export_stats.rows_updated = global_stats.rows[ROWS_UPDATED]; export_stats.rows_deleted_blind = global_stats.rows[ROWS_DELETED_BLIND]; + export_stats.rows_expired = global_stats.rows[ROWS_EXPIRED]; export_stats.system_rows_deleted = global_stats.system_rows[ROWS_DELETED]; export_stats.system_rows_inserted = global_stats.system_rows[ROWS_INSERTED]; export_stats.system_rows_read = global_stats.system_rows[ROWS_READ]; export_stats.system_rows_updated = global_stats.system_rows[ROWS_UPDATED]; + + export_stats.queries_point = global_stats.queries[QUERIES_POINT]; + export_stats.queries_range = global_stats.queries[QUERIES_RANGE]; +} + +static void myrocks_update_memory_status() { + std::vector dbs; + std::unordered_set cache_set; + dbs.push_back(rdb); + std::map temp_usage_by_type; + rocksdb::MemoryUtil::GetApproximateMemoryUsageByType(dbs, cache_set, + &temp_usage_by_type); + memory_stats.memtable_total = + temp_usage_by_type[rocksdb::MemoryUtil::kMemTableTotal]; + memory_stats.memtable_unflushed = + temp_usage_by_type[rocksdb::MemoryUtil::kMemTableUnFlushed]; } static SHOW_VAR myrocks_status_variables[] = { 
@@ -9944,8 +10854,10 @@ static SHOW_VAR myrocks_status_variables[] = { DEF_STATUS_VAR_FUNC("rows_read", &export_stats.rows_read, SHOW_LONGLONG), DEF_STATUS_VAR_FUNC("rows_updated", &export_stats.rows_updated, SHOW_LONGLONG), - DEF_STATUS_VAR_FUNC("rows_deleted_blind", - &export_stats.rows_deleted_blind, SHOW_LONGLONG), + DEF_STATUS_VAR_FUNC("rows_deleted_blind", &export_stats.rows_deleted_blind, + SHOW_LONGLONG), + DEF_STATUS_VAR_FUNC("rows_expired", &export_stats.rows_expired, + SHOW_LONGLONG), DEF_STATUS_VAR_FUNC("system_rows_deleted", &export_stats.system_rows_deleted, SHOW_LONGLONG), DEF_STATUS_VAR_FUNC("system_rows_inserted", @@ -9954,11 +10866,20 @@ static SHOW_VAR myrocks_status_variables[] = { SHOW_LONGLONG), DEF_STATUS_VAR_FUNC("system_rows_updated", &export_stats.system_rows_updated, SHOW_LONGLONG), + DEF_STATUS_VAR_FUNC("memtable_total", &memory_stats.memtable_total, + SHOW_LONGLONG), + DEF_STATUS_VAR_FUNC("memtable_unflushed", &memory_stats.memtable_unflushed, + SHOW_LONGLONG), + DEF_STATUS_VAR_FUNC("queries_point", &export_stats.queries_point, + SHOW_LONGLONG), + DEF_STATUS_VAR_FUNC("queries_range", &export_stats.queries_range, + SHOW_LONGLONG), {NullS, NullS, SHOW_LONG}}; static void show_myrocks_vars(THD *thd, SHOW_VAR *var, char *buff) { myrocks_update_status(); + myrocks_update_memory_status(); var->type = SHOW_ARRAY; var->value = reinterpret_cast(&myrocks_status_variables); } @@ -9987,9 +10908,7 @@ static SHOW_VAR rocksdb_status_vars[] = { DEF_STATUS_VAR(no_file_closes), DEF_STATUS_VAR(no_file_opens), DEF_STATUS_VAR(no_file_errors), - DEF_STATUS_VAR(l0_slowdown_micros), - DEF_STATUS_VAR(memtable_compaction_micros), - DEF_STATUS_VAR(l0_num_files_stall_micros), + DEF_STATUS_VAR(stall_micros), DEF_STATUS_VAR(rate_limit_delay_millis), DEF_STATUS_VAR(num_iterators), DEF_STATUS_VAR(number_multiget_get), @@ -10080,8 +10999,8 @@ void Rdb_background_thread::run() { clock_gettime(CLOCK_REALTIME, &ts); // Flush the WAL. 
- if (rdb && rocksdb_background_sync) { - DBUG_ASSERT(!rocksdb_db_options.allow_mmap_writes); + if (rdb && (rocksdb_flush_log_at_trx_commit == 2)) { + DBUG_ASSERT(!rocksdb_db_options->allow_mmap_writes); const rocksdb::Status s = rdb->SyncWAL(); if (!s.ok()) { rdb_handle_io_error(s, RDB_IO_ERROR_BG_THREAD); @@ -10117,7 +11036,7 @@ void Rdb_background_thread::run() { */ bool can_use_bloom_filter(THD *thd, const Rdb_key_def &kd, const rocksdb::Slice &eq_cond, - const bool use_all_keys, bool is_ascending) { + const bool use_all_keys) { bool can_use = false; if (THDVAR(thd, skip_bloom_filter_on_read)) { @@ -10168,8 +11087,35 @@ rocksdb::TransactionDB *rdb_get_rocksdb_db() { return rdb; } Rdb_cf_manager &rdb_get_cf_manager() { return cf_manager; } -rocksdb::BlockBasedTableOptions &rdb_get_table_options() { - return rocksdb_tbl_options; +const rocksdb::BlockBasedTableOptions &rdb_get_table_options() { + return *rocksdb_tbl_options; +} + +bool rdb_is_ttl_enabled() { return rocksdb_enable_ttl; } +bool rdb_is_ttl_read_filtering_enabled() { + return rocksdb_enable_ttl_read_filtering; +} +#ifndef NDEBUG +int rdb_dbug_set_ttl_rec_ts() { return rocksdb_debug_ttl_rec_ts; } +int rdb_dbug_set_ttl_snapshot_ts() { return rocksdb_debug_ttl_snapshot_ts; } +int rdb_dbug_set_ttl_read_filter_ts() { + return rocksdb_debug_ttl_read_filter_ts; +} +#endif + +void rdb_update_global_stats(const operation_type &type, uint count, + bool is_system_table) { + DBUG_ASSERT(type < ROWS_MAX); + + if (count == 0) { + return; + } + + if (is_system_table) { + global_stats.system_rows[type].add(count); + } else { + global_stats.rows[type].add(count); + } } int rdb_get_table_perf_counters(const char *const tablename, @@ -10180,7 +11126,7 @@ int rdb_get_table_perf_counters(const char *const tablename, Rdb_table_handler *table_handler; table_handler = rdb_open_tables.get_table_handler(tablename); if (table_handler == nullptr) { - return HA_ERR_INTERNAL_ERROR; + return HA_ERR_ROCKSDB_INVALID_TABLE; } 
counters->load(table_handler->m_table_perf_context); @@ -10222,30 +11168,18 @@ void rdb_handle_io_error(const rocksdb::Status status, switch (err_type) { case RDB_IO_ERROR_TX_COMMIT: case RDB_IO_ERROR_DICT_COMMIT: { - /* NO_LINT_DEBUG */ - sql_print_error("MyRocks: failed to write to WAL. Error type = %s, " - "status code = %d, status = %s", - get_rdb_io_error_string(err_type), status.code(), - status.ToString().c_str()); + rdb_log_status_error(status, "failed to write to WAL"); /* NO_LINT_DEBUG */ sql_print_error("MyRocks: aborting on WAL write error."); abort_with_stack_traces(); break; } case RDB_IO_ERROR_BG_THREAD: { - /* NO_LINT_DEBUG */ - sql_print_warning("MyRocks: BG thread failed to write to RocksDB. " - "Error type = %s, status code = %d, status = %s", - get_rdb_io_error_string(err_type), status.code(), - status.ToString().c_str()); + rdb_log_status_error(status, "BG thread failed to write to RocksDB"); break; } case RDB_IO_ERROR_GENERAL: { - /* NO_LINT_DEBUG */ - sql_print_error("MyRocks: failed on I/O. Error type = %s, " - "status code = %d, status = %s", - get_rdb_io_error_string(err_type), status.code(), - status.ToString().c_str()); + rdb_log_status_error(status, "failed on I/O"); /* NO_LINT_DEBUG */ sql_print_error("MyRocks: aborting on I/O error."); abort_with_stack_traces(); @@ -10256,33 +11190,21 @@ void rdb_handle_io_error(const rocksdb::Status status, break; } } else if (status.IsCorruption()) { - /* NO_LINT_DEBUG */ - sql_print_error("MyRocks: data corruption detected! Error type = %s, " - "status code = %d, status = %s", - get_rdb_io_error_string(err_type), status.code(), - status.ToString().c_str()); + rdb_log_status_error(status, "data corruption detected!"); /* NO_LINT_DEBUG */ sql_print_error("MyRocks: aborting because of data corruption."); abort_with_stack_traces(); } else if (!status.ok()) { switch (err_type) { case RDB_IO_ERROR_DICT_COMMIT: { - /* NO_LINT_DEBUG */ - sql_print_error("MyRocks: failed to write to WAL (dictionary). 
" - "Error type = %s, status code = %d, status = %s", - get_rdb_io_error_string(err_type), status.code(), - status.ToString().c_str()); + rdb_log_status_error(status, "Failed to write to WAL (dictionary)"); /* NO_LINT_DEBUG */ sql_print_error("MyRocks: aborting on WAL write error."); abort_with_stack_traces(); break; } default: - /* NO_LINT_DEBUG */ - sql_print_warning("MyRocks: failed to read/write in RocksDB. " - "Error type = %s, status code = %d, status = %s", - get_rdb_io_error_string(err_type), status.code(), - status.ToString().c_str()); + rdb_log_status_error(status, "Failed to read/write in RocksDB"); break; } } @@ -10363,13 +11285,41 @@ void rocksdb_set_rate_limiter_bytes_per_sec( } } +void rocksdb_set_sst_mgr_rate_bytes_per_sec( + my_core::THD *const thd, + my_core::st_mysql_sys_var *const var MY_ATTRIBUTE((__unused__)), + void *const var_ptr MY_ATTRIBUTE((__unused__)), const void *const save) { + RDB_MUTEX_LOCK_CHECK(rdb_sysvars_mutex); + + const uint64_t new_val = *static_cast(save); + + if (new_val != rocksdb_sst_mgr_rate_bytes_per_sec) { + rocksdb_sst_mgr_rate_bytes_per_sec = new_val; + + rocksdb_db_options->sst_file_manager->SetDeleteRateBytesPerSecond( + rocksdb_sst_mgr_rate_bytes_per_sec); + } + + RDB_MUTEX_UNLOCK_CHECK(rdb_sysvars_mutex); +} + void rocksdb_set_delayed_write_rate(THD *thd, struct st_mysql_sys_var *var, void *var_ptr, const void *save) { + RDB_MUTEX_LOCK_CHECK(rdb_sysvars_mutex); const uint64_t new_val = *static_cast(save); if (rocksdb_delayed_write_rate != new_val) { rocksdb_delayed_write_rate = new_val; - rocksdb_db_options.delayed_write_rate = new_val; + rocksdb::Status s = + rdb->SetDBOptions({{"delayed_write_rate", std::to_string(new_val)}}); + + if (!s.ok()) { + /* NO_LINT_DEBUG */ + sql_print_warning("MyRocks: failed to update delayed_write_rate. 
" + "status code = %d, status = %s", + s.code(), s.ToString().c_str()); + } } + RDB_MUTEX_UNLOCK_CHECK(rdb_sysvars_mutex); } void rdb_set_collation_exception_list(const char *const exception_list) { @@ -10411,18 +11361,132 @@ void rocksdb_set_bulk_load(THD *const thd, struct st_mysql_sys_var *const var *static_cast(var_ptr) = *static_cast(save); } -static void rocksdb_set_max_background_compactions( - THD *thd, struct st_mysql_sys_var *const var, void *const var_ptr, - const void *const save) { +static void rocksdb_set_max_background_jobs(THD *thd, + struct st_mysql_sys_var *const var, + void *const var_ptr, + const void *const save) { DBUG_ASSERT(save != nullptr); + DBUG_ASSERT(rocksdb_db_options != nullptr); + DBUG_ASSERT(rocksdb_db_options->env != nullptr); RDB_MUTEX_LOCK_CHECK(rdb_sysvars_mutex); - rocksdb_db_options.max_background_compactions = - *static_cast(save); - rocksdb_db_options.env->SetBackgroundThreads( - rocksdb_db_options.max_background_compactions, - rocksdb::Env::Priority::LOW); + const int new_val = *static_cast(save); + + if (rocksdb_db_options->max_background_jobs != new_val) { + rocksdb_db_options->max_background_jobs = new_val; + rocksdb::Status s = + rdb->SetDBOptions({{"max_background_jobs", std::to_string(new_val)}}); + + if (!s.ok()) { + /* NO_LINT_DEBUG */ + sql_print_warning("MyRocks: failed to update max_background_jobs. " + "Status code = %d, status = %s.", + s.code(), s.ToString().c_str()); + } + } + + RDB_MUTEX_UNLOCK_CHECK(rdb_sysvars_mutex); +} + +void rocksdb_set_update_cf_options(THD *const /* unused */, + struct st_mysql_sys_var *const /* unused */, + void *const var_ptr, + const void *const save) { + const char *const val = *static_cast(save); + + if (!val) { + // NO_LINT_DEBUG + sql_print_warning("MyRocks: NULL is not a valid option for updates to " + "column family settings."); + return; + } + + RDB_MUTEX_LOCK_CHECK(rdb_sysvars_mutex); + + DBUG_ASSERT(val != nullptr); + + // Do the real work of applying the changes. 
+ Rdb_cf_options::Name_to_config_t option_map; + + // Basic sanity checking and parsing the options into a map. If this fails + // then there's no point to proceed. + if (!Rdb_cf_options::parse_cf_options(val, &option_map)) { + *reinterpret_cast(var_ptr) = nullptr; + + // NO_LINT_DEBUG + sql_print_warning("MyRocks: failed to parse the updated column family " + "options = '%s'.", val); + RDB_MUTEX_UNLOCK_CHECK(rdb_sysvars_mutex); + return; + } + + // For each CF we have, see if we need to update any settings. + for (const auto &cf_name : cf_manager.get_cf_names()) { + DBUG_ASSERT(!cf_name.empty()); + + rocksdb::ColumnFamilyHandle *cfh = cf_manager.get_cf(cf_name); + DBUG_ASSERT(cfh != nullptr); + + const auto it = option_map.find(cf_name); + std::string per_cf_options = (it != option_map.end()) ? it->second : ""; + + if (!per_cf_options.empty()) { + Rdb_cf_options::Name_to_config_t opt_map; + rocksdb::Status s = rocksdb::StringToMap(per_cf_options, &opt_map); + + if (s != rocksdb::Status::OK()) { + // NO_LINT_DEBUG + sql_print_warning("MyRocks: failed to convert the options for column " + "family '%s' to a map. %s", cf_name.c_str(), + s.ToString().c_str()); + } else { + DBUG_ASSERT(rdb != nullptr); + + // Finally we can apply the options. + s = rdb->SetOptions(cfh, opt_map); + + if (s != rocksdb::Status::OK()) { + // NO_LINT_DEBUG + sql_print_warning("MyRocks: failed to apply the options for column " + "family '%s'. %s", cf_name.c_str(), + s.ToString().c_str()); + } else { + // NO_LINT_DEBUG + sql_print_information("MyRocks: options for column family '%s' " + "have been successfully updated.", + cf_name.c_str()); + + // Make sure that data is internally consistent as well and update + // the CF options. This is necessary also to make sure that the CF + // options will be correctly reflected in the relevant table: + // ROCKSDB_CF_OPTIONS in INFORMATION_SCHEMA. 
+ rocksdb::ColumnFamilyOptions cf_options = rdb->GetOptions(cfh); + std::string updated_options; + + s = rocksdb::GetStringFromColumnFamilyOptions(&updated_options, + cf_options); + + DBUG_ASSERT(s == rocksdb::Status::OK()); + DBUG_ASSERT(!updated_options.empty()); + + cf_manager.update_options_map(cf_name, updated_options); + } + } + } + } + + // Reset the pointers regardless of how much success we had with updating + // the CF options. This will result in consistent behavior and avoids + // dealing with cases when only a subset of CF-s was successfully updated. + if (val) { + *reinterpret_cast(var_ptr) = my_strdup(val, MYF(0)); + } else { + *reinterpret_cast(var_ptr) = nullptr; + } + + // Our caller (`plugin_var_memalloc_global_update`) will call `my_free` to + // free up resources used before. RDB_MUTEX_UNLOCK_CHECK(rdb_sysvars_mutex); } diff --git a/storage/rocksdb/ha_rocksdb.h b/storage/rocksdb/ha_rocksdb.h index afb9edd9e1f26..e6d4c2e648577 100644 --- a/storage/rocksdb/ha_rocksdb.h +++ b/storage/rocksdb/ha_rocksdb.h @@ -34,6 +34,7 @@ /* RocksDB header files */ #include "rocksdb/cache.h" #include "rocksdb/perf_context.h" +#include "rocksdb/sst_file_manager.h" #include "rocksdb/statistics.h" #include "rocksdb/utilities/options_util.h" #include "rocksdb/utilities/transaction_db.h" @@ -42,6 +43,7 @@ /* MyRocks header files */ #include "./rdb_comparator.h" #include "./rdb_index_merge.h" +#include "./rdb_io_watchdog.h" #include "./rdb_perf_context.h" #include "./rdb_sst_info.h" #include "./rdb_utils.h" @@ -93,12 +95,12 @@ std::vector rdb_get_all_trx_info(); - the name used to set the default column family parameter for per-cf arguments. */ -const char *const DEFAULT_CF_NAME = "default"; +extern const std::string DEFAULT_CF_NAME; /* This is the name of the Column Family used for storing the data dictionary. 
*/ -const char *const DEFAULT_SYSTEM_CF_NAME = "__system__"; +extern const std::string DEFAULT_SYSTEM_CF_NAME; /* This is the name of the hidden primary key for tables with no pk. @@ -107,9 +109,9 @@ const char *const HIDDEN_PK_NAME = "HIDDEN_PK_ID"; /* Column family name which means "put this index into its own column family". - See Rdb_cf_manager::get_per_index_cf_name(). + DEPRECATED!!! */ -const char *const PER_INDEX_CF_NAME = "$per_index_cf"; +extern const std::string PER_INDEX_CF_NAME; /* Name for the background thread. @@ -135,7 +137,7 @@ const char RDB_PER_PARTITION_QUALIFIER_NAME_SEP = '_'; - p0_cfname=foo - p3_tts_col=bar */ -const char RDB_PER_PARTITION_QUALIFIER_VALUE_SEP = '='; +const char RDB_QUALIFIER_VALUE_SEP = '='; /* Separator between multiple qualifier assignments. Sample usage: @@ -149,6 +151,16 @@ const char RDB_QUALIFIER_SEP = ';'; */ const char *const RDB_CF_NAME_QUALIFIER = "cfname"; +/* + Qualifier name for a custom per partition ttl duration. +*/ +const char *const RDB_TTL_DURATION_QUALIFIER = "ttl_duration"; + +/* + Qualifier name for a custom per partition ttl column. +*/ +const char *const RDB_TTL_COL_QUALIFIER = "ttl_col"; + /* Default, minimal valid, and maximum valid sampling rate values when collecting statistics about table. @@ -177,12 +189,16 @@ const char *const RDB_CF_NAME_QUALIFIER = "cfname"; CPU-s and derive the values from there. This however has its own set of problems and we'll choose simplicity for now. */ -#define MAX_BACKGROUND_COMPACTIONS 64 -#define MAX_BACKGROUND_FLUSHES 64 +#define MAX_BACKGROUND_JOBS 64 #define DEFAULT_SUBCOMPACTIONS 1 #define MAX_SUBCOMPACTIONS 64 +/* + Default value for rocksdb_sst_mgr_rate_bytes_per_sec = 0 (disabled). +*/ +#define DEFAULT_SST_MGR_RATE_BYTES_PER_SEC 0 + /* Defines the field sizes for serializing XID object to a string representation. string byte format: [field_size: field_value, ...] 
@@ -214,19 +230,50 @@ const char *const RDB_CF_NAME_QUALIFIER = "cfname"; #define ROCKSDB_SIZEOF_HIDDEN_PK_COLUMN sizeof(longlong) /* - MyRocks specific error codes. NB! Please make sure that you will update - HA_ERR_ROCKSDB_LAST when adding new ones. + Bytes used to store TTL, in the beginning of all records for tables with TTL + enabled. */ -#define HA_ERR_ROCKSDB_UNIQUE_NOT_SUPPORTED (HA_ERR_LAST + 1) -#define HA_ERR_ROCKSDB_PK_REQUIRED (HA_ERR_LAST + 2) -#define HA_ERR_ROCKSDB_TOO_MANY_LOCKS (HA_ERR_LAST + 3) -#define HA_ERR_ROCKSDB_TABLE_DATA_DIRECTORY_NOT_SUPPORTED (HA_ERR_LAST + 4) -#define HA_ERR_ROCKSDB_TABLE_INDEX_DIRECTORY_NOT_SUPPORTED (HA_ERR_LAST + 5) -#define HA_ERR_ROCKSDB_LAST HA_ERR_ROCKSDB_TABLE_INDEX_DIRECTORY_NOT_SUPPORTED +#define ROCKSDB_SIZEOF_TTL_RECORD sizeof(longlong) -inline bool looks_like_per_index_cf_typo(const char *const name) { - return (name && name[0] == '$' && strcmp(name, PER_INDEX_CF_NAME)); -} +/* + MyRocks specific error codes. NB! Please make sure that you will update + HA_ERR_ROCKSDB_LAST when adding new ones. Also update the strings in + rdb_error_messages to include any new error messages. 
+*/ +#define HA_ERR_ROCKSDB_FIRST (HA_ERR_LAST + 1) +#define HA_ERR_ROCKSDB_PK_REQUIRED (HA_ERR_ROCKSDB_FIRST + 0) +#define HA_ERR_ROCKSDB_TABLE_DATA_DIRECTORY_NOT_SUPPORTED \ + (HA_ERR_ROCKSDB_FIRST + 1) +#define HA_ERR_ROCKSDB_TABLE_INDEX_DIRECTORY_NOT_SUPPORTED \ + (HA_ERR_ROCKSDB_FIRST + 2) +#define HA_ERR_ROCKSDB_COMMIT_FAILED (HA_ERR_ROCKSDB_FIRST + 3) +#define HA_ERR_ROCKSDB_BULK_LOAD (HA_ERR_ROCKSDB_FIRST + 4) +#define HA_ERR_ROCKSDB_CORRUPT_DATA (HA_ERR_ROCKSDB_FIRST + 5) +#define HA_ERR_ROCKSDB_CHECKSUM_MISMATCH (HA_ERR_ROCKSDB_FIRST + 6) +#define HA_ERR_ROCKSDB_INVALID_TABLE (HA_ERR_ROCKSDB_FIRST + 7) +#define HA_ERR_ROCKSDB_PROPERTIES (HA_ERR_ROCKSDB_FIRST + 8) +#define HA_ERR_ROCKSDB_MERGE_FILE_ERR (HA_ERR_ROCKSDB_FIRST + 9) +/* + Each error code below maps to a RocksDB status code found in: + rocksdb/include/rocksdb/status.h +*/ +#define HA_ERR_ROCKSDB_STATUS_NOT_FOUND (HA_ERR_LAST + 10) +#define HA_ERR_ROCKSDB_STATUS_CORRUPTION (HA_ERR_LAST + 11) +#define HA_ERR_ROCKSDB_STATUS_NOT_SUPPORTED (HA_ERR_LAST + 12) +#define HA_ERR_ROCKSDB_STATUS_INVALID_ARGUMENT (HA_ERR_LAST + 13) +#define HA_ERR_ROCKSDB_STATUS_IO_ERROR (HA_ERR_LAST + 14) +#define HA_ERR_ROCKSDB_STATUS_NO_SPACE (HA_ERR_LAST + 15) +#define HA_ERR_ROCKSDB_STATUS_MERGE_IN_PROGRESS (HA_ERR_LAST + 16) +#define HA_ERR_ROCKSDB_STATUS_INCOMPLETE (HA_ERR_LAST + 17) +#define HA_ERR_ROCKSDB_STATUS_SHUTDOWN_IN_PROGRESS (HA_ERR_LAST + 18) +#define HA_ERR_ROCKSDB_STATUS_TIMED_OUT (HA_ERR_LAST + 19) +#define HA_ERR_ROCKSDB_STATUS_ABORTED (HA_ERR_LAST + 20) +#define HA_ERR_ROCKSDB_STATUS_LOCK_LIMIT (HA_ERR_LAST + 21) +#define HA_ERR_ROCKSDB_STATUS_BUSY (HA_ERR_LAST + 22) +#define HA_ERR_ROCKSDB_STATUS_DEADLOCK (HA_ERR_LAST + 23) +#define HA_ERR_ROCKSDB_STATUS_EXPIRED (HA_ERR_LAST + 24) +#define HA_ERR_ROCKSDB_STATUS_TRY_AGAIN (HA_ERR_LAST + 25) +#define HA_ERR_ROCKSDB_LAST HA_ERR_ROCKSDB_STATUS_TRY_AGAIN /** @brief @@ -239,6 +286,8 @@ struct Rdb_table_handler { char *m_table_name; uint 
m_table_name_length; int m_ref_count; + atomic_stat m_lock_wait_timeout_counter; + atomic_stat m_deadlock_counter; my_core::THR_LOCK m_thr_lock; ///< MySQL latch needed by m_db_lock @@ -283,15 +332,19 @@ typedef struct _gl_index_id_s { } } GL_INDEX_ID; -enum operation_type { +enum operation_type : int { ROWS_DELETED = 0, ROWS_INSERTED, ROWS_READ, ROWS_UPDATED, ROWS_DELETED_BLIND, + ROWS_EXPIRED, + ROWS_HIDDEN_NO_SNAPSHOT, ROWS_MAX }; +enum query_type : int { QUERIES_POINT = 0, QUERIES_RANGE, QUERIES_MAX }; + #if defined(HAVE_SCHED_GETCPU) #define RDB_INDEXER get_sched_indexer_t #else @@ -305,6 +358,8 @@ struct st_global_stats { // system_rows_ stats are only for system // tables. They are not counted in rows_* stats. ib_counter_t system_rows[ROWS_MAX]; + + ib_counter_t queries[QUERIES_MAX]; }; /* Struct used for exporting status to MySQL */ @@ -314,11 +369,22 @@ struct st_export_stats { ulonglong rows_read; ulonglong rows_updated; ulonglong rows_deleted_blind; + ulonglong rows_expired; + ulonglong rows_hidden_no_snapshot; ulonglong system_rows_deleted; ulonglong system_rows_inserted; ulonglong system_rows_read; ulonglong system_rows_updated; + + ulonglong queries_point; + ulonglong queries_range; +}; + +/* Struct used for exporting RocksDB memory status */ +struct st_memory_stats { + ulonglong memtable_total; + ulonglong memtable_unflushed; }; } // namespace myrocks @@ -425,6 +491,11 @@ class ha_rocksdb : public my_core::handler { */ uchar *m_pack_buffer; + /* + Pointer to the original TTL timestamp value (8 bytes) during UPDATE. + */ + char m_ttl_bytes[ROCKSDB_SIZEOF_TTL_RECORD]; + /* rowkey of the last record we've read, in StorageFormat. 
*/ String m_last_rowkey; @@ -490,14 +561,14 @@ class ha_rocksdb : public my_core::handler { void setup_iterator_for_rnd_scan(); void setup_scan_iterator(const Rdb_key_def &kd, rocksdb::Slice *const slice) MY_ATTRIBUTE((__nonnull__)) { - setup_scan_iterator(kd, slice, false, false, 0); + setup_scan_iterator(kd, slice, false, 0); } bool is_ascending(const Rdb_key_def &keydef, enum ha_rkey_function find_flag) const MY_ATTRIBUTE((__nonnull__, __warn_unused_result__)); void setup_scan_iterator(const Rdb_key_def &kd, rocksdb::Slice *slice, - const bool use_all_keys, const bool is_ascending, - const uint eq_cond_len) MY_ATTRIBUTE((__nonnull__)); + const bool use_all_keys, const uint eq_cond_len) + MY_ATTRIBUTE((__nonnull__)); void release_scan_iterator(void); rocksdb::Status @@ -506,13 +577,15 @@ class ha_rocksdb : public my_core::handler { const rocksdb::Slice &key, std::string *const value) const; int get_row_by_rowid(uchar *const buf, const char *const rowid, - const uint rowid_size, const bool skip_lookup = false) + const uint rowid_size, const bool skip_lookup = false, + const bool skip_ttl_check = true) MY_ATTRIBUTE((__nonnull__, __warn_unused_result__)); int get_row_by_rowid(uchar *const buf, const uchar *const rowid, - const uint rowid_size, const bool skip_lookup = false) + const uint rowid_size, const bool skip_lookup = false, + const bool skip_ttl_check = true) MY_ATTRIBUTE((__nonnull__, __warn_unused_result__)) { return get_row_by_rowid(buf, reinterpret_cast(rowid), - rowid_size, skip_lookup); + rowid_size, skip_lookup, skip_ttl_check); } void update_auto_incr_val(); @@ -724,14 +797,6 @@ class ha_rocksdb : public my_core::handler { uchar *const buf) MY_ATTRIBUTE((__nonnull__, __warn_unused_result__)); - void convert_record_to_storage_format(const rocksdb::Slice &pk_packed_slice, - Rdb_string_writer *const pk_unpack_info, - rocksdb::Slice *const packed_rec) - MY_ATTRIBUTE((__nonnull__)); - - static const std::string gen_cf_name_qualifier_for_partition( - const 
std::string &s); - static const std::vector parse_into_tokens(const std::string &s, const char delim); @@ -750,6 +815,9 @@ class ha_rocksdb : public my_core::handler { const Rdb_tbl_def *const tbl_def_arg) MY_ATTRIBUTE((__nonnull__, __warn_unused_result__)); + static const std::string get_table_comment(const TABLE *const table_arg) + MY_ATTRIBUTE((__nonnull__, __warn_unused_result__)); + static bool is_hidden_pk(const uint index, const TABLE *const table_arg, const Rdb_tbl_def *const tbl_def_arg) MY_ATTRIBUTE((__nonnull__, __warn_unused_result__)); @@ -884,7 +952,6 @@ class ha_rocksdb : public my_core::handler { struct key_def_cf_info { rocksdb::ColumnFamilyHandle *cf_handle; bool is_reverse_cf; - bool is_auto_cf; bool is_per_partition_cf; }; @@ -894,12 +961,23 @@ class ha_rocksdb : public my_core::handler { const uchar *old_data; rocksdb::Slice new_pk_slice; rocksdb::Slice old_pk_slice; + rocksdb::Slice old_pk_rec; // "unpack_info" data for the new PK value Rdb_string_writer *new_pk_unpack_info; longlong hidden_pk_id; bool skip_unique_check; + + // In certain cases, TTL is enabled on a table, as well as an explicit TTL + // column. The TTL column can be part of either the key or the value part + // of the record. If it is part of the key, we store the offset here. + // + // Later on, we use this offset to store the TTL in the value part of the + // record, which we can then access in the compaction filter. + // + // Set to UINT_MAX by default to indicate that the TTL is not in key. 
+ uint ttl_pk_offset = UINT_MAX; }; /* @@ -955,6 +1033,17 @@ class ha_rocksdb : public my_core::handler { const KEY *const new_key) const; MY_ATTRIBUTE((__nonnull__, __warn_unused_result__)); + int compare_keys(const KEY *const old_key, const KEY *const new_key) const + MY_ATTRIBUTE((__nonnull__, __warn_unused_result__)); + + int convert_record_to_storage_format(const struct update_row_info &row_info, + rocksdb::Slice *const packed_rec) + MY_ATTRIBUTE((__nonnull__)); + + bool should_hide_ttl_rec(const rocksdb::Slice &ttl_rec_val, + const int64_t curr_ts) + MY_ATTRIBUTE((__warn_unused_result__)); + int index_first_intern(uchar *buf) MY_ATTRIBUTE((__nonnull__, __warn_unused_result__)); int index_last_intern(uchar *buf) @@ -975,7 +1064,7 @@ class ha_rocksdb : public my_core::handler { MY_ATTRIBUTE((__warn_unused_result__)); int check_and_lock_sk(const uint &key_id, const struct update_row_info &row_info, - bool *const found) const + bool *const found) MY_ATTRIBUTE((__warn_unused_result__)); int check_uniqueness_and_lock(const struct update_row_info &row_info, bool *const pk_changed) @@ -1000,18 +1089,22 @@ class ha_rocksdb : public my_core::handler { int read_key_exact(const Rdb_key_def &kd, rocksdb::Iterator *const iter, const bool &using_full_key, - const rocksdb::Slice &key_slice) const + const rocksdb::Slice &key_slice, + const int64_t ttl_filter_ts) MY_ATTRIBUTE((__nonnull__, __warn_unused_result__)); int read_before_key(const Rdb_key_def &kd, const bool &using_full_key, - const rocksdb::Slice &key_slice) + const rocksdb::Slice &key_slice, + const int64_t ttl_filter_ts) MY_ATTRIBUTE((__nonnull__, __warn_unused_result__)); - int read_after_key(const Rdb_key_def &kd, const rocksdb::Slice &key_slice) + int read_after_key(const Rdb_key_def &kd, const rocksdb::Slice &key_slice, + const int64_t ttl_filter_ts) MY_ATTRIBUTE((__nonnull__, __warn_unused_result__)); int position_to_correct_key( const Rdb_key_def &kd, const enum ha_rkey_function &find_flag, const bool 
&full_key_match, const uchar *const key, const key_part_map &keypart_map, const rocksdb::Slice &key_slice, - bool *const move_forward) MY_ATTRIBUTE((__warn_unused_result__)); + bool *const move_forward, const int64_t ttl_filter_ts) + MY_ATTRIBUTE((__warn_unused_result__)); int read_row_from_primary_key(uchar *const buf) MY_ATTRIBUTE((__nonnull__, __warn_unused_result__)); @@ -1091,7 +1184,7 @@ class ha_rocksdb : public my_core::handler { int check(THD *const thd, HA_CHECK_OPT *const check_opt) override MY_ATTRIBUTE((__warn_unused_result__)); - void remove_rows(Rdb_tbl_def *const tbl); + int remove_rows(Rdb_tbl_def *const tbl); ha_rows records_in_range(uint inx, key_range *const min_key, key_range *const max_key) override MY_ATTRIBUTE((__warn_unused_result__)); @@ -1121,6 +1214,10 @@ class ha_rocksdb : public my_core::handler { bool get_error_message(const int error, String *const buf) override MY_ATTRIBUTE((__nonnull__)); + static int rdb_error_to_mysql(const rocksdb::Status &s, + const char *msg = nullptr) + MY_ATTRIBUTE((__warn_unused_result__)); + void get_auto_increment(ulonglong offset, ulonglong increment, ulonglong nb_desired_values, ulonglong *const first_value, @@ -1223,5 +1320,4 @@ struct Rdb_inplace_alter_ctx : public my_core::inplace_alter_handler_ctx { Rdb_inplace_alter_ctx(const Rdb_inplace_alter_ctx &); Rdb_inplace_alter_ctx &operator=(const Rdb_inplace_alter_ctx &); }; - } // namespace myrocks diff --git a/storage/rocksdb/ha_rocksdb_proto.h b/storage/rocksdb/ha_rocksdb_proto.h index b30585f5d3797..1beb6b80622bd 100644 --- a/storage/rocksdb/ha_rocksdb_proto.h +++ b/storage/rocksdb/ha_rocksdb_proto.h @@ -69,7 +69,18 @@ rocksdb::TransactionDB *rdb_get_rocksdb_db(); class Rdb_cf_manager; Rdb_cf_manager &rdb_get_cf_manager(); -rocksdb::BlockBasedTableOptions &rdb_get_table_options(); +const rocksdb::BlockBasedTableOptions &rdb_get_table_options(); +bool rdb_is_ttl_enabled(); +bool rdb_is_ttl_read_filtering_enabled(); +#ifndef NDEBUG +int 
rdb_dbug_set_ttl_rec_ts(); +int rdb_dbug_set_ttl_snapshot_ts(); +int rdb_dbug_set_ttl_read_filter_ts(); +#endif + +enum operation_type : int; +void rdb_update_global_stats(const operation_type &type, uint count, + bool is_system_table = false); class Rdb_dict_manager; Rdb_dict_manager *rdb_get_dict_manager(void) @@ -82,5 +93,4 @@ Rdb_ddl_manager *rdb_get_ddl_manager(void) class Rdb_binlog_manager; Rdb_binlog_manager *rdb_get_binlog_manager(void) MY_ATTRIBUTE((__warn_unused_result__)); - } // namespace myrocks diff --git a/storage/rocksdb/mysql-test/rocksdb/r/add_index_inplace.result b/storage/rocksdb/mysql-test/rocksdb/r/add_index_inplace.result index a7d381fbdb1a9..f5a2b28ee421d 100644 --- a/storage/rocksdb/mysql-test/rocksdb/r/add_index_inplace.result +++ b/storage/rocksdb/mysql-test/rocksdb/r/add_index_inplace.result @@ -394,3 +394,46 @@ select 1300 < 1300 * 1.5 as "same"; same 1 DROP TABLE t1; +CREATE TABLE t1 ( +a INT PRIMARY KEY, +b INT, +c INT, +KEY kbc(b,c)) ENGINE = ROCKSDB; +INSERT INTO t1 (a,b,c) VALUES (1,1,1); +INSERT INTO t1 (a,b,c) VALUES (2,2,2); +INSERT INTO t1 (a,b,c) VALUES (3,3,3); +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` int(11) NOT NULL, + `b` int(11) DEFAULT NULL, + `c` int(11) DEFAULT NULL, + PRIMARY KEY (`a`), + KEY `kbc` (`b`,`c`) +) ENGINE=ROCKSDB DEFAULT CHARSET=latin1 +ALTER TABLE t1 DROP INDEX kbc, ADD INDEX kbc(b,c), ALGORITHM=INPLACE; +ALTER TABLE t1 DROP INDEX kbc; +DROP TABLE t1; +CREATE TABLE t1 ( +a INT PRIMARY KEY, +b varchar(10), +index kb(b(5)) +) ENGINE = ROCKSDB charset utf8 collate utf8_bin; +INSERT INTO t1 (a,b) VALUES (1,'1111122222'); +INSERT INTO t1 (a,b) VALUES (2,'2222233333'); +INSERT INTO t1 (a,b) VALUES (3,'3333344444'); +ALTER TABLE t1 DROP INDEX kb, ADD INDEX kb(b(8)), ALGORITHM=INPLACE; +SELECT * FROM t1 FORCE INDEX(kb); +a b +1 1111122222 +2 2222233333 +3 3333344444 +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` int(11) NOT NULL, + `b` varchar(10) 
COLLATE utf8_bin DEFAULT NULL, + PRIMARY KEY (`a`), + KEY `kb` (`b`(8)) +) ENGINE=ROCKSDB DEFAULT CHARSET=utf8 COLLATE=utf8_bin +DROP TABLE t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/add_unique_index_inplace.result b/storage/rocksdb/mysql-test/rocksdb/r/add_unique_index_inplace.result index dbd22a9f1f4ce..6f3ca8f1706a4 100644 --- a/storage/rocksdb/mysql-test/rocksdb/r/add_unique_index_inplace.result +++ b/storage/rocksdb/mysql-test/rocksdb/r/add_unique_index_inplace.result @@ -79,11 +79,25 @@ INSERT INTO t1 (a, b) VALUES (1, 5); INSERT INTO t1 (a, b) VALUES (2, 6); INSERT INTO t1 (a, b) VALUES (3, 7); ALTER TABLE t1 ADD UNIQUE INDEX kb(b); -ERROR HY000: Unique index support is disabled when the table has no primary key. +INSERT INTO t1 (a, b) VALUES (4, 8); +INSERT INTO t1 (a, b) VALUES (5, 5); +ERROR 23000: Duplicate entry '5' for key 'kb' SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( `a` int(11) DEFAULT NULL, - `b` int(11) DEFAULT NULL + `b` int(11) DEFAULT NULL, + UNIQUE KEY `kb` (`b`) ) ENGINE=ROCKSDB DEFAULT CHARSET=latin1 DROP TABLE t1; +CREATE TABLE t1 ( +a INT PRIMARY KEY, +b INT, +c INT, +KEY kbc(b,c)) ENGINE = ROCKSDB; +INSERT INTO t1 (a,b,c) VALUES (1,1,1); +INSERT INTO t1 (a,b,c) VALUES (2,2,2); +INSERT INTO t1 (a,b,c) VALUES (3,2,2); +ALTER TABLE t1 DROP INDEX kbc, ADD UNIQUE INDEX kbc(b,c), ALGORITHM=INPLACE; +ERROR 23000: Duplicate entry '2-2' for key 'kbc' +DROP TABLE t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/allow_no_primary_key.result b/storage/rocksdb/mysql-test/rocksdb/r/allow_no_primary_key.result index 34a14ff39d82c..d86792a64692d 100644 --- a/storage/rocksdb/mysql-test/rocksdb/r/allow_no_primary_key.result +++ b/storage/rocksdb/mysql-test/rocksdb/r/allow_no_primary_key.result @@ -229,7 +229,20 @@ Table Op Msg_type Msg_text test.t1 check status OK DROP TABLE t1, t2; CREATE TABLE t1 (a INT, b CHAR(8), UNIQUE INDEX(a)) ENGINE=rocksdb; -ERROR HY000: Unique index support is disabled when the table has no 
primary key. +INSERT INTO t1 (a,b) VALUES (1,'a'),(2,'b'); +INSERT INTO t1 (a,b) VALUES (1,'c'); +ERROR 23000: Duplicate entry '1' for key 'a' +SELECT * FROM t1; +a b +1 a +2 b +SELECT * FROM t1 WHERE a = 2; +a b +2 b +EXPLAIN SELECT * FROM t1 WHERE a = 2; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 const a a 5 const 1 NULL +DROP TABLE t1; CREATE TABLE t1 (a INT, b CHAR(8)) ENGINE=rocksdb; SHOW CREATE TABLE t1; Table Create Table diff --git a/storage/rocksdb/mysql-test/rocksdb/r/bloomfilter.result b/storage/rocksdb/mysql-test/rocksdb/r/bloomfilter.result index d65a4efea3002..10fc8446d6cac 100644 --- a/storage/rocksdb/mysql-test/rocksdb/r/bloomfilter.result +++ b/storage/rocksdb/mysql-test/rocksdb/r/bloomfilter.result @@ -817,6 +817,404 @@ id4 int not null, id5 int not null, value bigint, value2 varchar(100), +primary key (id1, id2, id3, id4) COMMENT 'rev:cf_short_prefix', +index id2 (id2) COMMENT 'rev:cf_short_prefix', +index id2_id1 (id2, id1) COMMENT 'rev:cf_short_prefix', +index id2_id3 (id2, id3) COMMENT 'rev:cf_short_prefix', +index id2_id4 (id2, id4) COMMENT 'rev:cf_short_prefix', +index id2_id3_id1_id4 (id2, id3, id1, id4) COMMENT 'rev:cf_short_prefix', +index id3_id2 (id3, id2) COMMENT 'rev:cf_short_prefix' +) engine=ROCKSDB; +create table t2 ( +id1 bigint not null, +id2 bigint not null, +id3 varchar(100) not null, +id4 int not null, +id5 int not null, +value bigint, +value2 varchar(100), +primary key (id4) COMMENT 'rev:cf_short_prefix', +index id2 (id2) COMMENT 'rev:cf_short_prefix', +index id2_id3 (id2, id3) COMMENT 'rev:cf_short_prefix', +index id2_id4 (id2, id4) COMMENT 'rev:cf_short_prefix', +index id2_id4_id5 (id2, id4, id5) COMMENT 'rev:cf_short_prefix', +index id3_id4 (id3, id4) COMMENT 'rev:cf_short_prefix', +index id3_id5 (id3, id5) COMMENT 'rev:cf_short_prefix' +) engine=ROCKSDB; +call bloom_start(); +select count(*) from t1; +count(*) +10000 +call bloom_end(); +checked +true +call bloom_start(); +select 
count(*) from t2; +count(*) +10000 +call bloom_end(); +checked +true +call bloom_start(); +select count(*) from t1 force index(PRIMARY) where id1 >= 1; +count(*) +10000 +call bloom_end(); +checked +true +call bloom_start(); +select count(*) from t1 force index(id2_id1) where id2 >= 1; +count(*) +10000 +call bloom_end(); +checked +true +call bloom_start(); +select count(*) from t2 force index(id3_id4) where id3 >= '1'; +count(*) +10000 +call bloom_end(); +checked +true +call bloom_start(); +select count(*) from t1 force index(id2_id1) where id2=2 and id1=1; +count(*) +5 +call bloom_end(); +checked +true +call bloom_start(); +select count(*) from t1 force index(id2_id1) where id2=24 and id1=12; +count(*) +5 +call bloom_end(); +checked +true +call bloom_start(); +select count(*) from t1 force index(id2_id1) where id2=88 and id1=44; +count(*) +5 +call bloom_end(); +checked +true +call bloom_start(); +select count(*) from t1 force index(id2_id1) where id2=100 and id1=50; +count(*) +5 +call bloom_end(); +checked +true +call bloom_start(); +select count(*) from t1 force index(id2_id1) where id2=428 and id1=214; +count(*) +5 +call bloom_end(); +checked +true +call bloom_start(); +select count(*) from t2 force index (id2_id4_id5) where id2=1 and id4=1 and id5=1; +count(*) +1 +call bloom_end(); +checked +true +call bloom_start(); +select count(*) from t2 force index (id2_id4_id5) where id2=23 and id4=115 and id5=115; +count(*) +1 +call bloom_end(); +checked +true +call bloom_start(); +select count(*) from t2 force index (id2_id4_id5) where id2=500 and id4=2500 and id5=2500; +count(*) +1 +call bloom_end(); +checked +true +call bloom_start(); +select count(*) from t2 force index (id2_id4_id5) where id2=601 and id4=3005 and id5=3005; +count(*) +1 +call bloom_end(); +checked +true +call bloom_start(); +select count(*) from t2 force index (id2_id3) where id2=1; +count(*) +5 +call bloom_end(); +checked +true +call bloom_start(); +select count(*) from t2 force index (id2_id3) where 
id2=23; +count(*) +5 +call bloom_end(); +checked +true +call bloom_start(); +select count(*) from t2 force index (id2_id3) where id2=345; +count(*) +5 +call bloom_end(); +checked +true +call bloom_start(); +select count(*) from t2 force index (id2_id3) where id2=456; +count(*) +5 +call bloom_end(); +checked +true +call bloom_start(); +select count(*) from t2 force index (id2_id4) where id2=1; +count(*) +5 +call bloom_end(); +checked +true +call bloom_start(); +select count(*) from t2 force index (id2_id4) where id2=23; +count(*) +5 +call bloom_end(); +checked +true +call bloom_start(); +select count(*) from t2 force index (id2_id4) where id2=345; +count(*) +5 +call bloom_end(); +checked +true +call bloom_start(); +select count(*) from t2 force index (id2_id4) where id2=456; +count(*) +5 +call bloom_end(); +checked +true +call bloom_start(); +select count(*) from t1 force index (id2_id3_id1_id4) where id2=1 and id3='1' and id1=1 order by id4; +count(*) +5 +call bloom_end(); +checked +true +call bloom_start(); +select count(*) from t1 force index (id2_id3_id1_id4) where id2=36 and id3='36' and id1=18 order by id4; +count(*) +5 +call bloom_end(); +checked +true +call bloom_start(); +select count(*) from t1 force index (id2_id3_id1_id4) where id2=124 and id3='124' and id1=62 order by id4; +count(*) +5 +call bloom_end(); +checked +true +call bloom_start(); +select count(*) from t1 force index (id2_id3_id1_id4) where id2=888 and id3='888' and id1=444 order by id4; +count(*) +5 +call bloom_end(); +checked +true +call bloom_start(); +select count(*) from t1 force index (id2_id3_id1_id4) where id2=124 and id3='124'; +count(*) +5 +call bloom_end(); +checked +true +call bloom_start(); +select count(*) from t2 force index (id2_id3) where id2=1 and id3='1' and id4=1; +count(*) +1 +call bloom_end(); +checked +true +call bloom_start(); +select count(*) from t2 force index (id2_id3) where id2=12 and id3='12' and id4=60; +count(*) +1 +call bloom_end(); +checked +true +call 
bloom_start(); +select count(*) from t1 force index (id2_id3) where id2=1 and id3='1'; +count(*) +5 +call bloom_end(); +checked +true +call bloom_start(); +select count(*) from t1 force index (id2_id3) where id2=23 and id3='23'; +count(*) +5 +call bloom_end(); +checked +true +call bloom_start(); +select count(*) from t1 force index (id3_id2) where id2=1 and id3='1'; +count(*) +5 +call bloom_end(); +checked +true +call bloom_start(); +select count(*) from t1 force index (id3_id2) where id2=23 and id3='23'; +count(*) +5 +call bloom_end(); +checked +true +call bloom_start(); +select count(*) from t1 force index (PRIMARY) where id1=1; +count(*) +10 +call bloom_end(); +checked +true +call bloom_start(); +select count(*) from t1 force index (PRIMARY) where id1=12; +count(*) +10 +call bloom_end(); +checked +true +call bloom_start(); +select count(*) from t1 force index (PRIMARY) where id1=23; +count(*) +10 +call bloom_end(); +checked +true +call bloom_start(); +select count(*) from t1 force index (PRIMARY) where id1=100; +count(*) +10 +call bloom_end(); +checked +true +call bloom_start(); +select count(*) from t1 force index (PRIMARY) where id1=234; +count(*) +10 +call bloom_end(); +checked +true +call bloom_start(); +select count(*) from t1 force index (id2_id3_id1_id4) where id2=36; +count(*) +5 +call bloom_end(); +checked +true +call bloom_start(); +select count(*) from t1 force index (id2_id3_id1_id4) where id2=234; +count(*) +5 +call bloom_end(); +checked +true +call bloom_start(); +select count(*) from t2 force index (id2) where id2=1 and id4=1; +count(*) +1 +call bloom_end(); +checked +true +call bloom_start(); +select count(*) from t2 force index (id2) where id2=23 and id4=115; +count(*) +1 +call bloom_end(); +checked +true +call bloom_start(); +select count(*) from t2 force index (id2) where id2=500 and id4=2500; +count(*) +1 +call bloom_end(); +checked +true +call bloom_start(); +select count(*) from t2 force index (id2) where id2=601 and id4=3005; +count(*) +1 
+call bloom_end(); +checked +true +call bloom_start(); +select count(*) from t2 force index (id3_id4) where id3='1' and id4=1; +count(*) +1 +call bloom_end(); +checked +true +call bloom_start(); +select count(*) from t2 force index (id3_id4) where id3='12' and id4=60; +count(*) +1 +call bloom_end(); +checked +true +call bloom_start(); +select count(*) from t1 force index (id2) where id2=1; +count(*) +5 +call bloom_end(); +checked +true +call bloom_start(); +select count(*) from t1 force index (id2) where id2=23; +count(*) +5 +call bloom_end(); +checked +true +call bloom_start(); +select count(*) from t1 force index (id2) where id2=345; +count(*) +5 +call bloom_end(); +checked +true +call bloom_start(); +select count(*) from t1 force index (id2) where id2=456; +count(*) +5 +call bloom_end(); +checked +true +call bloom_start(); +select count(*) from t2 force index (id3_id5) where id3='100' and id5=500; +count(*) +1 +call bloom_end(); +checked +true +call bloom_start(); +select count(*) from t2 force index (id3_id5) where id3='240' and id5=1200; +count(*) +1 +call bloom_end(); +checked +true +drop table if exists t1; +drop table if exists t2; +create table t1 ( +id1 bigint not null, +id2 bigint not null, +id3 varchar(100) not null, +id4 int not null, +id5 int not null, +value bigint, +value2 varchar(100), primary key (id1, id2, id3, id4) COMMENT 'cf_long_prefix', index id2 (id2) COMMENT 'cf_long_prefix', index id2_id1 (id2, id1) COMMENT 'cf_long_prefix', @@ -833,13 +1231,411 @@ id4 int not null, id5 int not null, value bigint, value2 varchar(100), -primary key (id4) COMMENT 'cf_long_prefix', -index id2 (id2) COMMENT 'cf_long_prefix', -index id2_id3 (id2, id3) COMMENT 'cf_long_prefix', -index id2_id4 (id2, id4) COMMENT 'cf_long_prefix', -index id2_id4_id5 (id2, id4, id5) COMMENT 'cf_long_prefix', -index id3_id4 (id3, id4) COMMENT 'cf_long_prefix', -index id3_id5 (id3, id5) COMMENT 'cf_long_prefix' +primary key (id4) COMMENT 'cf_long_prefix', +index id2 (id2) COMMENT 
'cf_long_prefix', +index id2_id3 (id2, id3) COMMENT 'cf_long_prefix', +index id2_id4 (id2, id4) COMMENT 'cf_long_prefix', +index id2_id4_id5 (id2, id4, id5) COMMENT 'cf_long_prefix', +index id3_id4 (id3, id4) COMMENT 'cf_long_prefix', +index id3_id5 (id3, id5) COMMENT 'cf_long_prefix' +) engine=ROCKSDB; +call bloom_start(); +select count(*) from t1; +count(*) +10000 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t2; +count(*) +10000 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index(PRIMARY) where id1 >= 1; +count(*) +10000 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index(id2_id1) where id2 >= 1; +count(*) +10000 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t2 force index(id3_id4) where id3 >= '1'; +count(*) +10000 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index(id2_id1) where id2=2 and id1=1; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index(id2_id1) where id2=24 and id1=12; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index(id2_id1) where id2=88 and id1=44; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index(id2_id1) where id2=100 and id1=50; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index(id2_id1) where id2=428 and id1=214; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t2 force index (id2_id4_id5) where id2=1 and id4=1 and id5=1; +count(*) +1 +call bloom_end(); +checked +true +call bloom_start(); +select count(*) from t2 force index (id2_id4_id5) where id2=23 and id4=115 and id5=115; +count(*) +1 +call bloom_end(); +checked +true +call bloom_start(); +select count(*) from t2 force index (id2_id4_id5) where 
id2=500 and id4=2500 and id5=2500; +count(*) +1 +call bloom_end(); +checked +true +call bloom_start(); +select count(*) from t2 force index (id2_id4_id5) where id2=601 and id4=3005 and id5=3005; +count(*) +1 +call bloom_end(); +checked +true +call bloom_start(); +select count(*) from t2 force index (id2_id3) where id2=1; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t2 force index (id2_id3) where id2=23; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t2 force index (id2_id3) where id2=345; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t2 force index (id2_id3) where id2=456; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t2 force index (id2_id4) where id2=1; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t2 force index (id2_id4) where id2=23; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t2 force index (id2_id4) where id2=345; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t2 force index (id2_id4) where id2=456; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index (id2_id3_id1_id4) where id2=1 and id3='1' and id1=1 order by id4; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index (id2_id3_id1_id4) where id2=36 and id3='36' and id1=18 order by id4; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index (id2_id3_id1_id4) where id2=124 and id3='124' and id1=62 order by id4; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index (id2_id3_id1_id4) where id2=888 and id3='888' and id1=444 order by id4; +count(*) +5 +call bloom_end(); +checked +false +call 
bloom_start(); +select count(*) from t1 force index (id2_id3_id1_id4) where id2=124 and id3='124'; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t2 force index (id2_id3) where id2=1 and id3='1' and id4=1; +count(*) +1 +call bloom_end(); +checked +true +call bloom_start(); +select count(*) from t2 force index (id2_id3) where id2=12 and id3='12' and id4=60; +count(*) +1 +call bloom_end(); +checked +true +call bloom_start(); +select count(*) from t1 force index (id2_id3) where id2=1 and id3='1'; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index (id2_id3) where id2=23 and id3='23'; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index (id3_id2) where id2=1 and id3='1'; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index (id3_id2) where id2=23 and id3='23'; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index (PRIMARY) where id1=1; +count(*) +10 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index (PRIMARY) where id1=12; +count(*) +10 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index (PRIMARY) where id1=23; +count(*) +10 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index (PRIMARY) where id1=100; +count(*) +10 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index (PRIMARY) where id1=234; +count(*) +10 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index (id2_id3_id1_id4) where id2=36; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index (id2_id3_id1_id4) where id2=234; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t2 
force index (id2) where id2=1 and id4=1; +count(*) +1 +call bloom_end(); +checked +true +call bloom_start(); +select count(*) from t2 force index (id2) where id2=23 and id4=115; +count(*) +1 +call bloom_end(); +checked +true +call bloom_start(); +select count(*) from t2 force index (id2) where id2=500 and id4=2500; +count(*) +1 +call bloom_end(); +checked +true +call bloom_start(); +select count(*) from t2 force index (id2) where id2=601 and id4=3005; +count(*) +1 +call bloom_end(); +checked +true +call bloom_start(); +select count(*) from t2 force index (id3_id4) where id3='1' and id4=1; +count(*) +1 +call bloom_end(); +checked +true +call bloom_start(); +select count(*) from t2 force index (id3_id4) where id3='12' and id4=60; +count(*) +1 +call bloom_end(); +checked +true +call bloom_start(); +select count(*) from t1 force index (id2) where id2=1; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index (id2) where id2=23; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index (id2) where id2=345; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index (id2) where id2=456; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t2 force index (id3_id5) where id3='100' and id5=500; +count(*) +1 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t2 force index (id3_id5) where id3='240' and id5=1200; +count(*) +1 +call bloom_end(); +checked +false +drop table if exists t1; +drop table if exists t2; +create table t1 ( +id1 bigint not null, +id2 bigint not null, +id3 varchar(100) not null, +id4 int not null, +id5 int not null, +value bigint, +value2 varchar(100), +primary key (id1, id2, id3, id4) COMMENT 'rev:cf_long_prefix', +index id2 (id2) COMMENT 'rev:cf_long_prefix', +index id2_id1 (id2, id1) COMMENT 'rev:cf_long_prefix', +index id2_id3 (id2, id3) COMMENT 
'rev:cf_long_prefix', +index id2_id4 (id2, id4) COMMENT 'rev:cf_long_prefix', +index id2_id3_id1_id4 (id2, id3, id1, id4) COMMENT 'rev:cf_long_prefix', +index id3_id2 (id3, id2) COMMENT 'rev:cf_long_prefix' +) engine=ROCKSDB; +create table t2 ( +id1 bigint not null, +id2 bigint not null, +id3 varchar(100) not null, +id4 int not null, +id5 int not null, +value bigint, +value2 varchar(100), +primary key (id4) COMMENT 'rev:cf_long_prefix', +index id2 (id2) COMMENT 'rev:cf_long_prefix', +index id2_id3 (id2, id3) COMMENT 'rev:cf_long_prefix', +index id2_id4 (id2, id4) COMMENT 'rev:cf_long_prefix', +index id2_id4_id5 (id2, id4, id5) COMMENT 'rev:cf_long_prefix', +index id3_id4 (id3, id4) COMMENT 'rev:cf_long_prefix', +index id3_id5 (id3, id5) COMMENT 'rev:cf_long_prefix' ) engine=ROCKSDB; call bloom_start(); select count(*) from t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/bloomfilter_skip.result b/storage/rocksdb/mysql-test/rocksdb/r/bloomfilter_skip.result index af7feaf86821c..f20554a7bce9b 100644 --- a/storage/rocksdb/mysql-test/rocksdb/r/bloomfilter_skip.result +++ b/storage/rocksdb/mysql-test/rocksdb/r/bloomfilter_skip.result @@ -817,6 +817,404 @@ id4 int not null, id5 int not null, value bigint, value2 varchar(100), +primary key (id1, id2, id3, id4) COMMENT 'rev:cf_short_prefix', +index id2 (id2) COMMENT 'rev:cf_short_prefix', +index id2_id1 (id2, id1) COMMENT 'rev:cf_short_prefix', +index id2_id3 (id2, id3) COMMENT 'rev:cf_short_prefix', +index id2_id4 (id2, id4) COMMENT 'rev:cf_short_prefix', +index id2_id3_id1_id4 (id2, id3, id1, id4) COMMENT 'rev:cf_short_prefix', +index id3_id2 (id3, id2) COMMENT 'rev:cf_short_prefix' +) engine=ROCKSDB; +create table t2 ( +id1 bigint not null, +id2 bigint not null, +id3 varchar(100) not null, +id4 int not null, +id5 int not null, +value bigint, +value2 varchar(100), +primary key (id4) COMMENT 'rev:cf_short_prefix', +index id2 (id2) COMMENT 'rev:cf_short_prefix', +index id2_id3 (id2, id3) COMMENT 'rev:cf_short_prefix', 
+index id2_id4 (id2, id4) COMMENT 'rev:cf_short_prefix', +index id2_id4_id5 (id2, id4, id5) COMMENT 'rev:cf_short_prefix', +index id3_id4 (id3, id4) COMMENT 'rev:cf_short_prefix', +index id3_id5 (id3, id5) COMMENT 'rev:cf_short_prefix' +) engine=ROCKSDB; +call bloom_start(); +select count(*) from t1; +count(*) +10000 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t2; +count(*) +10000 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index(PRIMARY) where id1 >= 1; +count(*) +10000 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index(id2_id1) where id2 >= 1; +count(*) +10000 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t2 force index(id3_id4) where id3 >= '1'; +count(*) +10000 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index(id2_id1) where id2=2 and id1=1; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index(id2_id1) where id2=24 and id1=12; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index(id2_id1) where id2=88 and id1=44; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index(id2_id1) where id2=100 and id1=50; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index(id2_id1) where id2=428 and id1=214; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t2 force index (id2_id4_id5) where id2=1 and id4=1 and id5=1; +count(*) +1 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t2 force index (id2_id4_id5) where id2=23 and id4=115 and id5=115; +count(*) +1 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t2 force index (id2_id4_id5) where id2=500 and id4=2500 and id5=2500; +count(*) +1 
+call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t2 force index (id2_id4_id5) where id2=601 and id4=3005 and id5=3005; +count(*) +1 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t2 force index (id2_id3) where id2=1; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t2 force index (id2_id3) where id2=23; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t2 force index (id2_id3) where id2=345; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t2 force index (id2_id3) where id2=456; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t2 force index (id2_id4) where id2=1; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t2 force index (id2_id4) where id2=23; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t2 force index (id2_id4) where id2=345; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t2 force index (id2_id4) where id2=456; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index (id2_id3_id1_id4) where id2=1 and id3='1' and id1=1 order by id4; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index (id2_id3_id1_id4) where id2=36 and id3='36' and id1=18 order by id4; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index (id2_id3_id1_id4) where id2=124 and id3='124' and id1=62 order by id4; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index (id2_id3_id1_id4) where id2=888 and id3='888' and id1=444 order by id4; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index 
(id2_id3_id1_id4) where id2=124 and id3='124'; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t2 force index (id2_id3) where id2=1 and id3='1' and id4=1; +count(*) +1 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t2 force index (id2_id3) where id2=12 and id3='12' and id4=60; +count(*) +1 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index (id2_id3) where id2=1 and id3='1'; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index (id2_id3) where id2=23 and id3='23'; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index (id3_id2) where id2=1 and id3='1'; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index (id3_id2) where id2=23 and id3='23'; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index (PRIMARY) where id1=1; +count(*) +10 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index (PRIMARY) where id1=12; +count(*) +10 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index (PRIMARY) where id1=23; +count(*) +10 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index (PRIMARY) where id1=100; +count(*) +10 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index (PRIMARY) where id1=234; +count(*) +10 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index (id2_id3_id1_id4) where id2=36; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index (id2_id3_id1_id4) where id2=234; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t2 force index (id2) where id2=1 and id4=1; +count(*) 
+1 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t2 force index (id2) where id2=23 and id4=115; +count(*) +1 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t2 force index (id2) where id2=500 and id4=2500; +count(*) +1 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t2 force index (id2) where id2=601 and id4=3005; +count(*) +1 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t2 force index (id3_id4) where id3='1' and id4=1; +count(*) +1 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t2 force index (id3_id4) where id3='12' and id4=60; +count(*) +1 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index (id2) where id2=1; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index (id2) where id2=23; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index (id2) where id2=345; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index (id2) where id2=456; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t2 force index (id3_id5) where id3='100' and id5=500; +count(*) +1 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t2 force index (id3_id5) where id3='240' and id5=1200; +count(*) +1 +call bloom_end(); +checked +false +drop table if exists t1; +drop table if exists t2; +create table t1 ( +id1 bigint not null, +id2 bigint not null, +id3 varchar(100) not null, +id4 int not null, +id5 int not null, +value bigint, +value2 varchar(100), primary key (id1, id2, id3, id4) COMMENT 'cf_long_prefix', index id2 (id2) COMMENT 'cf_long_prefix', index id2_id1 (id2, id1) COMMENT 'cf_long_prefix', @@ -833,13 +1231,411 @@ id4 int not null, id5 int not null, value bigint, value2 
varchar(100), -primary key (id4) COMMENT 'cf_long_prefix', -index id2 (id2) COMMENT 'cf_long_prefix', -index id2_id3 (id2, id3) COMMENT 'cf_long_prefix', -index id2_id4 (id2, id4) COMMENT 'cf_long_prefix', -index id2_id4_id5 (id2, id4, id5) COMMENT 'cf_long_prefix', -index id3_id4 (id3, id4) COMMENT 'cf_long_prefix', -index id3_id5 (id3, id5) COMMENT 'cf_long_prefix' +primary key (id4) COMMENT 'cf_long_prefix', +index id2 (id2) COMMENT 'cf_long_prefix', +index id2_id3 (id2, id3) COMMENT 'cf_long_prefix', +index id2_id4 (id2, id4) COMMENT 'cf_long_prefix', +index id2_id4_id5 (id2, id4, id5) COMMENT 'cf_long_prefix', +index id3_id4 (id3, id4) COMMENT 'cf_long_prefix', +index id3_id5 (id3, id5) COMMENT 'cf_long_prefix' +) engine=ROCKSDB; +call bloom_start(); +select count(*) from t1; +count(*) +10000 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t2; +count(*) +10000 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index(PRIMARY) where id1 >= 1; +count(*) +10000 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index(id2_id1) where id2 >= 1; +count(*) +10000 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t2 force index(id3_id4) where id3 >= '1'; +count(*) +10000 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index(id2_id1) where id2=2 and id1=1; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index(id2_id1) where id2=24 and id1=12; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index(id2_id1) where id2=88 and id1=44; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index(id2_id1) where id2=100 and id1=50; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index(id2_id1) where id2=428 and 
id1=214; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t2 force index (id2_id4_id5) where id2=1 and id4=1 and id5=1; +count(*) +1 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t2 force index (id2_id4_id5) where id2=23 and id4=115 and id5=115; +count(*) +1 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t2 force index (id2_id4_id5) where id2=500 and id4=2500 and id5=2500; +count(*) +1 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t2 force index (id2_id4_id5) where id2=601 and id4=3005 and id5=3005; +count(*) +1 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t2 force index (id2_id3) where id2=1; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t2 force index (id2_id3) where id2=23; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t2 force index (id2_id3) where id2=345; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t2 force index (id2_id3) where id2=456; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t2 force index (id2_id4) where id2=1; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t2 force index (id2_id4) where id2=23; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t2 force index (id2_id4) where id2=345; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t2 force index (id2_id4) where id2=456; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index (id2_id3_id1_id4) where id2=1 and id3='1' and id1=1 order by id4; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index (id2_id3_id1_id4) where id2=36 and 
id3='36' and id1=18 order by id4; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index (id2_id3_id1_id4) where id2=124 and id3='124' and id1=62 order by id4; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index (id2_id3_id1_id4) where id2=888 and id3='888' and id1=444 order by id4; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index (id2_id3_id1_id4) where id2=124 and id3='124'; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t2 force index (id2_id3) where id2=1 and id3='1' and id4=1; +count(*) +1 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t2 force index (id2_id3) where id2=12 and id3='12' and id4=60; +count(*) +1 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index (id2_id3) where id2=1 and id3='1'; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index (id2_id3) where id2=23 and id3='23'; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index (id3_id2) where id2=1 and id3='1'; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index (id3_id2) where id2=23 and id3='23'; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index (PRIMARY) where id1=1; +count(*) +10 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index (PRIMARY) where id1=12; +count(*) +10 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index (PRIMARY) where id1=23; +count(*) +10 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index (PRIMARY) where id1=100; +count(*) +10 +call bloom_end(); +checked +false +call 
bloom_start(); +select count(*) from t1 force index (PRIMARY) where id1=234; +count(*) +10 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index (id2_id3_id1_id4) where id2=36; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index (id2_id3_id1_id4) where id2=234; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t2 force index (id2) where id2=1 and id4=1; +count(*) +1 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t2 force index (id2) where id2=23 and id4=115; +count(*) +1 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t2 force index (id2) where id2=500 and id4=2500; +count(*) +1 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t2 force index (id2) where id2=601 and id4=3005; +count(*) +1 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t2 force index (id3_id4) where id3='1' and id4=1; +count(*) +1 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t2 force index (id3_id4) where id3='12' and id4=60; +count(*) +1 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index (id2) where id2=1; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index (id2) where id2=23; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index (id2) where id2=345; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t1 force index (id2) where id2=456; +count(*) +5 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t2 force index (id3_id5) where id3='100' and id5=500; +count(*) +1 +call bloom_end(); +checked +false +call bloom_start(); +select count(*) from t2 force index (id3_id5) where id3='240' and id5=1200; +count(*) 
+1 +call bloom_end(); +checked +false +drop table if exists t1; +drop table if exists t2; +create table t1 ( +id1 bigint not null, +id2 bigint not null, +id3 varchar(100) not null, +id4 int not null, +id5 int not null, +value bigint, +value2 varchar(100), +primary key (id1, id2, id3, id4) COMMENT 'rev:cf_long_prefix', +index id2 (id2) COMMENT 'rev:cf_long_prefix', +index id2_id1 (id2, id1) COMMENT 'rev:cf_long_prefix', +index id2_id3 (id2, id3) COMMENT 'rev:cf_long_prefix', +index id2_id4 (id2, id4) COMMENT 'rev:cf_long_prefix', +index id2_id3_id1_id4 (id2, id3, id1, id4) COMMENT 'rev:cf_long_prefix', +index id3_id2 (id3, id2) COMMENT 'rev:cf_long_prefix' +) engine=ROCKSDB; +create table t2 ( +id1 bigint not null, +id2 bigint not null, +id3 varchar(100) not null, +id4 int not null, +id5 int not null, +value bigint, +value2 varchar(100), +primary key (id4) COMMENT 'rev:cf_long_prefix', +index id2 (id2) COMMENT 'rev:cf_long_prefix', +index id2_id3 (id2, id3) COMMENT 'rev:cf_long_prefix', +index id2_id4 (id2, id4) COMMENT 'rev:cf_long_prefix', +index id2_id4_id5 (id2, id4, id5) COMMENT 'rev:cf_long_prefix', +index id3_id4 (id3, id4) COMMENT 'rev:cf_long_prefix', +index id3_id5 (id3, id5) COMMENT 'rev:cf_long_prefix' ) engine=ROCKSDB; call bloom_start(); select count(*) from t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/bulk_load.result b/storage/rocksdb/mysql-test/rocksdb/r/bulk_load.result index d859c8551b20f..6e9be75aaaeed 100644 --- a/storage/rocksdb/mysql-test/rocksdb/r/bulk_load.result +++ b/storage/rocksdb/mysql-test/rocksdb/r/bulk_load.result @@ -1,8 +1,26 @@ DROP TABLE IF EXISTS t1, t2, t3; -CREATE TABLE t1(pk CHAR(5) PRIMARY KEY, a char(30), b char(30), key(a)) COLLATE 'latin1_bin'; -CREATE TABLE t2(pk CHAR(5) PRIMARY KEY, a char(30), b char(30), key(a)) COLLATE 'latin1_bin'; -CREATE TABLE t3(pk CHAR(5) PRIMARY KEY, a char(30), b char(30), key(a)) COLLATE 'latin1_bin' - PARTITION BY KEY() PARTITIONS 4; +Data will be ordered in ascending order +CREATE 
TABLE t1( +pk CHAR(5), +a CHAR(30), +b CHAR(30), +PRIMARY KEY(pk) COMMENT "cf1", +KEY(a) +) COLLATE 'latin1_bin'; +CREATE TABLE t2( +pk CHAR(5), +a CHAR(30), +b CHAR(30), +PRIMARY KEY(pk) COMMENT "cf1", +KEY(a) +) COLLATE 'latin1_bin'; +CREATE TABLE t3( +pk CHAR(5), +a CHAR(30), +b CHAR(30), +PRIMARY KEY(pk) COMMENT "cf1", +KEY(a) +) COLLATE 'latin1_bin' PARTITION BY KEY() PARTITIONS 4; set session transaction isolation level repeatable read; select * from information_schema.rocksdb_dbstats where stat_type='DB_NUM_SNAPSHOTS'; STAT_TYPE VALUE diff --git a/storage/rocksdb/mysql-test/rocksdb/r/bulk_load_errors.result b/storage/rocksdb/mysql-test/rocksdb/r/bulk_load_errors.result new file mode 100644 index 0000000000000..31562d1da1036 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/bulk_load_errors.result @@ -0,0 +1,19 @@ +CREATE TABLE t1(pk INT, PRIMARY KEY(pk)); +SET rocksdb_bulk_load=1; +INSERT INTO t1 VALUES(10); +INSERT INTO t1 VALUES(11); +INSERT INTO t1 VALUES(9); +ERROR HY000: Rows must be inserted in primary key order during bulk load operation +SET rocksdb_bulk_load=0; +SELECT * FROM t1; +pk +10 +11 +SET rocksdb_bulk_load=1; +INSERT INTO t1 VALUES(1); +INSERT INTO t1 VALUES(2); +INSERT INTO t1 VALUES(20); +INSERT INTO t1 VALUES(21); +SET rocksdb_bulk_load=0; +ERROR HY000: Lost connection to MySQL server during query +DROP TABLE t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/bulk_load_rev_cf.result b/storage/rocksdb/mysql-test/rocksdb/r/bulk_load_rev_cf.result new file mode 100644 index 0000000000000..a30838b9c9f2c --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/bulk_load_rev_cf.result @@ -0,0 +1,82 @@ +DROP TABLE IF EXISTS t1, t2, t3; +Data will be ordered in ascending order +CREATE TABLE t1( +pk CHAR(5), +a CHAR(30), +b CHAR(30), +PRIMARY KEY(pk) COMMENT "rev:cf1", +KEY(a) +) COLLATE 'latin1_bin'; +CREATE TABLE t2( +pk CHAR(5), +a CHAR(30), +b CHAR(30), +PRIMARY KEY(pk) COMMENT "rev:cf1", +KEY(a) +) COLLATE 'latin1_bin'; +CREATE 
TABLE t3( +pk CHAR(5), +a CHAR(30), +b CHAR(30), +PRIMARY KEY(pk) COMMENT "rev:cf1", +KEY(a) +) COLLATE 'latin1_bin' PARTITION BY KEY() PARTITIONS 4; +set session transaction isolation level repeatable read; +select * from information_schema.rocksdb_dbstats where stat_type='DB_NUM_SNAPSHOTS'; +STAT_TYPE VALUE +DB_NUM_SNAPSHOTS 0 +start transaction with consistent snapshot; +select * from information_schema.rocksdb_dbstats where stat_type='DB_NUM_SNAPSHOTS'; +STAT_TYPE VALUE +DB_NUM_SNAPSHOTS 1 +set rocksdb_bulk_load=1; +set rocksdb_bulk_load_size=100000; +LOAD DATA INFILE INTO TABLE t1; +LOAD DATA INFILE INTO TABLE t2; +LOAD DATA INFILE INTO TABLE t3; +set rocksdb_bulk_load=0; +SHOW TABLE STATUS WHERE name LIKE 't%'; +Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length Index_length Data_free Auto_increment Create_time Update_time Check_time Collation Checksum Create_options Comment +t1 ROCKSDB 10 Fixed 5000000 # # # # 0 NULL NULL NULL NULL latin1_bin NULL +t2 ROCKSDB 10 Fixed 5000000 # # # # 0 NULL NULL NULL NULL latin1_bin NULL +t3 ROCKSDB 10 Fixed 5000000 # # # # 0 NULL NULL NULL NULL latin1_bin NULL partitioned +ANALYZE TABLE t1, t2, t3; +Table Op Msg_type Msg_text +test.t1 analyze status OK +test.t2 analyze status OK +test.t3 analyze status OK +SHOW TABLE STATUS WHERE name LIKE 't%'; +Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length Index_length Data_free Auto_increment Create_time Update_time Check_time Collation Checksum Create_options Comment +t1 ROCKSDB 10 Fixed 5000000 # # # # 0 NULL NULL NULL NULL latin1_bin NULL +t2 ROCKSDB 10 Fixed 5000000 # # # # 0 NULL NULL NULL NULL latin1_bin NULL +t3 ROCKSDB 10 Fixed 5000000 # # # # 0 NULL NULL NULL NULL latin1_bin NULL partitioned +select count(pk) from t1; +count(pk) +5000000 +select count(a) from t1; +count(a) +5000000 +select count(b) from t1; +count(b) +5000000 +select count(pk) from t2; +count(pk) +5000000 +select count(a) from t2; +count(a) +5000000 
+select count(b) from t2; +count(b) +5000000 +select count(pk) from t3; +count(pk) +5000000 +select count(a) from t3; +count(a) +5000000 +select count(b) from t3; +count(b) +5000000 +longfilenamethatvalidatesthatthiswillgetdeleted.bulk_load.tmp +test.bulk_load.tmp +DROP TABLE t1, t2, t3; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/bulk_load_rev_cf_and_data.result b/storage/rocksdb/mysql-test/rocksdb/r/bulk_load_rev_cf_and_data.result new file mode 100644 index 0000000000000..4d259b5ea2f2a --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/bulk_load_rev_cf_and_data.result @@ -0,0 +1,82 @@ +DROP TABLE IF EXISTS t1, t2, t3; +Data will be ordered in descending order +CREATE TABLE t1( +pk CHAR(5), +a CHAR(30), +b CHAR(30), +PRIMARY KEY(pk) COMMENT "rev:cf1", +KEY(a) +) COLLATE 'latin1_bin'; +CREATE TABLE t2( +pk CHAR(5), +a CHAR(30), +b CHAR(30), +PRIMARY KEY(pk) COMMENT "rev:cf1", +KEY(a) +) COLLATE 'latin1_bin'; +CREATE TABLE t3( +pk CHAR(5), +a CHAR(30), +b CHAR(30), +PRIMARY KEY(pk) COMMENT "rev:cf1", +KEY(a) +) COLLATE 'latin1_bin' PARTITION BY KEY() PARTITIONS 4; +set session transaction isolation level repeatable read; +select * from information_schema.rocksdb_dbstats where stat_type='DB_NUM_SNAPSHOTS'; +STAT_TYPE VALUE +DB_NUM_SNAPSHOTS 0 +start transaction with consistent snapshot; +select * from information_schema.rocksdb_dbstats where stat_type='DB_NUM_SNAPSHOTS'; +STAT_TYPE VALUE +DB_NUM_SNAPSHOTS 1 +set rocksdb_bulk_load=1; +set rocksdb_bulk_load_size=100000; +LOAD DATA INFILE INTO TABLE t1; +LOAD DATA INFILE INTO TABLE t2; +LOAD DATA INFILE INTO TABLE t3; +set rocksdb_bulk_load=0; +SHOW TABLE STATUS WHERE name LIKE 't%'; +Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length Index_length Data_free Auto_increment Create_time Update_time Check_time Collation Checksum Create_options Comment +t1 ROCKSDB 10 Fixed 5000000 # # # # 0 NULL NULL NULL NULL latin1_bin NULL +t2 ROCKSDB 10 Fixed 5000000 # # # # 0 NULL NULL NULL NULL 
latin1_bin NULL +t3 ROCKSDB 10 Fixed 5000000 # # # # 0 NULL NULL NULL NULL latin1_bin NULL partitioned +ANALYZE TABLE t1, t2, t3; +Table Op Msg_type Msg_text +test.t1 analyze status OK +test.t2 analyze status OK +test.t3 analyze status OK +SHOW TABLE STATUS WHERE name LIKE 't%'; +Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length Index_length Data_free Auto_increment Create_time Update_time Check_time Collation Checksum Create_options Comment +t1 ROCKSDB 10 Fixed 5000000 # # # # 0 NULL NULL NULL NULL latin1_bin NULL +t2 ROCKSDB 10 Fixed 5000000 # # # # 0 NULL NULL NULL NULL latin1_bin NULL +t3 ROCKSDB 10 Fixed 5000000 # # # # 0 NULL NULL NULL NULL latin1_bin NULL partitioned +select count(pk) from t1; +count(pk) +5000000 +select count(a) from t1; +count(a) +5000000 +select count(b) from t1; +count(b) +5000000 +select count(pk) from t2; +count(pk) +5000000 +select count(a) from t2; +count(a) +5000000 +select count(b) from t2; +count(b) +5000000 +select count(pk) from t3; +count(pk) +5000000 +select count(a) from t3; +count(a) +5000000 +select count(b) from t3; +count(b) +5000000 +longfilenamethatvalidatesthatthiswillgetdeleted.bulk_load.tmp +test.bulk_load.tmp +DROP TABLE t1, t2, t3; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/bulk_load_rev_data.result b/storage/rocksdb/mysql-test/rocksdb/r/bulk_load_rev_data.result new file mode 100644 index 0000000000000..d2d3befdf0469 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/bulk_load_rev_data.result @@ -0,0 +1,82 @@ +DROP TABLE IF EXISTS t1, t2, t3; +Data will be ordered in descending order +CREATE TABLE t1( +pk CHAR(5), +a CHAR(30), +b CHAR(30), +PRIMARY KEY(pk) COMMENT "cf1", +KEY(a) +) COLLATE 'latin1_bin'; +CREATE TABLE t2( +pk CHAR(5), +a CHAR(30), +b CHAR(30), +PRIMARY KEY(pk) COMMENT "cf1", +KEY(a) +) COLLATE 'latin1_bin'; +CREATE TABLE t3( +pk CHAR(5), +a CHAR(30), +b CHAR(30), +PRIMARY KEY(pk) COMMENT "cf1", +KEY(a) +) COLLATE 'latin1_bin' PARTITION BY KEY() PARTITIONS 
4; +set session transaction isolation level repeatable read; +select * from information_schema.rocksdb_dbstats where stat_type='DB_NUM_SNAPSHOTS'; +STAT_TYPE VALUE +DB_NUM_SNAPSHOTS 0 +start transaction with consistent snapshot; +select * from information_schema.rocksdb_dbstats where stat_type='DB_NUM_SNAPSHOTS'; +STAT_TYPE VALUE +DB_NUM_SNAPSHOTS 1 +set rocksdb_bulk_load=1; +set rocksdb_bulk_load_size=100000; +LOAD DATA INFILE INTO TABLE t1; +LOAD DATA INFILE INTO TABLE t2; +LOAD DATA INFILE INTO TABLE t3; +set rocksdb_bulk_load=0; +SHOW TABLE STATUS WHERE name LIKE 't%'; +Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length Index_length Data_free Auto_increment Create_time Update_time Check_time Collation Checksum Create_options Comment +t1 ROCKSDB 10 Fixed 5000000 # # # # 0 NULL NULL NULL NULL latin1_bin NULL +t2 ROCKSDB 10 Fixed 5000000 # # # # 0 NULL NULL NULL NULL latin1_bin NULL +t3 ROCKSDB 10 Fixed 5000000 # # # # 0 NULL NULL NULL NULL latin1_bin NULL partitioned +ANALYZE TABLE t1, t2, t3; +Table Op Msg_type Msg_text +test.t1 analyze status OK +test.t2 analyze status OK +test.t3 analyze status OK +SHOW TABLE STATUS WHERE name LIKE 't%'; +Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length Index_length Data_free Auto_increment Create_time Update_time Check_time Collation Checksum Create_options Comment +t1 ROCKSDB 10 Fixed 5000000 # # # # 0 NULL NULL NULL NULL latin1_bin NULL +t2 ROCKSDB 10 Fixed 5000000 # # # # 0 NULL NULL NULL NULL latin1_bin NULL +t3 ROCKSDB 10 Fixed 5000000 # # # # 0 NULL NULL NULL NULL latin1_bin NULL partitioned +select count(pk) from t1; +count(pk) +5000000 +select count(a) from t1; +count(a) +5000000 +select count(b) from t1; +count(b) +5000000 +select count(pk) from t2; +count(pk) +5000000 +select count(a) from t2; +count(a) +5000000 +select count(b) from t2; +count(b) +5000000 +select count(pk) from t3; +count(pk) +5000000 +select count(a) from t3; +count(a) +5000000 +select 
count(b) from t3; +count(b) +5000000 +longfilenamethatvalidatesthatthiswillgetdeleted.bulk_load.tmp +test.bulk_load.tmp +DROP TABLE t1, t2, t3; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/cons_snapshot_read_committed.result b/storage/rocksdb/mysql-test/rocksdb/r/cons_snapshot_read_committed.result index d75a548e6ffc1..521edec0c83ff 100644 --- a/storage/rocksdb/mysql-test/rocksdb/r/cons_snapshot_read_committed.result +++ b/storage/rocksdb/mysql-test/rocksdb/r/cons_snapshot_read_committed.result @@ -5,7 +5,7 @@ connection con1; CREATE TABLE t1 (a INT, pk INT AUTO_INCREMENT PRIMARY KEY) ENGINE=ROCKSDB; SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED; START TRANSACTION WITH CONSISTENT SNAPSHOT; -ERROR: 1105 +ERROR: 1938 connection con2; select * from information_schema.rocksdb_dbstats where stat_type='DB_NUM_SNAPSHOTS'; STAT_TYPE VALUE @@ -18,7 +18,7 @@ STAT_TYPE VALUE DB_NUM_SNAPSHOTS 0 connection con1; START TRANSACTION WITH CONSISTENT SNAPSHOT; -ERROR: 1105 +ERROR: 1938 connection con2; INSERT INTO t1 (a) VALUES (1); connection con1; @@ -69,7 +69,7 @@ id value value2 5 5 5 6 6 6 START TRANSACTION WITH CONSISTENT SNAPSHOT; -ERROR: 1105 +ERROR: 1938 connection con2; INSERT INTO r1 values (7,7,7); connection con1; @@ -107,12 +107,12 @@ id value value2 7 7 7 8 8 8 START TRANSACTION WITH CONSISTENT SNAPSHOT; -ERROR: 1105 +ERROR: 1938 connection con2; INSERT INTO r1 values (9,9,9); connection con1; START TRANSACTION WITH CONSISTENT SNAPSHOT; -ERROR: 1105 +ERROR: 1938 connection con2; INSERT INTO r1 values (10,10,10); connection con1; @@ -129,7 +129,7 @@ id value value2 9 9 9 10 10 10 START TRANSACTION WITH CONSISTENT SNAPSHOT; -ERROR: 1105 +ERROR: 1938 INSERT INTO r1 values (11,11,11); ERROR: 0 SELECT * FROM r1; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/cons_snapshot_repeatable_read.result b/storage/rocksdb/mysql-test/rocksdb/r/cons_snapshot_repeatable_read.result index 7458e6b72c3fe..805a0aaa0fdd1 100644 --- 
a/storage/rocksdb/mysql-test/rocksdb/r/cons_snapshot_repeatable_read.result +++ b/storage/rocksdb/mysql-test/rocksdb/r/cons_snapshot_repeatable_read.result @@ -125,7 +125,7 @@ id value value2 START TRANSACTION WITH CONSISTENT SNAPSHOT; ERROR: 0 INSERT INTO r1 values (11,11,11); -ERROR: 1105 +ERROR: 1935 SELECT * FROM r1; id value value2 1 1 1 diff --git a/storage/rocksdb/mysql-test/rocksdb/r/corrupted_data_reads_debug.result b/storage/rocksdb/mysql-test/rocksdb/r/corrupted_data_reads_debug.result index 20ac751f582e1..b7d0f99c71635 100644 --- a/storage/rocksdb/mysql-test/rocksdb/r/corrupted_data_reads_debug.result +++ b/storage/rocksdb/mysql-test/rocksdb/r/corrupted_data_reads_debug.result @@ -20,7 +20,7 @@ set @tmp1=@@rocksdb_verify_row_debug_checksums; set rocksdb_verify_row_debug_checksums=1; set session debug= "+d,myrocks_simulate_bad_row_read1"; select * from t1 where pk=1; -ERROR HY000: Got error 122 from storage engine +ERROR HY000: Got error 199 'Found data corruption.' from ROCKSDB set session debug= "-d,myrocks_simulate_bad_row_read1"; set rocksdb_verify_row_debug_checksums=@tmp1; select * from t1 where pk=1; @@ -28,11 +28,11 @@ pk col1 1 1 set session debug= "+d,myrocks_simulate_bad_row_read2"; select * from t1 where pk=1; -ERROR HY000: Got error 122 from storage engine +ERROR HY000: Got error 199 'Found data corruption.' from ROCKSDB set session debug= "-d,myrocks_simulate_bad_row_read2"; set session debug= "+d,myrocks_simulate_bad_row_read3"; select * from t1 where pk=1; -ERROR HY000: Got error 122 from storage engine +ERROR HY000: Got error 199 'Found data corruption.' from ROCKSDB set session debug= "-d,myrocks_simulate_bad_row_read3"; insert into t1 values(4,'0123456789'); select * from t1; @@ -56,7 +56,7 @@ pk col1 ABCD 1 set session debug= "+d,myrocks_simulate_bad_pk_read1"; select * from t2; -ERROR HY000: Got error 122 from storage engine +ERROR HY000: Got error 199 'Found data corruption.' 
from ROCKSDB set session debug= "-d,myrocks_simulate_bad_pk_read1"; drop table t2; create table t2 ( @@ -69,6 +69,6 @@ pk col1 ABCD 1 set session debug= "+d,myrocks_simulate_bad_pk_read1"; select * from t2; -ERROR HY000: Got error 122 from storage engine +ERROR HY000: Got error 199 'Found data corruption.' from ROCKSDB set session debug= "-d,myrocks_simulate_bad_pk_read1"; drop table t2; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/deadlock_stats.result b/storage/rocksdb/mysql-test/rocksdb/r/deadlock_stats.result new file mode 100644 index 0000000000000..9b62cade9ca23 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/deadlock_stats.result @@ -0,0 +1,51 @@ +set @prior_lock_wait_timeout = @@rocksdb_lock_wait_timeout; set @prior_deadlock_detect = @@rocksdb_deadlock_detect; set global rocksdb_deadlock_detect = on; set global rocksdb_lock_wait_timeout = 100000;; +create table t (i int primary key) engine=rocksdb; +insert into t values (1), (2), (3); +begin; +select * from t where i=1 for update; +i +1 +begin; +select * from t where i=2 for update; +i +2 +select * from t where i=2 for update; +select * from t where i=1 for update; +ERROR 40001: Deadlock found when trying to get lock; try restarting transaction +rollback; +i +2 +rollback; +select row_lock_deadlocks from information_schema.table_statistics where +table_name = "t"; +row_lock_deadlocks +1 +select row_lock_deadlocks from information_schema.table_statistics where +table_name = "t"; +row_lock_deadlocks +1 +begin; +select * from t where i=1 for update; +i +1 +begin; +select * from t where i=2 for update; +i +2 +select * from t where i=2 for update; +select * from t where i=1 for update; +ERROR 40001: Deadlock found when trying to get lock; try restarting transaction +rollback; +i +2 +rollback; +select row_lock_deadlocks from information_schema.table_statistics where +table_name = "t"; +row_lock_deadlocks +2 +select row_lock_deadlocks from information_schema.table_statistics where +table_name = "t"; 
+row_lock_deadlocks +2 +set global rocksdb_lock_wait_timeout = @prior_lock_wait_timeout; set global rocksdb_deadlock_detect = @prior_deadlock_detect;; +drop table t; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/drop_table.result b/storage/rocksdb/mysql-test/rocksdb/r/drop_table.result index 7d0fae229da5c..7abe692c8ad0c 100644 --- a/storage/rocksdb/mysql-test/rocksdb/r/drop_table.result +++ b/storage/rocksdb/mysql-test/rocksdb/r/drop_table.result @@ -52,20 +52,4 @@ drop table t5; set global rocksdb_compact_cf = 'cf1'; set global rocksdb_compact_cf = 'rev:cf2'; set global rocksdb_signal_drop_index_thread = 1; -Begin filtering dropped index+ 0 -Begin filtering dropped index+ 1 -Begin filtering dropped index+ 1 -Begin filtering dropped index+ 1 -Begin filtering dropped index+ 1 -Begin filtering dropped index+ 1 -Begin filtering dropped index+ 1 -Begin filtering dropped index+ 1 -Finished filtering dropped index+ 0 -Finished filtering dropped index+ 1 -Finished filtering dropped index+ 1 -Finished filtering dropped index+ 1 -Finished filtering dropped index+ 1 -Finished filtering dropped index+ 1 -Finished filtering dropped index+ 1 -Finished filtering dropped index+ 1 drop table t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/drop_table2.result b/storage/rocksdb/mysql-test/rocksdb/r/drop_table2.result index c46d3522dd7cd..53624a9899d3d 100644 --- a/storage/rocksdb/mysql-test/rocksdb/r/drop_table2.result +++ b/storage/rocksdb/mysql-test/rocksdb/r/drop_table2.result @@ -45,9 +45,15 @@ primary key (a,b) comment 'cf1', key (b) comment 'rev:cf2' ) ENGINE=RocksDB; DELETE FROM t5; +set @@global.rocksdb_compact_cf = 'cf1'; +set @@global.rocksdb_compact_cf = 'rev:cf2'; +set @@global.rocksdb_compact_cf = 'default'; drop table t1; drop table t2; drop table t3; drop table t4; drop table t5; +set @@global.rocksdb_compact_cf = 'cf1'; +set @@global.rocksdb_compact_cf = 'rev:cf2'; +set @@global.rocksdb_compact_cf = 'default'; Compacted diff --git 
a/storage/rocksdb/mysql-test/rocksdb/r/foreign_key.result b/storage/rocksdb/mysql-test/rocksdb/r/foreign_key.result index 483be726bb321..5ffd2774ca240 100644 --- a/storage/rocksdb/mysql-test/rocksdb/r/foreign_key.result +++ b/storage/rocksdb/mysql-test/rocksdb/r/foreign_key.result @@ -1,16 +1,16 @@ DROP TABLE IF EXISTS t1, t2; CREATE TABLE t1 (b INT PRIMARY KEY); CREATE TABLE t2 (a INT NOT NULL, b INT NOT NULL, FOREIGN KEY (b) REFERENCES t1(b)); -ERROR 42000: MyRocks does not currently support foreign key constraints +ERROR 42000: This version of MySQL doesn't yet support 'FOREIGN KEY for the RocksDB storage engine' CREATE TABLE t2 (a INT NOT NULL, bforeign INT NOT NULL); DROP TABLE t2; CREATE TABLE t2 (a INT NOT NULL, foreignkey INT NOT NULL); DROP TABLE t2; CREATE TABLE t2 (a INT NOT NULL, bforeign INT not null, FOREIGN KEY (bforeign) REFERENCES t1(b)); -ERROR 42000: MyRocks does not currently support foreign key constraints +ERROR 42000: This version of MySQL doesn't yet support 'FOREIGN KEY for the RocksDB storage engine' CREATE TABLE t2 (a INT NOT NULL, b INT NOT NULL); ALTER TABLE t2 ADD FOREIGN KEY (b) REFERENCES t1(b); -ERROR 42000: MyRocks does not currently support foreign key constraints +ERROR 42000: This version of MySQL doesn't yet support 'FOREIGN KEY for the RocksDB storage engine' DROP TABLE t2; CREATE TABLE t2 (a INT NOT NULL); ALTER TABLE t2 ADD bforeign INT NOT NULL; @@ -20,6 +20,6 @@ ALTER TABLE t2 ADD foreignkey INT NOT NULL; DROP TABLE t2; CREATE TABLE t2 (a INT NOT NULL); ALTER TABLE t2 ADD bforeign INT NOT NULL, ADD FOREIGN KEY (bforeign) REFERENCES t1(b); -ERROR 42000: MyRocks does not currently support foreign key constraints +ERROR 42000: This version of MySQL doesn't yet support 'FOREIGN KEY for the RocksDB storage engine' DROP TABLE t2; DROP TABLE t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/hermitage.result b/storage/rocksdb/mysql-test/rocksdb/r/hermitage.result index e4d080289dc99..8bf2416aa788b 100644 --- 
a/storage/rocksdb/mysql-test/rocksdb/r/hermitage.result +++ b/storage/rocksdb/mysql-test/rocksdb/r/hermitage.result @@ -483,7 +483,7 @@ delete from test where value = 20; connection con1; commit; connection con2; -ERROR 40001: Deadlock found when trying to get lock; try restarting transaction +ERROR 40001: Deadlock found when trying to get lock; try restarting transaction (snapshot conflict) select variable_value-@a from information_schema.global_status where variable_name='rocksdb_snapshot_conflict_errors'; variable_value-@a 1 @@ -511,7 +511,7 @@ update test set value = 12 where id = 1; connection con1; commit; connection con2; -ERROR 40001: Deadlock found when trying to get lock; try restarting transaction +ERROR 40001: Deadlock found when trying to get lock; try restarting transaction (snapshot conflict) commit; connection con1; truncate table test; @@ -582,7 +582,7 @@ update test set value = 18 where id = 2; commit; connection con1; delete from test where value = 20; -ERROR 40001: Deadlock found when trying to get lock; try restarting transaction +ERROR 40001: Deadlock found when trying to get lock; try restarting transaction (snapshot conflict) commit; connection con1; truncate table test; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/i_s_ddl.result b/storage/rocksdb/mysql-test/rocksdb/r/i_s_ddl.result index a0fd7a13780c5..7eeeb35197bd4 100644 --- a/storage/rocksdb/mysql-test/rocksdb/r/i_s_ddl.result +++ b/storage/rocksdb/mysql-test/rocksdb/r/i_s_ddl.result @@ -8,10 +8,10 @@ PRIMARY KEY (z, y) COMMENT 'zy_cf', KEY (x)) ENGINE = ROCKSDB; SELECT TABLE_SCHEMA,TABLE_NAME,PARTITION_NAME,INDEX_NAME,INDEX_TYPE,KV_FORMAT_VERSION,CF FROM INFORMATION_SCHEMA.ROCKSDB_DDL WHERE TABLE_NAME like 'is_ddl_t%'; TABLE_SCHEMA TABLE_NAME PARTITION_NAME INDEX_NAME INDEX_TYPE KV_FORMAT_VERSION CF -test is_ddl_t2 NULL PRIMARY 1 11 zy_cf -test is_ddl_t2 NULL x 2 11 default -test is_ddl_t1 NULL PRIMARY 1 11 default -test is_ddl_t1 NULL j 2 11 default -test is_ddl_t1 NULL k 2 11 
kl_cf +test is_ddl_t2 NULL PRIMARY 1 13 zy_cf +test is_ddl_t2 NULL x 2 12 default +test is_ddl_t1 NULL PRIMARY 1 13 default +test is_ddl_t1 NULL j 2 12 default +test is_ddl_t1 NULL k 2 12 kl_cf DROP TABLE is_ddl_t1; DROP TABLE is_ddl_t2; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/index_merge_rocksdb.result b/storage/rocksdb/mysql-test/rocksdb/r/index_merge_rocksdb.result new file mode 100644 index 0000000000000..22c8592ff282d --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/index_merge_rocksdb.result @@ -0,0 +1,48 @@ +CREATE TABLE t1 +( +/* fields/keys for row retrieval tests */ +key1 INT, +key2 INT, +key3 INT, +key4 INT, +/* make rows much bigger then keys */ +filler1 CHAR(200), +KEY(key1), +KEY(key2) +) ENGINE=ROCKSDB; +CREATE TABLE t0 AS SELECT * FROM t1; +# Printing of many insert into t0 values (....) disabled. +# Printing of many insert into t1 select .... from t0 disabled. +# Printing of many insert into t1 (...) values (....) disabled. +SELECT COUNT(*) FROM t1; +COUNT(*) +7201 +SET GLOBAL rocksdb_force_flush_memtable_now = 1; +EXPLAIN UPDATE t1 SET filler1='to be deleted' WHERE key1=100 AND key2=100; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 index_merge key1,key2 key1,key2 5,5 NULL # Using intersect(key1,key2); Using where +UPDATE t1 SET filler1='to be deleted' WHERE key1=100 and key2=100; +DROP TABLE t0, t1; +create table t1 (key1 int, key2 int, key3 int, key (key1), key (key2), key(key3)) engine=rocksdb; +insert into t1 values (1, 100, 100), (1, 200, 200), (1, 300, 300); +analyze table t1; +Table Op Msg_type Msg_text +test.t1 analyze status OK +set global rocksdb_force_flush_memtable_now=1; +explain select * from t1 where key1 = 1; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 ref key1 key1 5 const # NULL +explain select key1,key2 from t1 where key1 = 1 or key2 = 1; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 index_merge key1,key2 
key1,key2 5,5 NULL # Using union(key1,key2); Using where +select * from t1 where key1 = 1; +key1 key2 key3 +1 100 100 +1 200 200 +1 300 300 +select key1,key2 from t1 where key1 = 1 or key2 = 1; +key1 key2 +1 100 +1 200 +1 300 +drop table t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/index_merge_rocksdb2.result b/storage/rocksdb/mysql-test/rocksdb/r/index_merge_rocksdb2.result new file mode 100644 index 0000000000000..eab9bbc2ea769 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/index_merge_rocksdb2.result @@ -0,0 +1,1419 @@ +set global rocksdb_force_flush_memtable_now=1; +#---------------- Index merge test 1 ------------------------------------------- +SET SESSION DEFAULT_STORAGE_ENGINE = RocksDB; +drop table if exists t0, t1, t2, t3, t4; +create table t0 +( +key1 int not null, +key2 int not null, +key3 int not null, +key4 int not null, +key5 int not null, +key6 int not null, +key7 int not null, +key8 int not null, +INDEX i1(key1), +INDEX i2(key2), +INDEX i3(key3), +INDEX i4(key4), +INDEX i5(key5), +INDEX i6(key6), +INDEX i7(key7), +INDEX i8(key8) +); +analyze table t0; +Table Op Msg_type Msg_text +test.t0 analyze status OK +explain select * from t0 where key1 < 3 or key1 > 1020; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t0 range i1 i1 4 NULL 2 Using index condition +explain +select * from t0 where key1 < 3 or key2 > 1020; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t0 index_merge i1,i2 i1,i2 4,4 NULL 2 Using sort_union(i1,i2); Using where +select * from t0 where key1 < 3 or key2 > 1020; +key1 key2 key3 key4 key5 key6 key7 key8 +1 1 1 1 1 1 1 1023 +2 2 2 2 2 2 2 1022 +1021 1021 1021 1021 1021 1021 1021 3 +1022 1022 1022 1022 1022 1022 1022 2 +1023 1023 1023 1023 1023 1023 1023 1 +1024 1024 1024 1024 1024 1024 1024 0 +explain select * from t0 where key1 < 2 or key2 <3; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t0 index_merge i1,i2 i1,i2 4,4 NULL 
# Using sort_union(i1,i2); Using where +explain +select * from t0 where (key1 > 30 and key1<35) or (key2 >32 and key2 < 40); +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t0 index_merge i1,i2 i1,i2 4,4 NULL # Using sort_union(i1,i2); Using where +select * from t0 where (key1 > 30 and key1<35) or (key2 >32 and key2 < 40); +key1 key2 key3 key4 key5 key6 key7 key8 +31 31 31 31 31 31 31 993 +32 32 32 32 32 32 32 992 +33 33 33 33 33 33 33 991 +34 34 34 34 34 34 34 990 +35 35 35 35 35 35 35 989 +36 36 36 36 36 36 36 988 +37 37 37 37 37 37 37 987 +38 38 38 38 38 38 38 986 +39 39 39 39 39 39 39 985 +explain select * from t0 ignore index (i2) where key1 < 3 or key2 <4; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t0 ALL i1 NULL NULL NULL # Using where +explain select * from t0 where (key1 < 3 or key2 <4) and key3 = 50; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t0 ref i1,i2,i3 i3 4 const # Using where +explain select * from t0 use index (i1,i2) where (key1 < 2 or key2 <3) and key3 = 50; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t0 index_merge i1,i2 i1,i2 4,4 NULL # Using sort_union(i1,i2); Using where +explain select * from t0 where (key1 > 1 or key2 > 2); +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t0 ALL i1,i2 NULL NULL NULL # Using where +explain select * from t0 force index (i1,i2) where (key1 > 1 or key2 > 2); +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t0 index_merge i1,i2 i1,i2 4,4 NULL # Using sort_union(i1,i2); Using where +explain +select * from t0 where key1<2 or key2<3 or (key1>5 and key1<7) or +(key1>10 and key1<12) or (key2>100 and key2<102); +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t0 index_merge i1,i2 i1,i2 4,4 NULL # Using sort_union(i1,i2); Using where +explain select * from t0 where key2 = 45 or key1 <=> null; +id 
select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t0 range i1,i2 i2 4 NULL # Using where +explain select * from t0 where key2 = 45 or key1 is not null; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t0 ALL i1,i2 NULL NULL NULL # Using where +explain select * from t0 where key2 = 45 or key1 is null; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t0 ref i2 i2 4 const # NULL +explain select * from t0 where key2=10 or key3=3 or key4 <=> null; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t0 index_merge i2,i3,i4 i2,i3 4,4 NULL # Using union(i2,i3); Using where +explain select * from t0 where key2=10 or key3=3 or key4 is null; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t0 index_merge i2,i3 i2,i3 4,4 NULL # Using union(i2,i3); Using where +explain select key1 from t0 where (key1 <=> null) or (key2 < 2) or +(key3=10) or (key4 <=> null); +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t0 index_merge i1,i2,i3,i4 i2,i3 4,4 NULL # Using sort_union(i2,i3); Using where +explain select key1 from t0 where (key1 <=> null) or (key1 < 5) or +(key3=10) or (key4 <=> null); +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t0 index_merge i1,i3,i4 i1,i3 4,4 NULL # Using sort_union(i1,i3); Using where +explain select * from t0 where +(key1 < 2 or key2 < 2) and (key3 < 3 or key4 < 3) and (key5 < 5 or key6 < 5); +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t0 index_merge i1,i2,i3,i4,i5,i6 i1,i2 4,4 NULL # Using sort_union(i1,i2); Using where +explain +select * from t0 where (key1 < 2 or key2 < 4) and (key1 < 5 or key3 < 3); +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t0 index_merge i1,i2,i3 i1,i2 4,4 NULL # Using sort_union(i1,i2); Using where +select * from t0 where (key1 < 2 or key2 < 4) and (key1 < 5 or key3 < 3); 
+key1 key2 key3 key4 key5 key6 key7 key8 +1 1 1 1 1 1 1 1023 +2 2 2 2 2 2 2 1022 +3 3 3 3 3 3 3 1021 +explain select * from t0 where +(key1 < 3 or key2 < 2) and (key3 < 3 or key4 < 3) and (key5 < 2 or key6 < 2); +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t0 index_merge i1,i2,i3,i4,i5,i6 i1,i2 4,4 NULL # Using sort_union(i1,i2); Using where +explain select * from t0 where +(key1 < 3 or key2 < 3) and (key3 < 70); +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t0 range i1,i2,i3 i3 4 NULL # Using index condition; Using where +explain select * from t0 where +(key1 < 3 or key2 < 3) and (key3 < 1000); +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t0 index_merge i1,i2,i3 i1,i2 4,4 NULL # Using sort_union(i1,i2); Using where +explain select * from t0 where +((key1 < 3 or key2 < 3) and (key2 <4 or key3 < 3)) +or +key2 > 4; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t0 ALL i1,i2,i3 NULL NULL NULL # Using where +explain select * from t0 where +((key1 < 4 or key2 < 4) and (key2 <4 or key3 < 3)) +or +key1 < 5; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t0 index_merge i1,i2,i3 i1,i2 4,4 NULL # Using sort_union(i1,i2); Using where +select * from t0 where +((key1 < 4 or key2 < 4) and (key2 <4 or key3 < 3)) +or +key1 < 5; +key1 key2 key3 key4 key5 key6 key7 key8 +1 1 1 1 1 1 1 1023 +2 2 2 2 2 2 2 1022 +3 3 3 3 3 3 3 1021 +4 4 4 4 4 4 4 1020 +explain select * from t0 where +((key1 < 2 or key2 < 2) and (key3 <4 or key5 < 3)) +or +((key5 < 3 or key6 < 3) and (key7 <3 or key8 < 3)); +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t0 index_merge i1,i2,i3,i5,i6,i7,i8 i1,i2,i5,i6 4,4,4,4 NULL # Using sort_union(i1,i2,i5,i6); Using where +explain select * from t0 where +((key3 <3 or key5 < 4) and (key1 < 3 or key2 < 3)) +or +((key7 <5 or key8 < 3) and (key5 < 4 or key6 < 4)); +id select_type 
table type possible_keys key key_len ref rows Extra +1 SIMPLE t0 index_merge i1,i2,i3,i5,i6,i7,i8 i3,i5,i7,i8 4,4,4,4 NULL # Using sort_union(i3,i5,i7,i8); Using where +explain select * from t0 where +((key3 <3 or key5 < 4) and (key1 < 3 or key2 < 4)) +or +((key3 <4 or key5 < 2) and (key5 < 5 or key6 < 3)); +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t0 index_merge i1,i2,i3,i5,i6 i3,i5 4,4 NULL # Using sort_union(i3,i5); Using where +explain select * from t0 where +((key3 <4 or key5 < 3) and (key1 < 3 or key2 < 3)) +or +(((key3 <5 and key7 < 5) or key5 < 2) and (key5 < 4 or key6 < 4)); +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t0 index_merge i1,i2,i3,i5,i6,i7 i3,i5 4,4 NULL # Using sort_union(i3,i5); Using where +explain select * from t0 where +((key3 <5 or key5 < 4) and (key1 < 4 or key2 < 4)) +or +((key3 >5 or key5 < 2) and (key5 < 5 or key6 < 6)); +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t0 ALL i1,i2,i3,i5,i6 NULL NULL NULL # Using where +explain select * from t0 force index(i1, i2, i3, i4, i5, i6 ) where +((key3 <3 or key5 < 4) and (key1 < 3 or key2 < 3)) +or +((key3 >4 or key5 < 2) and (key5 < 5 or key6 < 4)); +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t0 index_merge i1,i2,i3,i5,i6 i3,i5 4,4 NULL # Using sort_union(i3,i5); Using where +explain select * from t0 force index(i1, i2, i3, i4, i5, i6 ) where +((key3 <5 or key5 < 4) and (key1 < 4 or key2 < 4)) +or +((key3 >=5 or key5 < 2) and (key5 < 5 or key6 < 6)); +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t0 ALL i1,i2,i3,i5,i6 NULL NULL NULL # Using where +select * from t0 where key1 < 3 or key8 < 2 order by key1; +key1 key2 key3 key4 key5 key6 key7 key8 +1 1 1 1 1 1 1 1023 +2 2 2 2 2 2 2 1022 +1023 1023 1023 1023 1023 1023 1023 1 +1024 1024 1024 1024 1024 1024 1024 0 +explain +select * from t0 where key1 < 3 or key8 < 2 order by key1; +id 
select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t0 index_merge i1,i8 i1,i8 4,4 NULL # Using sort_union(i1,i8); Using where; Using filesort +create table t2 like t0; +insert into t2 select * from t0; +alter table t2 add index i1_3(key1, key3); +alter table t2 add index i2_3(key2, key3); +alter table t2 drop index i1; +alter table t2 drop index i2; +alter table t2 add index i321(key3, key2, key1); +explain select key3 from t2 where key1 = 100 or key2 = 100; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t2 index_merge i1_3,i2_3 i1_3,i2_3 4,4 NULL # Using sort_union(i1_3,i2_3); Using where +explain select key3 from t2 where key1 <100 or key2 < 100; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t2 index_merge i1_3,i2_3 i1_3,i2_3 4,4 NULL # Using sort_union(i1_3,i2_3); Using where +explain select key7 from t2 where key1 <100 or key2 < 100; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t2 index_merge i1_3,i2_3 i1_3,i2_3 4,4 NULL # Using sort_union(i1_3,i2_3); Using where +create table t4 ( +key1a int not null, +key1b int not null, +key2 int not null, +key2_1 int not null, +key2_2 int not null, +key3 int not null, +index i1a (key1a, key1b), +index i1b (key1b, key1a), +index i2_1(key2, key2_1), +index i2_2(key2, key2_1) +); +Warnings: +Note 1831 Duplicate index 'i2_2' defined on the table 'test.t4'. This is deprecated and will be disallowed in a future release. 
+insert into t4 select key1,key1,key1 div 10, key1 % 10, key1 % 10, key1 from t0; +select * from t4 where key1a = 3 or key1b = 4; +key1a key1b key2 key2_1 key2_2 key3 +3 3 0 3 3 3 +4 4 0 4 4 4 +explain select * from t4 where key1a = 3 or key1b = 4; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t4 index_merge i1a,i1b i1a,i1b 4,4 NULL 2 Using sort_union(i1a,i1b); Using where +explain select * from t4 where key2 = 1 and (key2_1 = 1 or key3 = 5); +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t4 ref i2_1,i2_2 i2_1 4 const 1 Using where +explain select * from t4 where key2 = 1 and (key2_1 = 1 or key2_2 = 5); +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t4 ref i2_1,i2_2 i2_1 4 const 1 Using where +explain select * from t4 where key2_1 = 1 or key2_2 = 5; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t4 ALL NULL NULL NULL NULL # Using where +create table t1 like t0; +insert into t1 select * from t0; +explain select * from t0 left join t1 on (t0.key1=t1.key1) +where t0.key1=3 or t0.key2=4; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t0 index_merge i1,i2 i1,i2 4,4 NULL 2 Using union(i1,i2); Using where +1 SIMPLE t1 ref i1 i1 4 test.t0.key1 1 NULL +select * from t0 left join t1 on (t0.key1=t1.key1) +where t0.key1=3 or t0.key2=4; +key1 key2 key3 key4 key5 key6 key7 key8 key1 key2 key3 key4 key5 key6 key7 key8 +3 3 3 3 3 3 3 1021 3 3 3 3 3 3 3 1021 +4 4 4 4 4 4 4 1020 4 4 4 4 4 4 4 1020 +explain +select * from t0,t1 where (t0.key1=t1.key1) and ( t0.key1=3 or t0.key2=4); +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t0 index_merge i1,i2 i1,i2 4,4 NULL 2 Using union(i1,i2); Using where +1 SIMPLE t1 ref i1 i1 4 test.t0.key1 1 NULL +explain +select * from t0,t1 where (t0.key1=t1.key1) and +(t0.key1=3 or t0.key2<4) and t1.key1=2; +id select_type table type possible_keys key key_len ref rows 
Extra +1 SIMPLE t0 ref i1,i2 i1 4 const 1 Using where +1 SIMPLE t1 ref i1 i1 4 const 1 NULL +explain select * from t0,t1 where t0.key1 = 5 and +(t1.key1 = t0.key1 or t1.key8 = t0.key1); +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t0 ref i1 i1 4 const 1 NULL +1 SIMPLE t1 index_merge i1,i8 i1,i8 4,4 NULL 2 Using union(i1,i8); Using where; Using join buffer (Block Nested Loop) +explain select * from t0,t1 where t0.key1 < 3 and +(t1.key1 = t0.key1 or t1.key8 = t0.key1); +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t0 range i1 i1 4 NULL # Using index condition +1 SIMPLE t1 ALL i1,i8 NULL NULL NULL # Range checked for each record (index map: 0x81) +explain select * from t1 where key1=3 or key2=4 +union select * from t1 where key1<4 or key3=5; +id select_type table type possible_keys key key_len ref rows Extra +1 PRIMARY t1 index_merge i1,i2 i1,i2 4,4 NULL 2 Using union(i1,i2); Using where +2 UNION t1 index_merge i1,i3 i1,i3 4,4 NULL 2 Using sort_union(i1,i3); Using where +NULL UNION RESULT ALL NULL NULL NULL NULL NULL Using temporary +explain select * from (select * from t1 where key1 = 3 or key2 =3) as Z where key8 >5; +id select_type table type possible_keys key key_len ref rows Extra +1 PRIMARY ALL NULL NULL NULL NULL 2 Using where +2 DERIVED t1 index_merge i1,i2 i1,i2 4,4 NULL 2 Using union(i1,i2); Using where +create table t3 like t0; +insert into t3 select * from t0; +alter table t3 add key9 int not null, add index i9(key9); +alter table t3 add keyA int not null, add index iA(keyA); +alter table t3 add keyB int not null, add index iB(keyB); +alter table t3 add keyC int not null, add index iC(keyC); +update t3 set key9=key1,keyA=key1,keyB=key1,keyC=key1; +explain select * from t3 where +key1=1 or key2=2 or key3=3 or key4=4 or +key5=5 or key6=6 or key7=7 or key8=8 or +key9=9 or keyA=10 or keyB=11 or keyC=12; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t3 index_merge 
i1,i2,i3,i4,i5,i6,i7,i8,i9,iA,iB,iC i1,i2,i3,i4,i5,i6,i7,i8,i9,iA,iB,iC 4,4,4,4,4,4,4,4,4,4,4,4 NULL 12 Using union(i1,i2,i3,i4,i5,i6,i7,i8,i9,iA,iB,iC); Using where +select * from t3 where +key1=1 or key2=2 or key3=3 or key4=4 or +key5=5 or key6=6 or key7=7 or key8=8 or +key9=9 or keyA=10 or keyB=11 or keyC=12; +key1 key2 key3 key4 key5 key6 key7 key8 key9 keyA keyB keyC +1 1 1 1 1 1 1 1023 1 1 1 1 +2 2 2 2 2 2 2 1022 2 2 2 2 +3 3 3 3 3 3 3 1021 3 3 3 3 +4 4 4 4 4 4 4 1020 4 4 4 4 +5 5 5 5 5 5 5 1019 5 5 5 5 +6 6 6 6 6 6 6 1018 6 6 6 6 +7 7 7 7 7 7 7 1017 7 7 7 7 +9 9 9 9 9 9 9 1015 9 9 9 9 +10 10 10 10 10 10 10 1014 10 10 10 10 +11 11 11 11 11 11 11 1013 11 11 11 11 +12 12 12 12 12 12 12 1012 12 12 12 12 +1016 1016 1016 1016 1016 1016 1016 8 1016 1016 1016 1016 +explain select * from t0 where key1 < 3 or key2 < 4; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t0 index_merge i1,i2 i1,i2 4,4 NULL 2 Using sort_union(i1,i2); Using where +select * from t0 where key1 < 3 or key2 < 4; +key1 key2 key3 key4 key5 key6 key7 key8 +1 1 1 1 1 1 1 1023 +2 2 2 2 2 2 2 1022 +3 3 3 3 3 3 3 1021 +update t0 set key8=123 where key1 < 3 or key2 < 4; +select * from t0 where key1 < 3 or key2 < 4; +key1 key2 key3 key4 key5 key6 key7 key8 +1 1 1 1 1 1 1 123 +2 2 2 2 2 2 2 123 +3 3 3 3 3 3 3 123 +delete from t0 where key1 < 3 or key2 < 4; +select * from t0 where key1 < 3 or key2 < 4; +key1 key2 key3 key4 key5 key6 key7 key8 +select count(*) from t0; +count(*) +1021 +drop table t4; +create table t4 (a int); +insert into t4 values (1),(4),(3); +set @save_join_buffer_size=@@join_buffer_size; +set join_buffer_size= 4096; +explain select max(A.key1 + B.key1 + A.key2 + B.key2 + A.key3 + B.key3 + A.key4 + B.key4 + A.key5 + B.key5) +from t0 as A force index(i1,i2), t0 as B force index (i1,i2) +where (A.key1 < 500000 or A.key2 < 3) +and (B.key1 < 500000 or B.key2 < 3); +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE A index_merge i1,i2 
i1,i2 4,4 NULL # Using sort_union(i1,i2); Using where +1 SIMPLE B index_merge i1,i2 i1,i2 4,4 NULL # Using sort_union(i1,i2); Using where; Using join buffer (Block Nested Loop) +select max(A.key1 + B.key1 + A.key2 + B.key2 + A.key3 + B.key3 + A.key4 + B.key4 + A.key5 + B.key5) +from t0 as A force index(i1,i2), t0 as B force index (i1,i2) +where (A.key1 < 500000 or A.key2 < 3) +and (B.key1 < 500000 or B.key2 < 3); +max(A.key1 + B.key1 + A.key2 + B.key2 + A.key3 + B.key3 + A.key4 + B.key4 + A.key5 + B.key5) +10240 +update t0 set key1=1; +explain select max(A.key1 + B.key1 + A.key2 + B.key2 + A.key3 + B.key3 + A.key4 + B.key4 + A.key5 + B.key5) +from t0 as A force index(i1,i2), t0 as B force index (i1,i2) +where (A.key1 = 1 or A.key2 = 1) +and (B.key1 = 1 or B.key2 = 1); +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE A index_merge i1,i2 i1,i2 4,4 NULL # Using union(i1,i2); Using where +1 SIMPLE B index_merge i1,i2 i1,i2 4,4 NULL # Using union(i1,i2); Using where; Using join buffer (Block Nested Loop) +select max(A.key1 + B.key1 + A.key2 + B.key2 + A.key3 + B.key3 + A.key4 + B.key4 + A.key5 + B.key5) +from t0 as A force index(i1,i2), t0 as B force index (i1,i2) +where (A.key1 = 1 or A.key2 = 1) +and (B.key1 = 1 or B.key2 = 1); +max(A.key1 + B.key1 + A.key2 + B.key2 + A.key3 + B.key3 + A.key4 + B.key4 + A.key5 + B.key5) +8194 +alter table t0 add filler1 char(200), add filler2 char(200), add filler3 char(200); +update t0 set key2=1, key3=1, key4=1, key5=1,key6=1,key7=1 where key7 < 500; +select max(A.key1 + B.key1 + A.key2 + B.key2 + A.key3 + B.key3 + A.key4 + B.key4 + A.key5 + B.key5) +from t0 as A, t0 as B +where (A.key1 = 1 and A.key2 = 1 and A.key3 = 1 and A.key4=1 and A.key5=1 and A.key6=1 and A.key7 = 1 or A.key8=1) +and (B.key1 = 1 and B.key2 = 1 and B.key3 = 1 and B.key4=1 and B.key5=1 and B.key6=1 and B.key7 = 1 or B.key8=1); +max(A.key1 + B.key1 + A.key2 + B.key2 + A.key3 + B.key3 + A.key4 + B.key4 + A.key5 + B.key5) +8186 +set 
join_buffer_size= @save_join_buffer_size; +drop table t0, t1, t2, t3, t4; +CREATE TABLE t1 ( +cola char(3) not null, colb char(3) not null, filler char(200), +key(cola), key(colb) +); +INSERT INTO t1 VALUES ('foo','bar', 'ZZ'),('fuz','baz', 'ZZ'); +OPTIMIZE TABLE t1; +Table Op Msg_type Msg_text +test.t1 optimize status OK +select count(*) from t1; +count(*) +8704 +explain select * from t1 WHERE cola = 'foo' AND colb = 'bar'; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 index_merge cola,colb cola,colb 3,3 NULL # Using intersect(cola,colb); Using where +explain select * from t1 force index(cola,colb) WHERE cola = 'foo' AND colb = 'bar'; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 index_merge cola,colb cola,colb 3,3 NULL # Using intersect(cola,colb); Using where +drop table t1; +CREATE TABLE t1(a INT); +INSERT INTO t1 VALUES(1); +CREATE TABLE t2(a INT, b INT, dummy CHAR(16) DEFAULT '', KEY(a), KEY(b)); +INSERT INTO t2(a,b) VALUES +(0,0),(0,0),(0,0),(0,0),(0,0),(0,0),(0,0),(0,0),(0,0),(0,0), +(0,0),(0,0),(0,0),(0,0),(0,0),(0,0),(0,0),(0,0),(0,0),(0,0), +(0,0),(0,0),(0,0),(0,0),(0,0),(0,0),(0,0),(0,0),(0,0),(0,0), +(0,0),(0,0),(0,0),(0,0),(0,0),(0,0),(0,0),(0,0),(0,0),(0,0), +(0,0),(0,0),(0,0),(0,0),(0,0),(0,0),(0,0),(0,0),(0,0),(0,0), +(0,0),(0,0),(0,0),(0,0),(0,0),(0,0),(0,0),(0,0),(0,0),(0,0), +(0,0),(0,0),(0,0),(0,0),(0,0),(0,0),(0,0),(0,0),(0,0),(0,0), +(0,0),(0,0),(0,0),(0,0),(0,0),(0,0),(0,0),(0,0),(0,0),(0,0), +(0,0),(0,0),(0,0),(0,0),(0,0),(0,0),(0,0),(0,0),(0,0),(0,0), +(0,0),(0,0),(0,0),(0,0),(0,0),(0,0),(0,0),(0,0),(0,0),(0,0), +(0,0),(0,0),(0,0),(0,0),(0,0),(0,0),(0,0),(0,0),(0,0),(0,0), +(0,0),(0,0),(0,0),(0,0),(0,0),(0,0),(0,0),(0,0),(0,0),(0,0), +(0,0),(0,0),(0,0),(0,0),(0,0),(0,0),(0,0),(0,0),(0,0),(0,0), +(0,0),(0,0),(0,0),(0,0),(0,0),(0,0),(0,0),(0,0),(0,0),(0,0), +(0,0),(0,0),(0,0),(0,0),(0,0),(0,0),(0,0),(0,0),(0,0),(0,0), 
+(0,0),(0,0),(0,0),(0,0),(0,0),(0,0),(0,0),(0,0),(0,0),(0,0), +(0,0),(0,0),(0,0),(0,0),(0,0),(0,0),(0,0),(0,0),(0,0),(0,0), +(0,0),(0,0),(0,0),(0,0),(0,0),(0,0),(0,0),(0,0),(0,0),(0,0), +(1,2); +LOCK TABLES t1 WRITE, t2 WRITE; +INSERT INTO t2(a,b) VALUES(1,2); +SELECT t2.a FROM t1,t2 WHERE t2.b=2 AND t2.a=1; +a +1 +1 +UNLOCK TABLES; +DROP TABLE t1, t2; +CREATE TABLE `t1` ( +`a` int(11) DEFAULT NULL, +`filler` char(200) DEFAULT NULL, +`b` int(11) DEFAULT NULL, +KEY `a` (`a`), +KEY `b` (`b`) +) ENGINE=MEMORY DEFAULT CHARSET=latin1; +insert into t1 values +(0, 'filler', 0), (1, 'filler', 1), (2, 'filler', 2), (3, 'filler', 3), +(4, 'filler', 4), (5, 'filler', 5), (6, 'filler', 6), (7, 'filler', 7), +(8, 'filler', 8), (9, 'filler', 9), (0, 'filler', 0), (1, 'filler', 1), +(2, 'filler', 2), (3, 'filler', 3), (4, 'filler', 4), (5, 'filler', 5), +(6, 'filler', 6), (7, 'filler', 7), (8, 'filler', 8), (9, 'filler', 9), +(10, 'filler', 10), (11, 'filler', 11), (12, 'filler', 12), (13, 'filler', 13), +(14, 'filler', 14), (15, 'filler', 15), (16, 'filler', 16), (17, 'filler', 17), +(18, 'filler', 18), (19, 'filler', 19), (4, '5 ', 0), (5, '4 ', 0), +(4, '4 ', 0), (4, 'qq ', 5), (5, 'qq ', 4), (4, 'zz ', 4); +create table t2( +`a` int(11) DEFAULT NULL, +`filler` char(200) DEFAULT NULL, +`b` int(11) DEFAULT NULL, +KEY USING BTREE (`a`), +KEY USING BTREE (`b`) +) ENGINE=MEMORY DEFAULT CHARSET=latin1; +insert into t2 select * from t1; +must use sort-union rather than union: +explain select * from t1 where a=4 or b=4; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 index_merge a,b a,b 5,5 NULL # Using sort_union(a,b); Using where +select * from t1 where a=4 or b=4; +a filler b +4 4 0 +4 5 0 +4 filler 4 +4 filler 4 +4 qq 5 +4 zz 4 +5 qq 4 +select * from t1 ignore index(a,b) where a=4 or b=4; +a filler b +4 4 0 +4 5 0 +4 filler 4 +4 filler 4 +4 qq 5 +4 zz 4 +5 qq 4 +must use union, not sort-union: +explain select * from t2 where a=4 or b=4; +id 
select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t2 index_merge a,b a,b 5,5 NULL # Using union(a,b); Using where +select * from t2 where a=4 or b=4; +a filler b +4 4 0 +4 5 0 +4 filler 4 +4 filler 4 +4 qq 5 +4 zz 4 +5 qq 4 +drop table t1, t2; +CREATE TABLE t1 (a varchar(8), b set('a','b','c','d','e','f','g','h'), +KEY b(b), KEY a(a)); +INSERT INTO t1 VALUES ('y',''), ('z',''); +SELECT b,a from t1 WHERE (b!='c' AND b!='f' && b!='h') OR +(a='pure-S') OR (a='DE80337a') OR (a='DE80799'); +b a + y + z +DROP TABLE t1; +# +# BUG#40974: Incorrect query results when using clause evaluated using range check +# +create table t0 (a int); +insert into t0 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9); +create table t1 (a int); +insert into t1 values (1),(2); +create table t2(a int, b int); +insert into t2 values (1,1), (2, 1000); +create table t3 (a int, b int, filler char(100), key(a), key(b)); +insert into t3 select 1000, 1000,'filler' from t0 A, t0 B, t0 C; +insert into t3 values (1,1,'data'); +insert into t3 values (1,1,'data'); +The plan should be ALL/ALL/ALL(Range checked for each record (index map: 0x3) +explain select * from t1 +where exists (select 1 from t2, t3 +where t2.a=t1.a and (t3.a=t2.b or t3.b=t2.b or t3.b=t2.b+1)); +id select_type table type possible_keys key key_len ref rows Extra +1 PRIMARY t1 ALL NULL NULL NULL NULL # Using where +2 DEPENDENT SUBQUERY t2 ALL NULL NULL NULL NULL # Using where +2 DEPENDENT SUBQUERY t3 ALL a,b NULL NULL NULL # Range checked for each record (index map: 0x3) +select * from t1 +where exists (select 1 from t2, t3 +where t2.a=t1.a and (t3.a=t2.b or t3.b=t2.b or t3.b=t2.b+1)); +a +1 +2 +drop table t0, t1, t2, t3; +# +# BUG#44810: index merge and order by with low sort_buffer_size +# crashes server! 
+# +CREATE TABLE t1(a VARCHAR(128),b VARCHAR(128),KEY(A),KEY(B)); +INSERT INTO t1 VALUES (REPEAT('a',128),REPEAT('b',128)); +INSERT INTO t1 SELECT * FROM t1; +INSERT INTO t1 SELECT * FROM t1; +INSERT INTO t1 SELECT * FROM t1; +INSERT INTO t1 SELECT * FROM t1; +INSERT INTO t1 SELECT * FROM t1; +INSERT INTO t1 SELECT * FROM t1; +SET SESSION sort_buffer_size=1; +Warnings: +Warning 1292 Truncated incorrect sort_buffer_size value: '1' +EXPLAIN +SELECT * FROM t1 FORCE INDEX(a,b) WHERE a LIKE 'a%' OR b LIKE 'b%' +ORDER BY a,b; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 index_merge a,b a,b 131,131 NULL # Using sort_union(a,b); Using where; Using filesort +SELECT * FROM t1 FORCE INDEX(a,b) WHERE a LIKE 'a%' OR b LIKE 'b%' +ORDER BY a,b; +SET SESSION sort_buffer_size=DEFAULT; +DROP TABLE t1; +End of 5.0 tests +set global rocksdb_force_flush_memtable_now=1; +#---------------- ROR-index_merge tests ----------------------- +SET SESSION DEFAULT_STORAGE_ENGINE = RocksDB; +drop table if exists t0,t1,t2; +create table t1 +( +/* Field names reflect value(rowid) distribution, st=STairs, swt= SaWTooth */ +st_a int not null default 0, +swt1a int not null default 0, +swt2a int not null default 0, +st_b int not null default 0, +swt1b int not null default 0, +swt2b int not null default 0, +/* fields/keys for row retrieval tests */ +key1 int, +key2 int, +key3 int, +key4 int, +/* make rows much bigger then keys */ +filler1 char (200), +filler2 char (200), +filler3 char (200), +filler4 char (200), +filler5 char (200), +filler6 char (200), +/* order of keys is important */ +key sta_swt12a(st_a,swt1a,swt2a), +key sta_swt1a(st_a,swt1a), +key sta_swt2a(st_a,swt2a), +key sta_swt21a(st_a,swt2a,swt1a), +key st_a(st_a), +key stb_swt1a_2b(st_b,swt1b,swt2a), +key stb_swt1b(st_b,swt1b), +key st_b(st_b), +key(key1), +key(key2), +key(key3), +key(key4) +) ; +create table t0 as select * from t1; +# Printing of many insert into t0 values (....) disabled. 
+alter table t1 disable keys; +Warnings: +Note 1031 Table storage engine for 't1' doesn't have this option +# Printing of many insert into t1 select .... from t0 disabled. +# Printing of many insert into t1 (...) values (....) disabled. +alter table t1 enable keys; +Warnings: +Note 1031 Table storage engine for 't1' doesn't have this option +select count(*) from t1; +count(*) +64801 +explain select key1,key2 from t1 where key1=100 and key2=100; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 index_merge key1,key2 key2,key1 5,5 NULL 2 Using intersect(key2,key1); Using where; Using index +select key1,key2 from t1 where key1=100 and key2=100; +key1 key2 +100 100 +select key1,key2,key3,key4,filler1 from t1 where key1=100 and key2=100 or key3=100 and key4=100; +key1 key2 key3 key4 filler1 +100 100 100 100 key1-key2-key3-key4 +insert into t1 (key1, key2, key3, key4, filler1) values (100, 100, -1, -1, 'key1-key2'); +insert into t1 (key1, key2, key3, key4, filler1) values (-1, -1, 100, 100, 'key4-key3'); +select key1,key2,filler1 from t1 where key1=100 and key2=100; +key1 key2 filler1 +100 100 key1-key2-key3-key4 +100 100 key1-key2 +select key1,key2 from t1 where key1=100 and key2=100; +key1 key2 +100 100 +100 100 +select key1,key2,key3,key4 from t1 where key1=100 and key2=100 or key3=100 and key4=100; +key1 key2 key3 key4 +100 100 100 100 +100 100 -1 -1 +-1 -1 100 100 +select key1,key2,key3,key4,filler1 from t1 where key1=100 and key2=100 or key3=100 and key4=100; +key1 key2 key3 key4 filler1 +100 100 100 100 key1-key2-key3-key4 +100 100 -1 -1 key1-key2 +-1 -1 100 100 key4-key3 +select key1,key2,key3 from t1 where key1=100 and key2=100 and key3=100; +key1 key2 key3 +100 100 100 +insert into t1 (key1,key2,key3,key4,filler1) values (101,101,101,101, 'key1234-101'); +select key1,key2,key3,key4,filler1 from t1 where key1=100 and key2=100 or key3=101; +key1 key2 key3 key4 filler1 +100 100 100 100 key1-key2-key3-key4 +100 100 -1 -1 key1-key2 
+101 101 101 101 key1234-101 +select key1,key2, filler1 from t1 where key1=100 and key2=100; +key1 key2 filler1 +100 100 key1-key2-key3-key4 +100 100 key1-key2 +update t1 set filler1='to be deleted' where key1=100 and key2=100; +update t1 set key1=200,key2=200 where key1=100 and key2=100; +delete from t1 where key1=200 and key2=200; +select key1,key2,filler1 from t1 where key2=100 and key2=200; +key1 key2 filler1 +select key1,key2,key3,key4,filler1 from t1 where key1=100 and key2=100 or key3=100 and key4=100; +key1 key2 key3 key4 filler1 +-1 -1 100 100 key4-key3 +delete from t1 where key3=100 and key4=100; +select key1,key2,key3,key4,filler1 from t1 where key1=100 and key2=100 or key3=100 and key4=100; +key1 key2 key3 key4 filler1 +select key1,key2 from t1 where key1=100 and key2=100; +key1 key2 +insert into t1 (key1, key2, key3, key4, filler1) values (100, 100, 200, 200,'key1-key2-key3-key4-1'); +insert into t1 (key1, key2, key3, key4, filler1) values (100, 100, 200, 200,'key1-key2-key3-key4-2'); +insert into t1 (key1, key2, key3, key4, filler1) values (100, 100, 200, 200,'key1-key2-key3-key4-3'); +select key1,key2,key3,key4,filler1 from t1 where key3=200 or (key1=100 and key2=100) or key4=200; +key1 key2 key3 key4 filler1 +100 100 200 200 key1-key2-key3-key4-1 +100 100 200 200 key1-key2-key3-key4-2 +100 100 200 200 key1-key2-key3-key4-3 +insert into t1 (key1, key2, key3, key4, filler1) values (-1, -1, -1, 200,'key4'); +select key1,key2,key3,key4,filler1 from t1 where key3=200 or (key1=100 and key2=100) or key4=200; +key1 key2 key3 key4 filler1 +100 100 200 200 key1-key2-key3-key4-1 +100 100 200 200 key1-key2-key3-key4-2 +100 100 200 200 key1-key2-key3-key4-3 +-1 -1 -1 200 key4 +insert into t1 (key1, key2, key3, key4, filler1) values (-1, -1, 200, -1,'key3'); +select key1,key2,key3,key4,filler1 from t1 where key3=200 or (key1=100 and key2=100) or key4=200; +key1 key2 key3 key4 filler1 +100 100 200 200 key1-key2-key3-key4-1 +100 100 200 200 key1-key2-key3-key4-2 
+100 100 200 200 key1-key2-key3-key4-3 +-1 -1 -1 200 key4 +-1 -1 200 -1 key3 +drop table t0,t1; +create table t2 ( +a char(10), +b char(10), +filler1 char(255), +filler2 char(255), +key(a(5)), +key(b(5)) +); +select count(a) from t2 where a='BBBBBBBB'; +count(a) +4 +select count(a) from t2 where b='BBBBBBBB'; +count(a) +4 +expla_or_bin select count(a_or_b) from t2 where a_or_b='AAAAAAAA' a_or_bnd a_or_b='AAAAAAAA'; +id select_type ta_or_ba_or_ble type possia_or_ble_keys key key_len ref rows Extra_or_b +1 SIMPLE t2 ref a_or_b,a_or_b a_or_b 6 const 1 Using where +select count(a) from t2 where a='AAAAAAAA' and b='AAAAAAAA'; +count(a) +4 +select count(a) from t2 ignore index(a,b) where a='AAAAAAAA' and b='AAAAAAAA'; +count(a) +4 +insert into t2 values ('ab', 'ab', 'uh', 'oh'); +explain select a from t2 where a='ab'; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t2 ref a a 6 const 1 Using where +drop table t2; +CREATE TABLE t1(c1 INT, c2 INT DEFAULT 0, c3 CHAR(255) DEFAULT '', +KEY(c1), KEY(c2), KEY(c3)); +INSERT INTO t1(c1) VALUES(0),(0),(0),(0),(0),(0),(0),(0),(0),(0),(0),(0),(0), +(0),(0),(0),(0),(0),(0),(0),(0),(0),(0),(0),(0),(0),(0),(0),(0); +INSERT INTO t1 VALUES(0,0,0); +CREATE TABLE t2(c1 int); +INSERT INTO t2 VALUES(1); +DELETE t1 FROM t1,t2 WHERE t1.c1=0 AND t1.c2=0; +SELECT * FROM t1; +c1 c2 c3 +DROP TABLE t1,t2; +set global rocksdb_force_flush_memtable_now=1; +#---------------- Index merge test 2 ------------------------------------------- +SET SESSION DEFAULT_STORAGE_ENGINE = RocksDB; +drop table if exists t1,t2; +create table t1 +( +key1 int not null, +key2 int not null, +INDEX i1(key1), +INDEX i2(key2) +); +explain select * from t1 where key1 < 5 or key2 > 197; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 index_merge i1,i2 i1,i2 4,4 NULL 2 Using sort_union(i1,i2); Using where +select * from t1 where key1 < 5 or key2 > 197; +key1 key2 +0 200 +1 199 +2 198 +3 197 +4 196 +explain select * 
from t1 where key1 < 3 or key2 > 195; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 index_merge i1,i2 i1,i2 4,4 NULL 2 Using sort_union(i1,i2); Using where +select * from t1 where key1 < 3 or key2 > 195; +key1 key2 +0 200 +1 199 +2 198 +3 197 +4 196 +alter table t1 add str1 char (255) not null, +add zeroval int not null default 0, +add str2 char (255) not null, +add str3 char (255) not null; +update t1 set str1='aaa', str2='bbb', str3=concat(key2, '-', key1 div 2, '_' ,if(key1 mod 2 = 0, 'a', 'A')); +alter table t1 add primary key (str1, zeroval, str2, str3); +explain select * from t1 where key1 < 5 or key2 > 197; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 ALL i1,i2 NULL NULL NULL 200 Using where +select * from t1 where key1 < 5 or key2 > 197; +key1 key2 str1 zeroval str2 str3 +4 196 aaa 0 bbb 196-2_a +3 197 aaa 0 bbb 197-1_A +2 198 aaa 0 bbb 198-1_a +1 199 aaa 0 bbb 199-0_A +0 200 aaa 0 bbb 200-0_a +explain select * from t1 where key1 < 3 or key2 > 195; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 ALL i1,i2 NULL NULL NULL 200 Using where +select * from t1 where key1 < 3 or key2 > 195; +key1 key2 str1 zeroval str2 str3 +4 196 aaa 0 bbb 196-2_a +3 197 aaa 0 bbb 197-1_A +2 198 aaa 0 bbb 198-1_a +1 199 aaa 0 bbb 199-0_A +0 200 aaa 0 bbb 200-0_a +drop table t1; +create table t1 ( +pk integer not null auto_increment primary key, +key1 integer, +key2 integer not null, +filler char (200), +index (key1), +index (key2) +); +show warnings; +Level Code Message +explain select pk from t1 where key1 = 1 and key2 = 1; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 ref key1,key2 key1 5 const 1 Using where +select pk from t1 where key2 = 1 and key1 = 1; +pk +26 +27 +select pk from t1 ignore index(key1,key2) where key2 = 1 and key1 = 1; +pk +26 +27 +drop table t1; +create table t1 ( +pk int primary key auto_increment, +key1a int, +key2a int, 
+key1b int, +key2b int, +dummy1 int, +dummy2 int, +dummy3 int, +dummy4 int, +key3a int, +key3b int, +filler1 char (200), +index i1(key1a, key1b), +index i2(key2a, key2b), +index i3(key3a, key3b) +); +create table t2 (a int); +insert into t2 values (0),(1),(2),(3),(4),(NULL); +insert into t1 (key1a, key1b, key2a, key2b, key3a, key3b) +select A.a, B.a, C.a, D.a, C.a, D.a from t2 A,t2 B,t2 C, t2 D; +insert into t1 (key1a, key1b, key2a, key2b, key3a, key3b) +select key1a, key1b, key2a, key2b, key3a, key3b from t1; +insert into t1 (key1a, key1b, key2a, key2b, key3a, key3b) +select key1a, key1b, key2a, key2b, key3a, key3b from t1; +analyze table t1; +Table Op Msg_type Msg_text +test.t1 analyze status OK +select count(*) from t1; +count(*) +5184 +select count(*) from t1 where +key1a = 2 and key1b is null and key2a = 2 and key2b is null; +count(*) +4 +select count(*) from t1 where +key1a = 2 and key1b is null and key3a = 2 and key3b is null; +count(*) +4 +drop table t1,t2; +create table t1 ( +id1 int, +id2 date , +index idx2 (id1,id2), +index idx1 (id2) +); +insert into t1 values(1,'20040101'), (2,'20040102'); +select * from t1 where id1 = 1 and id2= '20040101'; +id1 id2 +1 2004-01-01 +drop table t1; +drop view if exists v1; +CREATE TABLE t1 ( +`oid` int(11) unsigned NOT NULL auto_increment, +`fk_bbk_niederlassung` int(11) unsigned NOT NULL, +`fk_wochentag` int(11) unsigned NOT NULL, +`uhrzeit_von` time NOT NULL COMMENT 'HH:MM', +`uhrzeit_bis` time NOT NULL COMMENT 'HH:MM', +`geloescht` tinyint(4) NOT NULL, +`version` int(5) NOT NULL, +PRIMARY KEY (`oid`), +KEY `fk_bbk_niederlassung` (`fk_bbk_niederlassung`), +KEY `fk_wochentag` (`fk_wochentag`), +KEY `ix_version` (`version`) +) DEFAULT CHARSET=latin1; +insert into t1 values +(1, 38, 1, '08:00:00', '13:00:00', 0, 1), +(2, 38, 2, '08:00:00', '13:00:00', 0, 1), +(3, 38, 3, '08:00:00', '13:00:00', 0, 1), +(4, 38, 4, '08:00:00', '13:00:00', 0, 1), +(5, 38, 5, '08:00:00', '13:00:00', 0, 1), +(6, 38, 5, '08:00:00', '13:00:00', 
1, 2), +(7, 38, 3, '08:00:00', '13:00:00', 1, 2), +(8, 38, 1, '08:00:00', '13:00:00', 1, 2), +(9, 38, 2, '08:00:00', '13:00:00', 1, 2), +(10, 38, 4, '08:00:00', '13:00:00', 1, 2), +(11, 38, 1, '08:00:00', '13:00:00', 0, 3), +(12, 38, 2, '08:00:00', '13:00:00', 0, 3), +(13, 38, 3, '08:00:00', '13:00:00', 0, 3), +(14, 38, 4, '08:00:00', '13:00:00', 0, 3), +(15, 38, 5, '08:00:00', '13:00:00', 0, 3), +(16, 38, 4, '08:00:00', '13:00:00', 0, 4), +(17, 38, 5, '08:00:00', '13:00:00', 0, 4), +(18, 38, 1, '08:00:00', '13:00:00', 0, 4), +(19, 38, 2, '08:00:00', '13:00:00', 0, 4), +(20, 38, 3, '08:00:00', '13:00:00', 0, 4), +(21, 7, 1, '08:00:00', '13:00:00', 0, 1), +(22, 7, 2, '08:00:00', '13:00:00', 0, 1), +(23, 7, 3, '08:00:00', '13:00:00', 0, 1), +(24, 7, 4, '08:00:00', '13:00:00', 0, 1), +(25, 7, 5, '08:00:00', '13:00:00', 0, 1); +create view v1 as +select +zeit1.oid AS oid, +zeit1.fk_bbk_niederlassung AS fk_bbk_niederlassung, +zeit1.fk_wochentag AS fk_wochentag, +zeit1.uhrzeit_von AS uhrzeit_von, +zeit1.uhrzeit_bis AS uhrzeit_bis, +zeit1.geloescht AS geloescht, +zeit1.version AS version +from +t1 zeit1 +where +(zeit1.version = +(select max(zeit2.version) AS `max(version)` + from t1 zeit2 +where +((zeit1.fk_bbk_niederlassung = zeit2.fk_bbk_niederlassung) and +(zeit1.fk_wochentag = zeit2.fk_wochentag) and +(zeit1.uhrzeit_von = zeit2.uhrzeit_von) and +(zeit1.uhrzeit_bis = zeit2.uhrzeit_bis) +) +) +) +and (zeit1.geloescht = 0); +select * from v1 where oid = 21; +oid fk_bbk_niederlassung fk_wochentag uhrzeit_von uhrzeit_bis geloescht version +21 7 1 08:00:00 13:00:00 0 1 +drop view v1; +drop table t1; +CREATE TABLE t1( +t_cpac varchar(2) NOT NULL, +t_vers varchar(4) NOT NULL, +t_rele varchar(2) NOT NULL, +t_cust varchar(4) NOT NULL, +filler1 char(250) default NULL, +filler2 char(250) default NULL, +PRIMARY KEY (t_cpac,t_vers,t_rele,t_cust), +UNIQUE KEY IX_4 (t_cust,t_cpac,t_vers,t_rele), +KEY IX_5 (t_vers,t_rele,t_cust) +); +insert into t1 values +('tm','2.5 ','a ',' 
','',''), ('tm','2.5U','a ','stnd','',''), +('da','3.3 ','b ',' ','',''), ('da','3.3U','b ','stnd','',''), +('tl','7.6 ','a ',' ','',''), ('tt','7.6 ','a ',' ','',''), +('bc','B61 ','a ',' ','',''), ('bp','B61 ','a ',' ','',''), +('ca','B61 ','a ',' ','',''), ('ci','B61 ','a ',' ','',''), +('cp','B61 ','a ',' ','',''), ('dm','B61 ','a ',' ','',''), +('ec','B61 ','a ',' ','',''), ('ed','B61 ','a ',' ','',''), +('fm','B61 ','a ',' ','',''), ('nt','B61 ','a ',' ','',''), +('qm','B61 ','a ',' ','',''), ('tc','B61 ','a ',' ','',''), +('td','B61 ','a ',' ','',''), ('tf','B61 ','a ',' ','',''), +('tg','B61 ','a ',' ','',''), ('ti','B61 ','a ',' ','',''), +('tp','B61 ','a ',' ','',''), ('ts','B61 ','a ',' ','',''), +('wh','B61 ','a ',' ','',''), ('bc','B61U','a ','stnd','',''), +('bp','B61U','a ','stnd','',''), ('ca','B61U','a ','stnd','',''), +('ci','B61U','a ','stnd','',''), ('cp','B61U','a ','stnd','',''), +('dm','B61U','a ','stnd','',''), ('ec','B61U','a ','stnd','',''), +('fm','B61U','a ','stnd','',''), ('nt','B61U','a ','stnd','',''), +('qm','B61U','a ','stnd','',''), ('tc','B61U','a ','stnd','',''), +('td','B61U','a ','stnd','',''), ('tf','B61U','a ','stnd','',''), +('tg','B61U','a ','stnd','',''), ('ti','B61U','a ','stnd','',''), +('tp','B61U','a ','stnd','',''), ('ts','B61U','a ','stnd','',''), +('wh','B61U','a ','stnd','',''); +show create table t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `t_cpac` varchar(2) NOT NULL, + `t_vers` varchar(4) NOT NULL, + `t_rele` varchar(2) NOT NULL, + `t_cust` varchar(4) NOT NULL, + `filler1` char(250) DEFAULT NULL, + `filler2` char(250) DEFAULT NULL, + PRIMARY KEY (`t_cpac`,`t_vers`,`t_rele`,`t_cust`), + UNIQUE KEY `IX_4` (`t_cust`,`t_cpac`,`t_vers`,`t_rele`), + KEY `IX_5` (`t_vers`,`t_rele`,`t_cust`) +) ENGINE=ROCKSDB DEFAULT CHARSET=latin1 +select t_vers,t_rele,t_cust,filler1 from t1 where t_vers = '7.6'; +t_vers t_rele t_cust filler1 +7.6 a +7.6 a +select t_vers,t_rele,t_cust,filler1 from t1 where t_vers = '7.6' + and 
t_rele='a' and t_cust = ' '; +t_vers t_rele t_cust filler1 +7.6 a +7.6 a +drop table t1; +create table t1 ( +pk int(11) not null auto_increment, +a int(11) not null default '0', +b int(11) not null default '0', +c int(11) not null default '0', +filler1 datetime, filler2 varchar(15), +filler3 longtext, +kp1 varchar(4), kp2 varchar(7), +kp3 varchar(2), kp4 varchar(4), +kp5 varchar(7), +filler4 char(1), +primary key (pk), +key idx1(a,b,c), +key idx2(c), +key idx3(kp1,kp2,kp3,kp4,kp5) +) default charset=latin1; +set @fill=NULL; +SELECT COUNT(*) FROM t1 WHERE b = 0 AND a = 0 AND c = 13286427 AND +kp1='279' AND kp2='ELM0678' AND kp3='6' AND kp4='10' AND kp5 = 'R '; +COUNT(*) +1 +drop table t1; +create table t1 +( +key1 int not null, +key2 int not null default 0, +key3 int not null default 0 +); +insert into t1(key1) values (1),(2),(3),(4),(5),(6),(7),(8); +set @d=8; +insert into t1 (key1) select key1+@d from t1; +set @d=@d*2; +insert into t1 (key1) select key1+@d from t1; +set @d=@d*2; +insert into t1 (key1) select key1+@d from t1; +set @d=@d*2; +insert into t1 (key1) select key1+@d from t1; +set @d=@d*2; +insert into t1 (key1) select key1+@d from t1; +set @d=@d*2; +insert into t1 (key1) select key1+@d from t1; +set @d=@d*2; +insert into t1 (key1) select key1+@d from t1; +set @d=@d*2; +alter table t1 add index i2(key2); +alter table t1 add index i3(key3); +update t1 set key2=key1,key3=key1; +select * from t1 where (key3 > 30 and key3<35) or (key2 >32 and key2 < 40); +key1 key2 key3 +31 31 31 +32 32 32 +33 33 33 +34 34 34 +35 35 35 +36 36 36 +37 37 37 +38 38 38 +39 39 39 +drop table t1; +# +# Bug#56423: Different count with SELECT and CREATE SELECT queries +# +CREATE TABLE t1 ( +a INT, +b INT, +c INT, +d INT, +PRIMARY KEY (a), +KEY (c), +KEY bd (b,d) +); +INSERT INTO t1 VALUES +(1, 0, 1, 0), +(2, 1, 1, 1), +(3, 1, 1, 1), +(4, 0, 1, 1); +EXPLAIN +SELECT a +FROM t1 +WHERE c = 1 AND b = 1 AND d = 1; +id select_type table type possible_keys key key_len ref rows Extra +1 
SIMPLE t1 ref c,bd c 5 const 1 Using where +CREATE TABLE t2 ( a INT ) +SELECT a +FROM t1 +WHERE c = 1 AND b = 1 AND d = 1; +SELECT * FROM t2; +a +2 +3 +DROP TABLE t1, t2; +CREATE TABLE t1( a INT, b INT, KEY(a), KEY(b) ); +INSERT INTO t1 VALUES (1, 2), (1, 2), (1, 2), (1, 2); +SELECT * FROM t1 FORCE INDEX(a, b) WHERE a = 1 AND b = 2; +a b +1 2 +1 2 +1 2 +1 2 +DROP TABLE t1; +# Code coverage of fix. +CREATE TABLE t1 ( a INT NOT NULL AUTO_INCREMENT PRIMARY KEY, b INT); +INSERT INTO t1 (b) VALUES (1); +UPDATE t1 SET b = 2 WHERE a = 1; +SELECT * FROM t1; +a b +1 2 +CREATE TABLE t2 ( a INT NOT NULL AUTO_INCREMENT PRIMARY KEY, b VARCHAR(1) ); +INSERT INTO t2 (b) VALUES ('a'); +UPDATE t2 SET b = 'b' WHERE a = 1; +SELECT * FROM t2; +a b +1 b +DROP TABLE t1, t2; +# +# BUG#13970015: ASSERT `MIN_ENDP || MAX_ENDP' FAILED IN +# HANDLER::MULTI_RANGE_READ_INFO_CONST +# +CREATE TABLE t1 ( +pk INT NOT NULL, +col_int_key INT NOT NULL, +col_varchar_key VARCHAR(1) NOT NULL, +PRIMARY KEY (pk), +KEY col_int_key (col_int_key), +KEY col_varchar_key (col_varchar_key,col_int_key) +); +INSERT INTO t1 VALUES (1,1,'a'), (2,2,'b'); +EXPLAIN +SELECT col_int_key +FROM t1 +WHERE col_varchar_key >= 'l' OR +(((pk BETWEEN 141 AND 141) OR col_varchar_key <> 'l') +AND ((pk BETWEEN 141 AND 141) OR (col_int_key > 141))); +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 index PRIMARY,col_int_key,col_varchar_key col_varchar_key 7 NULL 2 Using where; Using index +SELECT col_int_key +FROM t1 +WHERE col_varchar_key >= 'l' OR +(((pk BETWEEN 141 AND 141) OR col_varchar_key <> 'l') +AND ((pk BETWEEN 141 AND 141) OR (col_int_key > 141))); +col_int_key +DROP TABLE t1; +set global rocksdb_force_flush_memtable_now=1; +#---------------- 2-sweeps read Index merge test 2 ------------------------------- +SET SESSION DEFAULT_STORAGE_ENGINE = RocksDB; +drop table if exists t1; +create table t1 ( +pk int primary key, +key1 int, +key2 int, +filler char(200), +filler2 char(200), +index(key1), 
+index(key2) +); +select * from t1 where (key1 >= 2 and key1 <= 10) or (pk >= 4 and pk <=8 ); +pk key1 key2 filler filler2 +10 10 10 filler-data filler-data-2 +2 2 2 filler-data filler-data-2 +3 3 3 filler-data filler-data-2 +4 4 4 filler-data filler-data-2 +5 5 5 filler-data filler-data-2 +6 6 6 filler-data filler-data-2 +7 7 7 filler-data filler-data-2 +8 8 8 filler-data filler-data-2 +9 9 9 filler-data filler-data-2 +set @maxv=1000; +select * from t1 where +(pk < 5) or (pk > 10 and pk < 15) or (pk >= 50 and pk < 55 ) or (pk > @maxv-10) +or key1=18 or key1=60; +pk key1 key2 filler filler2 +1 1 1 filler-data filler-data-2 +1000 1000 1000 filler-data filler-data-2 +11 11 11 filler-data filler-data-2 +12 12 12 filler-data filler-data-2 +13 13 13 filler-data filler-data-2 +14 14 14 filler-data filler-data-2 +18 18 18 filler-data filler-data-2 +2 2 2 filler-data filler-data-2 +3 3 3 filler-data filler-data-2 +4 4 4 filler-data filler-data-2 +50 50 50 filler-data filler-data-2 +51 51 51 filler-data filler-data-2 +52 52 52 filler-data filler-data-2 +53 53 53 filler-data filler-data-2 +54 54 54 filler-data filler-data-2 +60 60 60 filler-data filler-data-2 +991 991 991 filler-data filler-data-2 +992 992 992 filler-data filler-data-2 +993 993 993 filler-data filler-data-2 +994 994 994 filler-data filler-data-2 +995 995 995 filler-data filler-data-2 +996 996 996 filler-data filler-data-2 +997 997 997 filler-data filler-data-2 +998 998 998 filler-data filler-data-2 +999 999 999 filler-data filler-data-2 +select * from t1 where +(pk < 5) or (pk > 10 and pk < 15) or (pk >= 50 and pk < 55 ) or (pk > @maxv-10) +or key1 < 3 or key1 > @maxv-11; +pk key1 key2 filler filler2 +1 1 1 filler-data filler-data-2 +1000 1000 1000 filler-data filler-data-2 +11 11 11 filler-data filler-data-2 +12 12 12 filler-data filler-data-2 +13 13 13 filler-data filler-data-2 +14 14 14 filler-data filler-data-2 +2 2 2 filler-data filler-data-2 +3 3 3 filler-data filler-data-2 +4 4 4 filler-data 
filler-data-2 +50 50 50 filler-data filler-data-2 +51 51 51 filler-data filler-data-2 +52 52 52 filler-data filler-data-2 +53 53 53 filler-data filler-data-2 +54 54 54 filler-data filler-data-2 +990 990 990 filler-data filler-data-2 +991 991 991 filler-data filler-data-2 +992 992 992 filler-data filler-data-2 +993 993 993 filler-data filler-data-2 +994 994 994 filler-data filler-data-2 +995 995 995 filler-data filler-data-2 +996 996 996 filler-data filler-data-2 +997 997 997 filler-data filler-data-2 +998 998 998 filler-data filler-data-2 +999 999 999 filler-data filler-data-2 +select * from t1 where +(pk < 5) or (pk > 10 and pk < 15) or (pk >= 50 and pk < 55 ) or (pk > @maxv-10) +or +(key1 < 5) or (key1 > 10 and key1 < 15) or (key1 >= 50 and key1 < 55 ) or (key1 > @maxv-10); +pk key1 key2 filler filler2 +1 1 1 filler-data filler-data-2 +1000 1000 1000 filler-data filler-data-2 +11 11 11 filler-data filler-data-2 +12 12 12 filler-data filler-data-2 +13 13 13 filler-data filler-data-2 +14 14 14 filler-data filler-data-2 +2 2 2 filler-data filler-data-2 +3 3 3 filler-data filler-data-2 +4 4 4 filler-data filler-data-2 +50 50 50 filler-data filler-data-2 +51 51 51 filler-data filler-data-2 +52 52 52 filler-data filler-data-2 +53 53 53 filler-data filler-data-2 +54 54 54 filler-data filler-data-2 +991 991 991 filler-data filler-data-2 +992 992 992 filler-data filler-data-2 +993 993 993 filler-data filler-data-2 +994 994 994 filler-data filler-data-2 +995 995 995 filler-data filler-data-2 +996 996 996 filler-data filler-data-2 +997 997 997 filler-data filler-data-2 +998 998 998 filler-data filler-data-2 +999 999 999 filler-data filler-data-2 +select * from t1 where +(pk > 10 and pk < 15) or (pk >= 50 and pk < 55 ) +or +(key1 < 5) or (key1 > @maxv-10); +pk key1 key2 filler filler2 +1 1 1 filler-data filler-data-2 +1000 1000 1000 filler-data filler-data-2 +11 11 11 filler-data filler-data-2 +12 12 12 filler-data filler-data-2 +13 13 13 filler-data filler-data-2 +14 14 14 
filler-data filler-data-2 +2 2 2 filler-data filler-data-2 +3 3 3 filler-data filler-data-2 +4 4 4 filler-data filler-data-2 +50 50 50 filler-data filler-data-2 +51 51 51 filler-data filler-data-2 +52 52 52 filler-data filler-data-2 +53 53 53 filler-data filler-data-2 +54 54 54 filler-data filler-data-2 +991 991 991 filler-data filler-data-2 +992 992 992 filler-data filler-data-2 +993 993 993 filler-data filler-data-2 +994 994 994 filler-data filler-data-2 +995 995 995 filler-data filler-data-2 +996 996 996 filler-data filler-data-2 +997 997 997 filler-data filler-data-2 +998 998 998 filler-data filler-data-2 +999 999 999 filler-data filler-data-2 +drop table t1; +set global rocksdb_force_flush_memtable_now=1; +#---------------- Clustered PK ROR-index_merge tests ----------------------------- +SET SESSION DEFAULT_STORAGE_ENGINE = RocksDB; +drop table if exists t1; +create table t1 +( +pk1 int not null, +pk2 int not null, +key1 int not null, +key2 int not null, +pktail1ok int not null, +pktail2ok int not null, +pktail3bad int not null, +pktail4bad int not null, +pktail5bad int not null, +pk2copy int not null, +badkey int not null, +filler1 char (200), +filler2 char (200), +key (key1), +key (key2), +/* keys with tails from CPK members */ +key (pktail1ok, pk1), +key (pktail2ok, pk1, pk2), +key (pktail3bad, pk2, pk1), +key (pktail4bad, pk1, pk2copy), +key (pktail5bad, pk1, pk2, pk2copy), +primary key (pk1, pk2) +); +explain select * from t1 where pk1 = 1 and pk2 < 80 and key1=0; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 ref PRIMARY,key1 key1 8 const,const ROWS Using index condition +select * from t1 where pk1 = 1 and pk2 < 80 and key1=0; +pk1 pk2 key1 key2 pktail1ok pktail2ok pktail3bad pktail4bad pktail5bad pk2copy badkey filler1 filler2 +1 10 0 0 0 0 0 0 0 10 0 filler-data-10 filler2 +1 11 0 0 0 0 0 0 0 11 0 filler-data-11 filler2 +1 12 0 0 0 0 0 0 0 12 0 filler-data-12 filler2 +1 13 0 0 0 0 0 0 0 13 0 filler-data-13 filler2 +1 
14 0 0 0 0 0 0 0 14 0 filler-data-14 filler2 +1 15 0 0 0 0 0 0 0 15 0 filler-data-15 filler2 +1 16 0 0 0 0 0 0 0 16 0 filler-data-16 filler2 +1 17 0 0 0 0 0 0 0 17 0 filler-data-17 filler2 +1 18 0 0 0 0 0 0 0 18 0 filler-data-18 filler2 +1 19 0 0 0 0 0 0 0 19 0 filler-data-19 filler2 +explain select pk1,pk2 from t1 where key1 = 10 and key2=10 and 2*pk1+1 < 2*96+1; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 ref key1,key2 key1 4 const 1 Using index condition; Using where +select pk1,pk2 from t1 where key1 = 10 and key2=10 and 2*pk1+1 < 2*96+1; +pk1 pk2 +95 50 +95 51 +95 52 +95 53 +95 54 +95 55 +95 56 +95 57 +95 58 +95 59 +explain select * from t1 where badkey=1 and key1=10; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 ref key1 key1 4 const ROWS Using where +explain select * from t1 where pk1 < 7500 and key1 = 10; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 index_merge PRIMARY,key1 key1,PRIMARY 8,4 NULL ROWS Using intersect(key1,PRIMARY); Using where +explain select * from t1 where pktail1ok=1 and key1=10; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 ref key1,pktail1ok key1 4 const 1 Using where +explain select * from t1 where pktail2ok=1 and key1=10; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 ref key1,pktail2ok key1 4 const 1 Using where +explain select * from t1 where (pktail2ok=1 and pk1< 50000) or key1=10; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 index_merge PRIMARY,key1,pktail2ok pktail2ok,key1 8,4 NULL ROWS Using sort_union(pktail2ok,key1); Using where +explain select * from t1 where pktail3bad=1 and key1=10; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 ref key1,pktail3bad EITHER_KEY 4 const ROWS Using where +explain select * from t1 where pktail4bad=1 and key1=10; +id select_type table type possible_keys 
key key_len ref rows Extra +1 SIMPLE t1 ref key1,pktail4bad key1 4 const ROWS Using where +explain select * from t1 where pktail5bad=1 and key1=10; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 ref key1,pktail5bad key1 4 const ROWS Using where +explain select pk1,pk2,key1,key2 from t1 where key1 = 10 and key2=10 limit 10; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 ref key1,key2 key1 4 const 1 Using where +select pk1,pk2,key1,key2 from t1 where key1 = 10 and key2=10 limit 10; +pk1 pk2 key1 key2 +95 50 10 10 +95 51 10 10 +95 52 10 10 +95 53 10 10 +95 54 10 10 +95 55 10 10 +95 56 10 10 +95 57 10 10 +95 58 10 10 +95 59 10 10 +drop table t1; +create table t1 +( +RUNID varchar(22), +SUBMITNR varchar(5), +ORDERNR char(1), +PROGRAMM varchar(8), +TESTID varchar(4), +UCCHECK char(1), +ETEXT varchar(80), +ETEXT_TYPE char(1), +INFO char(1), +SEVERITY tinyint(3), +TADIRFLAG char(1), +PRIMARY KEY (RUNID,SUBMITNR,ORDERNR,PROGRAMM,TESTID,UCCHECK), +KEY `TVERM~KEY` (PROGRAMM,TESTID,UCCHECK) +) DEFAULT CHARSET=latin1; +update t1 set `ETEXT` = '', `ETEXT_TYPE`='', `INFO`='', `SEVERITY`='', `TADIRFLAG`='' +WHERE +`RUNID`= '' AND `SUBMITNR`= '' AND `ORDERNR`='' AND `PROGRAMM`='' AND +`TESTID`='' AND `UCCHECK`=''; +drop table t1; +# +# Bug#50402 Optimizer producing wrong results when using Index Merge on InnoDB +# +CREATE TABLE t1 (f1 INT, PRIMARY KEY (f1)); +INSERT INTO t1 VALUES (2); +CREATE TABLE t2 (f1 INT, f2 INT, f3 char(1), +PRIMARY KEY (f1), KEY (f2), KEY (f3) ); +INSERT INTO t2 VALUES (1, 1, 'h'), (2, 3, 'h'), (3, 2, ''), (4, 2, ''); +SELECT t1.f1 FROM t1 +WHERE (SELECT COUNT(*) FROM t2 WHERE t2.f3 = 'h' AND t2.f2 = t1.f1) = 0 AND t1.f1 = 2; +f1 +2 +EXPLAIN SELECT t1.f1 FROM t1 +WHERE (SELECT COUNT(*) FROM t2 WHERE t2.f3 = 'h' AND t2.f2 = t1.f1) = 0 AND t1.f1 = 2; +id select_type table type possible_keys key key_len ref rows Extra +1 PRIMARY t1 const PRIMARY PRIMARY 4 const 1 Using index +2 DEPENDENT 
SUBQUERY t2 ref f2,f3 f2 5 const 1 Using where +DROP TABLE t1,t2; +set global rocksdb_force_flush_memtable_now=1; +# +# Bug#11747423 32254: INDEX MERGE USED UNNECESSARILY +# +CREATE TABLE t1 ( +id INT NOT NULL PRIMARY KEY, +id2 INT NOT NULL, +id3 INT NOT NULL, +KEY (id2), +KEY (id3), +KEY covering_index (id2,id3) +) ENGINE=RocksDB; +INSERT INTO t1 VALUES (0, 0, 0), (1, 1, 1), (2, 2, 2), (3, 3, 3), (4, 4, 4), (5, 5, 5), (6, 6, 6), (7, 7, 7); +INSERT INTO t1 SELECT id + 8, id2 + 8, id3 +8 FROM t1; +INSERT INTO t1 SELECT id + 16, 7, 0 FROM t1; +EXPLAIN SELECT SQL_NO_CACHE count(*) FROM t1 WHERE id2=7 AND id3=0; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 ref id2,id3,covering_index id2 4 const 1 Using where +DROP TABLE t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/information_schema.result b/storage/rocksdb/mysql-test/rocksdb/r/information_schema.result index f55662183ca81..291effa832c0c 100644 --- a/storage/rocksdb/mysql-test/rocksdb/r/information_schema.result +++ b/storage/rocksdb/mysql-test/rocksdb/r/information_schema.result @@ -39,12 +39,10 @@ a int, b int, c int, d int, -e int, PRIMARY KEY (a) COMMENT "cf_a", KEY (b) COMMENT "cf_b", KEY (c) COMMENT "cf_c", -KEY (d) COMMENT "$per_index_cf", -KEY (e) COMMENT "rev:cf_d") ENGINE=ROCKSDB; +KEY (d) COMMENT "rev:cf_d") ENGINE=ROCKSDB; select * from INFORMATION_SCHEMA.ROCKSDB_GLOBAL_INFO where TYPE = 'CF_FLAGS'; TYPE NAME VALUE CF_FLAGS 0 default [0] @@ -52,8 +50,7 @@ CF_FLAGS 1 __system__ [0] CF_FLAGS 2 cf_a [0] CF_FLAGS 3 cf_b [0] CF_FLAGS 4 cf_c [0] -CF_FLAGS 5 test.t2.d [2] -CF_FLAGS 6 rev:cf_d [1] +CF_FLAGS 5 rev:cf_d [1] CREATE TABLE t3 (a INT, PRIMARY KEY (a)) ENGINE=ROCKSDB; insert into t3 (a) values (1), (2), (3); SET @ORIG_ROCKSDB_PAUSE_BACKGROUND_WORK = @@GLOBAL.ROCKSDB_PAUSE_BACKGROUND_WORK; @@ -69,7 +66,7 @@ SHOW GLOBAL VARIABLES LIKE 'ROCKSDB_PAUSE_BACKGROUND_WORK'; Variable_name Value rocksdb_pause_background_work ON DROP TABLE t3; -cf_id:0,index_id:268 
+cf_id:0,index_id:267 SET GLOBAL ROCKSDB_PAUSE_BACKGROUND_WORK=0; SHOW GLOBAL VARIABLES LIKE 'ROCKSDB_PAUSE_BACKGROUND_WORK'; Variable_name Value diff --git a/storage/rocksdb/mysql-test/rocksdb/r/issue111.result b/storage/rocksdb/mysql-test/rocksdb/r/issue111.result index 315d2d2b50bc3..6a773f84e83e1 100644 --- a/storage/rocksdb/mysql-test/rocksdb/r/issue111.result +++ b/storage/rocksdb/mysql-test/rocksdb/r/issue111.result @@ -28,5 +28,5 @@ begin; update t1 set col2=123456 where pk=0; commit; update t1 set col2=col2+1 where col1 < 10 limit 5; -ERROR 40001: Deadlock found when trying to get lock; try restarting transaction +ERROR 40001: Deadlock found when trying to get lock; try restarting transaction (snapshot conflict) drop table t1, ten, one_k; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/issue243_transactionStatus.result b/storage/rocksdb/mysql-test/rocksdb/r/issue243_transactionStatus.result new file mode 100644 index 0000000000000..83db308627d1c --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/issue243_transactionStatus.result @@ -0,0 +1,155 @@ +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 ( +id INT, +val1 INT, +val2 INT, +PRIMARY KEY (id) +) ENGINE=rocksdb; +INSERT INTO t1 VALUES(1,1,1),(2,1,2); +SELECT * FROM t1; +id val1 val2 +1 1 1 +2 1 2 +UPDATE t1 SET val1=2 WHERE id=2; +SELECT * FROM t1; +id val1 val2 +1 1 1 +2 2 2 +SHOW ENGINE rocksdb TRANSACTION STATUS; +Type Name Status +SNAPSHOTS rocksdb +============================================================ +TIMESTAMP ROCKSDB TRANSACTION MONITOR OUTPUT +============================================================ +--------- +SNAPSHOTS +--------- +LIST OF SNAPSHOTS FOR EACH SESSION: +----------------------------------------- +END OF ROCKSDB TRANSACTION MONITOR OUTPUT +========================================= + +SET AUTOCOMMIT=0; +START TRANSACTION; +INSERT INTO t1 VALUES(20,1,1),(30,30,30); +SELECT * FROM t1; +id val1 val2 +1 1 1 +2 2 2 +20 1 1 +30 30 30 +UPDATE t1 SET val1=20, val2=20 WHERE 
id=20; +SELECT * FROM t1; +id val1 val2 +1 1 1 +2 2 2 +20 20 20 +30 30 30 +DELETE FROM t1 WHERE id=30; +SHOW ENGINE rocksdb TRANSACTION STATUS; +Type Name Status +SNAPSHOTS rocksdb +============================================================ +TIMESTAMP ROCKSDB TRANSACTION MONITOR OUTPUT +============================================================ +--------- +SNAPSHOTS +--------- +LIST OF SNAPSHOTS FOR EACH SESSION: +---SNAPSHOT, ACTIVE NUM sec +MySQL thread id TID, OS thread handle PTR, query id QID localhost root ACTION +SHOW ENGINE rocksdb TRANSACTION STATUS +lock count 8, write count 4 +insert count 2, update count 1, delete count 1 +----------------------------------------- +END OF ROCKSDB TRANSACTION MONITOR OUTPUT +========================================= + +ROLLBACK; +SHOW ENGINE rocksdb TRANSACTION STATUS; +Type Name Status +SNAPSHOTS rocksdb +============================================================ +TIMESTAMP ROCKSDB TRANSACTION MONITOR OUTPUT +============================================================ +--------- +SNAPSHOTS +--------- +LIST OF SNAPSHOTS FOR EACH SESSION: +----------------------------------------- +END OF ROCKSDB TRANSACTION MONITOR OUTPUT +========================================= + +START TRANSACTION; +INSERT INTO t1 VALUES(40,40,40); +SHOW ENGINE rocksdb TRANSACTION STATUS; +Type Name Status +SNAPSHOTS rocksdb +============================================================ +TIMESTAMP ROCKSDB TRANSACTION MONITOR OUTPUT +============================================================ +--------- +SNAPSHOTS +--------- +LIST OF SNAPSHOTS FOR EACH SESSION: +---SNAPSHOT, ACTIVE NUM sec +MySQL thread id TID, OS thread handle PTR, query id QID localhost root ACTION +SHOW ENGINE rocksdb TRANSACTION STATUS +lock count 2, write count 1 +insert count 1, update count 0, delete count 0 +----------------------------------------- +END OF ROCKSDB TRANSACTION MONITOR OUTPUT +========================================= + +COMMIT; +SHOW ENGINE rocksdb 
TRANSACTION STATUS; +Type Name Status +SNAPSHOTS rocksdb +============================================================ +TIMESTAMP ROCKSDB TRANSACTION MONITOR OUTPUT +============================================================ +--------- +SNAPSHOTS +--------- +LIST OF SNAPSHOTS FOR EACH SESSION: +----------------------------------------- +END OF ROCKSDB TRANSACTION MONITOR OUTPUT +========================================= + +SET AUTOCOMMIT=1; +DROP TABLE t1; +DROP TABLE IF EXISTS t2; +CREATE TABLE t2 ( +id1 INT, +id2 INT, +value INT, +PRIMARY KEY (id1), +KEY (id2) +) ENGINE=rocksdb; +SET AUTOCOMMIT=0; +START TRANSACTION; +INSERT INTO t2 VALUES(1,2,0),(10,20,30); +UPDATE t2 SET value=3 WHERE id2=2; +DELETE FROM t2 WHERE id1=10; +SHOW ENGINE rocksdb TRANSACTION STATUS; +Type Name Status +SNAPSHOTS rocksdb +============================================================ +TIMESTAMP ROCKSDB TRANSACTION MONITOR OUTPUT +============================================================ +--------- +SNAPSHOTS +--------- +LIST OF SNAPSHOTS FOR EACH SESSION: +---SNAPSHOT, ACTIVE NUM sec +MySQL thread id TID, OS thread handle PTR, query id QID localhost root ACTION +SHOW ENGINE rocksdb TRANSACTION STATUS +lock count 9, write count 7 +insert count 2, update count 1, delete count 1 +----------------------------------------- +END OF ROCKSDB TRANSACTION MONITOR OUTPUT +========================================= + +ROLLBACK; +SET AUTOCOMMIT=1; +DROP TABLE t2; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/issue255.result b/storage/rocksdb/mysql-test/rocksdb/r/issue255.result new file mode 100644 index 0000000000000..62875e378a476 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/issue255.result @@ -0,0 +1,21 @@ +CREATE TABLE t1 (pk BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT); +INSERT INTO t1 VALUES (5); +SHOW TABLE STATUS LIKE 't1'; +Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length Index_length Data_free Auto_increment Create_time Update_time 
Check_time Collation Checksum Create_options Comment +t1 ROCKSDB # Fixed 1 # # # # # 6 NULL NULL NULL latin1_swedish_ci NULL +INSERT INTO t1 VALUES ('538647864786478647864'); +Warnings: +Warning 1264 Out of range value for column 'pk' at row 1 +SHOW TABLE STATUS LIKE 't1'; +Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length Index_length Data_free Auto_increment Create_time Update_time Check_time Collation Checksum Create_options Comment +t1 ROCKSDB # Fixed 2 # # # # # 9223372036854775807 NULL NULL NULL latin1_swedish_ci NULL +INSERT INTO t1 VALUES (); +ERROR 23000: Duplicate entry '9223372036854775807' for key 'PRIMARY' +SELECT * FROM t1; +pk +5 +9223372036854775807 +SHOW TABLE STATUS LIKE 't1'; +Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length Index_length Data_free Auto_increment Create_time Update_time Check_time Collation Checksum Create_options Comment +t1 ROCKSDB # Fixed 2 # # # # # 9223372036854775807 NULL NULL NULL latin1_swedish_ci NULL +DROP TABLE t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/issue495.result b/storage/rocksdb/mysql-test/rocksdb/r/issue495.result index 2560ec577ed17..c7ac34c629445 100644 --- a/storage/rocksdb/mysql-test/rocksdb/r/issue495.result +++ b/storage/rocksdb/mysql-test/rocksdb/r/issue495.result @@ -1,6 +1,4 @@ drop table if exists t; -Warnings: -Note 1051 Unknown table 'test.t' create table t ( a int, b int, diff --git a/storage/rocksdb/mysql-test/rocksdb/r/lock_wait_timeout_stats.result b/storage/rocksdb/mysql-test/rocksdb/r/lock_wait_timeout_stats.result new file mode 100644 index 0000000000000..d0bfb05fd1bb2 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/lock_wait_timeout_stats.result @@ -0,0 +1,27 @@ +create table t (a int primary key) engine=rocksdb; +begin; +insert into t values (0); +set @@rocksdb_lock_wait_timeout=1; +select ROW_LOCK_WAIT_TIMEOUTS from information_schema.table_statistics where table_name="t"; +ROW_LOCK_WAIT_TIMEOUTS +0 +begin; +set 
@@rocksdb_lock_wait_timeout=1; +begin; +insert into t values(0); +ERROR HY000: Lock wait timeout exceeded; try restarting transaction: Timeout on index: test.t.PRIMARY +select ROW_LOCK_WAIT_TIMEOUTS from information_schema.table_statistics where table_name="t"; +ROW_LOCK_WAIT_TIMEOUTS +1 +select ROW_LOCK_WAIT_TIMEOUTS from information_schema.table_statistics where table_name="t"; +ROW_LOCK_WAIT_TIMEOUTS +1 +insert into t values(0); +ERROR HY000: Lock wait timeout exceeded; try restarting transaction: Timeout on index: test.t.PRIMARY +select ROW_LOCK_WAIT_TIMEOUTS from information_schema.table_statistics where table_name="t"; +ROW_LOCK_WAIT_TIMEOUTS +2 +select ROW_LOCK_WAIT_TIMEOUTS from information_schema.table_statistics where table_name="t"; +ROW_LOCK_WAIT_TIMEOUTS +2 +drop table t; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/misc.result b/storage/rocksdb/mysql-test/rocksdb/r/misc.result index 70c270d55382b..cb76c15138e73 100644 --- a/storage/rocksdb/mysql-test/rocksdb/r/misc.result +++ b/storage/rocksdb/mysql-test/rocksdb/r/misc.result @@ -47,6 +47,7 @@ help_relation help_keyword_id NULL NULL help_relation help_topic_id NULL NULL help_topic help_topic_id NULL NULL help_topic name NULL NULL +native_proc name NULL NULL ndb_binlog_index epoch NULL NULL ndb_binlog_index orig_epoch NULL NULL ndb_binlog_index orig_server_id NULL NULL diff --git a/storage/rocksdb/mysql-test/rocksdb/r/native_procedure.result b/storage/rocksdb/mysql-test/rocksdb/r/native_procedure.result new file mode 100644 index 0000000000000..725b74e12915e --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/native_procedure.result @@ -0,0 +1,397 @@ +create database linkdb; +use linkdb; +create table linktable ( +id1 bigint(20) unsigned NOT NULL DEFAULT '0', +id2 bigint(20) unsigned NOT NULL DEFAULT '0', +link_type bigint(20) unsigned NOT NULL DEFAULT '0', +visibility tinyint(3) NOT NULL DEFAULT '0', +data varchar(255) NOT NULL DEFAULT '', +time bigint(20) unsigned NOT NULL DEFAULT '0', 
+version int(11) unsigned NOT NULL DEFAULT '0', +primary key (id1,id2,link_type) COMMENT 'cf_link_pk', +KEY id1_type (id1,link_type,visibility,time,version,data) +COMMENT 'rev:cf_link_id1_type') ENGINE=rocksdb DEFAULT COLLATE=latin1_bin; +create table counttable ( +id bigint(20) unsigned NOT NULL DEFAULT '0', +link_type bigint(20) unsigned NOT NULL DEFAULT '0', +count int(10) unsigned NOT NULL DEFAULT '0', +time bigint(20) unsigned NOT NULL DEFAULT '0', +version bigint(20) unsigned NOT NULL DEFAULT '0', +primary key (id,link_type) COMMENT 'cf_count_pk') +ENGINE=rocksdb DEFAULT COLLATE=latin1_bin; +create table nodetable ( +id bigint(20) unsigned NOT NULL AUTO_INCREMENT, +type int(10) unsigned NOT NULL, +version bigint(20) unsigned NOT NULL, +time int(10) unsigned NOT NULL, +data mediumtext NOT NULL, +primary key(id) COMMENT 'cf_node_pk') +ENGINE=rocksdb DEFAULT COLLATE=latin1_bin; +# +# Test nodeGet function +# +create native procedure nodeGet soname "NP_EXAMPLE_LIB"; +%nodeGet 1; +id type version time data +1 1 1 1000 data +%nodeGet 50 anything can go here; +id type version time data +50 1 1 50000 data +%nodeGet 39; +id type version time data +39 1 1 39000 data +%nodeGet 98; +id type version time data +98 1 1 98000 data +%nodeGet 1000; +id type version time data +%nodeGet -1; +ERROR HY000: Native procedure failed. (code: 7, msg: 'Invalid arguments: Conversion failed for field id.', query 'nodeGet -1') +%nodeGet asdf; +ERROR HY000: Native procedure failed. 
(code: 7, msg: 'Invalid arguments: Conversion failed for field id.', query 'nodeGet asdf') +# +# Test linkGetRange/linkGetId2s function +# +create native procedure linkGetRange soname "NP_EXAMPLE_LIB"; +create native procedure linkGetId2s soname "NP_EXAMPLE_LIB"; +%linkGetRange 1 1 1000 2000 0 1000; +id1 id2 link_type visibility data time version +1 5 1 1 data 1005 1 +1 4 1 1 data 1004 1 +1 3 1 1 data 1003 1 +1 2 1 1 data 1002 1 +1 1 1 1 data 1001 1 +%linkGetRange 1 2 1000 2000 0 1000; +id1 id2 link_type visibility data time version +%linkGetRange 1 1 5000 2000 0 1000; +id1 id2 link_type visibility data time version +%linkGetRange 1 2 1000 6000 0 5; +id1 id2 link_type visibility data time version +1 5 2 1 data 2005 1 +1 4 2 1 data 2004 1 +1 3 2 1 data 2003 1 +1 2 2 1 data 2002 1 +1 1 2 1 data 2001 1 +%linkGetRange 1 2 1000 6000 0 2; +id1 id2 link_type visibility data time version +1 5 2 1 data 2005 1 +1 4 2 1 data 2004 1 +%linkGetRange 1 2 1000 6000 2 2; +id1 id2 link_type visibility data time version +1 3 2 1 data 2003 1 +1 2 2 1 data 2002 1 +%linkGetId2s 1 3 3 1 2 3; +id1 id2 link_type visibility data time version +1 1 3 1 data 3001 1 +1 2 3 1 data 3002 1 +1 3 3 1 data 3003 1 +%linkGetId2s 1 3 3 3 2 1; +id1 id2 link_type visibility data time version +1 3 3 1 data 3003 1 +1 2 3 1 data 3002 1 +1 1 3 1 data 3001 1 +%linkGetId2s 1 3 3 3 2 10; +id1 id2 link_type visibility data time version +1 3 3 1 data 3003 1 +1 2 3 1 data 3002 1 +%linkGetId2s 1 3 3 3 2 1 asdf; +id1 id2 link_type visibility data time version +1 3 3 1 data 3003 1 +1 2 3 1 data 3002 1 +1 1 3 1 data 3001 1 +%linkGetId2s 1 3 0; +id1 id2 link_type visibility data time version +%linkGetId2s 1 3 4 2; +ERROR HY000: Incorrect arguments to native procedure. 
(query 'linkGetId2s 1 3 4 2') +# +# Test rangeQuery function +# +create native procedure rangeQuery soname "NP_EXAMPLE_LIB"; +%rangeQuery 1 0 0 4 id1 1 link_type 1 visibility 1 time 1001 4 id1 1 link_type 1 visibility 1 time 1005; +id1 id2 link_type visibility data time version +1 1 1 1 data 1001 1 +1 2 1 1 data 1002 1 +1 3 1 1 data 1003 1 +1 4 1 1 data 1004 1 +1 5 1 1 data 1005 1 +%rangeQuery 1 0 1 4 id1 1 link_type 1 visibility 1 time 1001 4 id1 1 link_type 1 visibility 1 time 1005; +id1 id2 link_type visibility data time version +1 1 1 1 data 1001 1 +1 2 1 1 data 1002 1 +1 3 1 1 data 1003 1 +1 4 1 1 data 1004 1 +%rangeQuery 1 1 0 4 id1 1 link_type 1 visibility 1 time 1001 4 id1 1 link_type 1 visibility 1 time 1005; +id1 id2 link_type visibility data time version +1 2 1 1 data 1002 1 +1 3 1 1 data 1003 1 +1 4 1 1 data 1004 1 +1 5 1 1 data 1005 1 +%rangeQuery 1 1 1 4 id1 1 link_type 1 visibility 1 time 1001 4 id1 1 link_type 1 visibility 1 time 1005; +id1 id2 link_type visibility data time version +1 2 1 1 data 1002 1 +1 3 1 1 data 1003 1 +1 4 1 1 data 1004 1 +%rangeQuery 0 0 0 4 id1 1 link_type 1 visibility 1 time 1001 4 id1 1 link_type 1 visibility 1 time 1005; +id1 id2 link_type visibility data time version +1 5 1 1 data 1005 1 +1 4 1 1 data 1004 1 +1 3 1 1 data 1003 1 +1 2 1 1 data 1002 1 +1 1 1 1 data 1001 1 +%rangeQuery 0 0 1 4 id1 1 link_type 1 visibility 1 time 1001 4 id1 1 link_type 1 visibility 1 time 1005; +id1 id2 link_type visibility data time version +1 4 1 1 data 1004 1 +1 3 1 1 data 1003 1 +1 2 1 1 data 1002 1 +1 1 1 1 data 1001 1 +%rangeQuery 0 1 0 4 id1 1 link_type 1 visibility 1 time 1001 4 id1 1 link_type 1 visibility 1 time 1005; +id1 id2 link_type visibility data time version +1 5 1 1 data 1005 1 +1 4 1 1 data 1004 1 +1 3 1 1 data 1003 1 +1 2 1 1 data 1002 1 +%rangeQuery 0 1 1 4 id1 1 link_type 1 visibility 1 time 1001 4 id1 1 link_type 1 visibility 1 time 1005; +id1 id2 link_type visibility data time version +1 4 1 1 data 1004 1 +1 3 1 1 
data 1003 1 +1 2 1 1 data 1002 1 +%rangeQuery 1 0 0 2 id1 1 link_type 1 2 id1 1 link_type 2; +id1 id2 link_type visibility data time version +1 1 1 1 data 1001 1 +1 2 1 1 data 1002 1 +1 3 1 1 data 1003 1 +1 4 1 1 data 1004 1 +1 5 1 1 data 1005 1 +1 1 2 1 data 2001 1 +1 2 2 1 data 2002 1 +1 3 2 1 data 2003 1 +1 4 2 1 data 2004 1 +1 5 2 1 data 2005 1 +%rangeQuery 1 0 1 2 id1 1 link_type 1 2 id1 1 link_type 2; +id1 id2 link_type visibility data time version +1 1 1 1 data 1001 1 +1 2 1 1 data 1002 1 +1 3 1 1 data 1003 1 +1 4 1 1 data 1004 1 +1 5 1 1 data 1005 1 +%rangeQuery 1 1 0 2 id1 1 link_type 1 2 id1 1 link_type 2; +id1 id2 link_type visibility data time version +1 1 2 1 data 2001 1 +1 2 2 1 data 2002 1 +1 3 2 1 data 2003 1 +1 4 2 1 data 2004 1 +1 5 2 1 data 2005 1 +%rangeQuery 1 1 1 2 id1 1 link_type 1 2 id1 1 link_type 2; +id1 id2 link_type visibility data time version +%rangeQuery 0 0 0 2 id1 1 link_type 1 2 id1 1 link_type 2; +id1 id2 link_type visibility data time version +1 5 2 1 data 2005 1 +1 4 2 1 data 2004 1 +1 3 2 1 data 2003 1 +1 2 2 1 data 2002 1 +1 1 2 1 data 2001 1 +1 5 1 1 data 1005 1 +1 4 1 1 data 1004 1 +1 3 1 1 data 1003 1 +1 2 1 1 data 1002 1 +1 1 1 1 data 1001 1 +%rangeQuery 0 0 1 2 id1 1 link_type 1 2 id1 1 link_type 2; +id1 id2 link_type visibility data time version +1 5 1 1 data 1005 1 +1 4 1 1 data 1004 1 +1 3 1 1 data 1003 1 +1 2 1 1 data 1002 1 +1 1 1 1 data 1001 1 +%rangeQuery 0 1 0 2 id1 1 link_type 1 2 id1 1 link_type 2; +id1 id2 link_type visibility data time version +1 5 2 1 data 2005 1 +1 4 2 1 data 2004 1 +1 3 2 1 data 2003 1 +1 2 2 1 data 2002 1 +1 1 2 1 data 2001 1 +%rangeQuery 0 1 1 2 id1 1 link_type 1 2 id1 1 link_type 2; +id1 id2 link_type visibility data time version +%rangeQuery 0 0 0 2 id1 1 link_type 1 4 id1 1 link_type 2 visibility 1 time 2004; +id1 id2 link_type visibility data time version +1 4 2 1 data 2004 1 +1 3 2 1 data 2003 1 +1 2 2 1 data 2002 1 +1 1 2 1 data 2001 1 +1 5 1 1 data 1005 1 +1 4 1 1 data 1004 1 +1 3 
1 1 data 1003 1 +1 2 1 1 data 1002 1 +1 1 1 1 data 1001 1 +%rangeQuery 0 0 1 2 id1 1 link_type 1 4 id1 1 link_type 2 visibility 1 time 2004; +id1 id2 link_type visibility data time version +1 3 2 1 data 2003 1 +1 2 2 1 data 2002 1 +1 1 2 1 data 2001 1 +1 5 1 1 data 1005 1 +1 4 1 1 data 1004 1 +1 3 1 1 data 1003 1 +1 2 1 1 data 1002 1 +1 1 1 1 data 1001 1 +%rangeQuery 0 1 0 2 id1 1 link_type 1 4 id1 1 link_type 2 visibility 1 time 2004; +id1 id2 link_type visibility data time version +1 4 2 1 data 2004 1 +1 3 2 1 data 2003 1 +1 2 2 1 data 2002 1 +1 1 2 1 data 2001 1 +%rangeQuery 0 1 1 2 id1 1 link_type 1 4 id1 1 link_type 2 visibility 1 time 2004; +id1 id2 link_type visibility data time version +1 3 2 1 data 2003 1 +1 2 2 1 data 2002 1 +1 1 2 1 data 2001 1 +# +# Test countGet function +# +create native procedure countGet soname "NP_EXAMPLE_LIB"; +%countGet 1 1; +count +2 +%countGet 10 1; +count +20 +%countGet 111 1; +count +%countGet 1 111; +count +%countGet -1 1 1; +ERROR HY000: Native procedure failed. (code: 7, msg: 'Invalid arguments: Conversion failed for field id.', query 'countGet -1 1 1') +%countGet -1 1 2; +ERROR HY000: Native procedure failed. (code: 7, msg: 'Invalid arguments: Conversion failed for field id.', query 'countGet -1 1 2') +%countGet; +ERROR HY000: Incorrect arguments to native procedure. (query 'countGet') +# +# Check that DDL locks are respected. +# +create native procedure sleepRange soname "NP_EXAMPLE_LIB"; +%sleepRange 1; +set @start_lock_wait_timeout = @@session.lock_wait_timeout; +set lock_wait_timeout = 1; +drop table counttable; +ERROR HY000: Lock wait timeout exceeded; try restarting transaction: Timeout on table metadata: linkdb.counttable +set lock_wait_timeout = @start_lock_wait_timeout; +count +2 +4 +6 +8 +10 +12 +14 +16 +18 +20 +# +# Check that queries can be killed. +# +%sleepRange 0; +kill query ID; +ERROR 70100: Query execution was interrupted +# +# Check that native procedures work properly with transactions. 
+# +use linkdb; +set session transaction isolation level repeatable read; +%countGet 1 1; +count +2 +begin; +select count from counttable where id = 1 and link_type = 1; +count +2 +%countGet 1 1; +count +2 +# Before update +%countGet 1 1; +count +2 +update counttable set count = count + 1 where id = 1 and link_type = 1; +# After update +%countGet 1 1; +count +3 +# Unchanged due to consistent reads +%countGet 1 1; +count +2 +# +# Check index reads on prefixed data. +# +alter table linktable drop index id1_type; +alter table linktable +add index id1_type (id1,link_type,visibility,time,version,data(1)) +COMMENT 'rev:cf_link_id1_type'; +%linkGetRange 1 1 1000 2000 0 1000; +id1 id2 link_type visibility data time version +1 5 1 1 data 1005 1 +1 4 1 1 data 1004 1 +1 3 1 1 data 1003 1 +1 2 1 1 data 1002 1 +1 1 1 1 data 1001 1 +# +# Check correct error handling for various scenarios. +# +create native procedure invalidKey1 soname "NP_EXAMPLE_LIB"; +%invalidKey1; +ERROR HY000: Native procedure failed. (code: 6, msg: 'Not found: ', query 'invalidKey1') +create native procedure invalidOpen1 soname "NP_EXAMPLE_LIB"; +%invalidOpen1; +ERROR HY000: Native procedure failed. (code: 5, msg: 'Cannot reinitialize: ', query 'invalidOpen1') +create native procedure invalidOpen2 soname "NP_EXAMPLE_LIB"; +%invalidOpen2; +ERROR HY000: Native procedure failed. (code: 5, msg: 'Cannot reinitialize: ', query 'invalidOpen2') +create native procedure invalidOpen3 soname "NP_EXAMPLE_LIB"; +%invalidOpen3; +ERROR HY000: Native procedure failed. (code: 5, msg: 'Cannot reinitialize: ', query 'invalidOpen3') +create native procedure invalidOpen4 soname "NP_EXAMPLE_LIB"; +%invalidOpen4; +ERROR HY000: Native procedure failed. (code: 5, msg: 'Cannot reinitialize: ', query 'invalidOpen4') +%invalidProcedure; +ERROR HY000: Unknown native procedure. 
'invalidProcedure' +create native procedure invalidProcedure soname "invalid.so"; +ERROR HY000: Can't open shared library +create native procedure invalidProcedure soname "NP_EXAMPLE_LIB"; +ERROR HY000: Can't find symbol 'invalidProcedure' in library +# +# Check that our functions are reloaded after restart. +# +select * from mysql.native_proc order by name; +name type dl lua +countGet native np_example.so +invalidKey1 native np_example.so +invalidOpen1 native np_example.so +invalidOpen2 native np_example.so +invalidOpen3 native np_example.so +invalidOpen4 native np_example.so +linkGetId2s native np_example.so +linkGetRange native np_example.so +nodeGet native np_example.so +rangeQuery native np_example.so +sleepRange native np_example.so +drop native procedure nodeGet; +create native procedure nodeGet soname "NP_EXAMPLE_LIB"; +ERROR HY000: Native procedure 'nodeGet' exists. +drop native procedure linkGetRange; +drop native procedure linkGetId2s; +drop native procedure countGet; +drop native procedure sleepRange; +drop native procedure rangeQuery; +drop native procedure invalidKey1; +drop native procedure invalidOpen1; +drop native procedure invalidOpen2; +drop native procedure invalidOpen3; +drop native procedure invalidOpen4; +%nodeGet 1; +ERROR HY000: Unknown native procedure. 'nodeGet' +# +# Check that our functions are unloaded after restart. +# +select * from mysql.native_proc order by name; +name type dl lua +%nodeGet 1; +ERROR HY000: Unknown native procedure. 
'nodeGet' +drop database linkdb; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/prefix_extractor_override.result b/storage/rocksdb/mysql-test/rocksdb/r/prefix_extractor_override.result new file mode 100644 index 0000000000000..9c7d189e93599 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/prefix_extractor_override.result @@ -0,0 +1,76 @@ +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 (id1 BIGINT, id2 BIGINT, id3 BIGINT, id4 BIGINT, PRIMARY KEY (id1, id2, id3, id4) comment 'cf1') ENGINE=rocksdb collate latin1_bin; +set global rocksdb_force_flush_memtable_now = 1; + +Original Prefix Extractor: + +SELECT * FROM information_schema.rocksdb_cf_options WHERE option_type like '%prefix_extractor%'; +CF_NAME OPTION_TYPE VALUE +__system__ PREFIX_EXTRACTOR rocksdb.CappedPrefix.24 +cf1 PREFIX_EXTRACTOR rocksdb.CappedPrefix.24 +default PREFIX_EXTRACTOR rocksdb.CappedPrefix.24 +select variable_value into @u from information_schema.global_status where variable_name='rocksdb_bloom_filter_prefix_checked'; +SELECT COUNT(*) FROM t1 WHERE id1=1 AND id2=1 AND id3=1; +COUNT(*) +1 +select variable_value-@u from information_schema.global_status where variable_name='rocksdb_bloom_filter_prefix_checked'; +variable_value-@u +1 + +Prefix Extractor (after override_cf_options set, should not be changed): + +SELECT * FROM information_schema.rocksdb_cf_options WHERE option_type like '%prefix_extractor%'; +CF_NAME OPTION_TYPE VALUE +__system__ PREFIX_EXTRACTOR rocksdb.CappedPrefix.24 +cf1 PREFIX_EXTRACTOR rocksdb.CappedPrefix.24 +default PREFIX_EXTRACTOR rocksdb.CappedPrefix.24 + +Restarting with new Prefix Extractor... 
+ + +Changed Prefix Extractor (after restart): + +SELECT * FROM information_schema.rocksdb_cf_options WHERE option_type like '%prefix_extractor%'; +CF_NAME OPTION_TYPE VALUE +__system__ PREFIX_EXTRACTOR rocksdb.CappedPrefix.24 +cf1 PREFIX_EXTRACTOR rocksdb.CappedPrefix.26 +default PREFIX_EXTRACTOR rocksdb.CappedPrefix.24 +select variable_value into @u from information_schema.global_status where variable_name='rocksdb_bloom_filter_prefix_checked'; +SELECT COUNT(*) FROM t1 WHERE id1=1 AND id2=1 AND id3=1; +COUNT(*) +1 +select variable_value-@u from information_schema.global_status where variable_name='rocksdb_bloom_filter_prefix_checked'; +variable_value-@u +0 +set global rocksdb_force_flush_memtable_now = 1; +select variable_value into @u from information_schema.global_status where variable_name='rocksdb_bloom_filter_prefix_checked'; +SELECT COUNT(*) FROM t1 WHERE id1=1 AND id2=1 AND id3=1; +COUNT(*) +1 +select variable_value-@u from information_schema.global_status where variable_name='rocksdb_bloom_filter_prefix_checked'; +variable_value-@u +1 +SELECT COUNT(*) FROM information_schema.rocksdb_index_file_map WHERE COLUMN_FAMILY != 1; +COUNT(*) +2 +UPDATE t1 SET id1=1,id2 = 30,id3 = 30 WHERE id4 >= 0 AND id4 <=10; +set global rocksdb_force_flush_memtable_now = 1; +SELECT COUNT(*) FROM information_schema.rocksdb_index_file_map WHERE COLUMN_FAMILY != 1; +COUNT(*) +3 +select variable_value into @u from information_schema.global_status where variable_name='rocksdb_bloom_filter_prefix_checked'; +SELECT COUNT(*) FROM t1 WHERE id1=1 AND id2=1 AND id3=1; +COUNT(*) +0 +select variable_value-@u from information_schema.global_status where variable_name='rocksdb_bloom_filter_prefix_checked'; +variable_value-@u +2 +set global rocksdb_compact_cf='cf1'; +select variable_value into @u from information_schema.global_status where variable_name='rocksdb_bloom_filter_prefix_checked'; +SELECT COUNT(*) FROM t1 WHERE id1=1 AND id2=30 AND id3=30; +COUNT(*) +11 +select variable_value-@u from 
information_schema.global_status where variable_name='rocksdb_bloom_filter_prefix_checked'; +variable_value-@u +1 +DROP TABLE t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/rocksdb.result b/storage/rocksdb/mysql-test/rocksdb/r/rocksdb.result index 8a02bb8258b90..3f08450db6d54 100644 --- a/storage/rocksdb/mysql-test/rocksdb/r/rocksdb.result +++ b/storage/rocksdb/mysql-test/rocksdb/r/rocksdb.result @@ -862,8 +862,6 @@ rocksdb_advise_random_on_open ON rocksdb_allow_concurrent_memtable_write OFF rocksdb_allow_mmap_reads OFF rocksdb_allow_mmap_writes OFF -rocksdb_background_sync OFF -rocksdb_base_background_compactions 1 rocksdb_blind_delete_primary_key OFF rocksdb_block_cache_size 536870912 rocksdb_block_restart_interval 16 @@ -889,22 +887,29 @@ rocksdb_datadir ./.rocksdb rocksdb_db_write_buffer_size 0 rocksdb_deadlock_detect OFF rocksdb_debug_optimizer_no_zero_cardinality ON +rocksdb_debug_ttl_read_filter_ts 0 +rocksdb_debug_ttl_rec_ts 0 +rocksdb_debug_ttl_snapshot_ts 0 rocksdb_default_cf_options -rocksdb_delayed_write_rate 16777216 +rocksdb_delayed_write_rate 0 rocksdb_delete_obsolete_files_period_micros 21600000000 rocksdb_enable_2pc ON rocksdb_enable_bulk_load_api ON -rocksdb_enable_thread_tracking OFF +rocksdb_enable_thread_tracking ON +rocksdb_enable_ttl ON +rocksdb_enable_ttl_read_filtering ON rocksdb_enable_write_thread_adaptive_yield OFF rocksdb_error_if_exists OFF rocksdb_flush_log_at_trx_commit 1 rocksdb_flush_memtable_on_analyze ON rocksdb_force_compute_memtable_stats ON +rocksdb_force_flush_memtable_and_lzero_now OFF rocksdb_force_flush_memtable_now OFF rocksdb_force_index_records_in_range 0 rocksdb_hash_index_allow_collision ON rocksdb_index_type kBinarySearch rocksdb_info_log_level error_level +rocksdb_io_write_timeout 0 rocksdb_is_fd_close_on_exec ON rocksdb_keep_log_file_num 1000 rocksdb_lock_scanned_rows OFF @@ -912,8 +917,7 @@ rocksdb_lock_wait_timeout 1 rocksdb_log_file_time_to_roll 0 rocksdb_manifest_preallocation_size 4194304 
rocksdb_master_skip_tx_api OFF -rocksdb_max_background_compactions 1 -rocksdb_max_background_flushes 1 +rocksdb_max_background_jobs 2 rocksdb_max_log_file_size 0 rocksdb_max_manifest_file_size 18446744073709551615 rocksdb_max_open_files -1 @@ -935,11 +939,13 @@ rocksdb_print_snapshot_conflict_queries OFF rocksdb_rate_limiter_bytes_per_sec 0 rocksdb_read_free_rpl_tables rocksdb_records_in_range 50 +rocksdb_reset_stats OFF rocksdb_seconds_between_stat_computes 3600 rocksdb_signal_drop_index_thread OFF rocksdb_skip_bloom_filter_on_read OFF rocksdb_skip_fill_cache OFF rocksdb_skip_unique_check_tables .* +rocksdb_sst_mgr_rate_bytes_per_sec 0 rocksdb_stats_dump_period_sec 600 rocksdb_store_row_debug_checksums OFF rocksdb_strict_collation_check OFF @@ -949,9 +955,10 @@ rocksdb_table_stats_sampling_pct 10 rocksdb_tmpdir rocksdb_trace_sst_api OFF rocksdb_unsafe_for_binlog OFF +rocksdb_update_cf_options rocksdb_use_adaptive_mutex OFF +rocksdb_use_direct_io_for_flush_and_compaction OFF rocksdb_use_direct_reads OFF -rocksdb_use_direct_writes OFF rocksdb_use_fsync OFF rocksdb_validate_tables 1 rocksdb_verify_row_debug_checksums OFF @@ -961,6 +968,7 @@ rocksdb_wal_recovery_mode 1 rocksdb_wal_size_limit_mb 0 rocksdb_wal_ttl_seconds 0 rocksdb_whole_key_filtering ON +rocksdb_write_batch_max_bytes 0 rocksdb_write_disable_wal OFF rocksdb_write_ignore_missing_column_families OFF create table t47 (pk int primary key, col1 varchar(12)) engine=rocksdb; @@ -1316,7 +1324,7 @@ insert into t1 select (@a:=@a+1), 1234 from information_schema.session_variables set @tmp1= @@rocksdb_max_row_locks; set rocksdb_max_row_locks= 20; update t1 set a=a+10; -ERROR HY000: Got error 196 'Number of locks held reached @@rocksdb_max_row_locks.' 
from ROCKSDB +ERROR HY000: Status error 10 received from RocksDB: Operation aborted: Failed to acquire lock due to max_num_locks limit DROP TABLE t1; # # Test AUTO_INCREMENT behavior problem, @@ -1427,10 +1435,15 @@ rocksdb_rows_inserted # rocksdb_rows_read # rocksdb_rows_updated # rocksdb_rows_deleted_blind # +rocksdb_rows_expired # rocksdb_system_rows_deleted # rocksdb_system_rows_inserted # rocksdb_system_rows_read # rocksdb_system_rows_updated # +rocksdb_memtable_total # +rocksdb_memtable_unflushed # +rocksdb_queries_point # +rocksdb_queries_range # rocksdb_block_cache_add # rocksdb_block_cache_data_hit # rocksdb_block_cache_data_miss # @@ -1456,9 +1469,6 @@ rocksdb_flush_write_bytes # rocksdb_getupdatessince_calls # rocksdb_git_date # rocksdb_git_hash # -rocksdb_l0_num_files_stall_micros # -rocksdb_l0_slowdown_micros # -rocksdb_memtable_compaction_micros # rocksdb_memtable_hit # rocksdb_memtable_miss # rocksdb_no_file_closes # @@ -1486,6 +1496,7 @@ rocksdb_number_superversion_cleanups # rocksdb_number_superversion_releases # rocksdb_rate_limit_delay_millis # rocksdb_snapshot_conflict_errors # +rocksdb_stall_micros # rocksdb_wal_bytes # rocksdb_wal_group_syncs # rocksdb_wal_synced # @@ -1500,10 +1511,15 @@ ROCKSDB_ROWS_INSERTED ROCKSDB_ROWS_READ ROCKSDB_ROWS_UPDATED ROCKSDB_ROWS_DELETED_BLIND +ROCKSDB_ROWS_EXPIRED ROCKSDB_SYSTEM_ROWS_DELETED ROCKSDB_SYSTEM_ROWS_INSERTED ROCKSDB_SYSTEM_ROWS_READ ROCKSDB_SYSTEM_ROWS_UPDATED +ROCKSDB_MEMTABLE_TOTAL +ROCKSDB_MEMTABLE_UNFLUSHED +ROCKSDB_QUERIES_POINT +ROCKSDB_QUERIES_RANGE ROCKSDB_BLOCK_CACHE_ADD ROCKSDB_BLOCK_CACHE_DATA_HIT ROCKSDB_BLOCK_CACHE_DATA_MISS @@ -1529,9 +1545,6 @@ ROCKSDB_FLUSH_WRITE_BYTES ROCKSDB_GETUPDATESSINCE_CALLS ROCKSDB_GIT_DATE ROCKSDB_GIT_HASH -ROCKSDB_L0_NUM_FILES_STALL_MICROS -ROCKSDB_L0_SLOWDOWN_MICROS -ROCKSDB_MEMTABLE_COMPACTION_MICROS ROCKSDB_MEMTABLE_HIT ROCKSDB_MEMTABLE_MISS ROCKSDB_NO_FILE_CLOSES @@ -1559,6 +1572,7 @@ ROCKSDB_NUMBER_SUPERVERSION_CLEANUPS 
ROCKSDB_NUMBER_SUPERVERSION_RELEASES ROCKSDB_RATE_LIMIT_DELAY_MILLIS ROCKSDB_SNAPSHOT_CONFLICT_ERRORS +ROCKSDB_STALL_MICROS ROCKSDB_WAL_BYTES ROCKSDB_WAL_GROUP_SYNCS ROCKSDB_WAL_SYNCED @@ -1575,10 +1589,15 @@ ROCKSDB_ROWS_INSERTED ROCKSDB_ROWS_READ ROCKSDB_ROWS_UPDATED ROCKSDB_ROWS_DELETED_BLIND +ROCKSDB_ROWS_EXPIRED ROCKSDB_SYSTEM_ROWS_DELETED ROCKSDB_SYSTEM_ROWS_INSERTED ROCKSDB_SYSTEM_ROWS_READ ROCKSDB_SYSTEM_ROWS_UPDATED +ROCKSDB_MEMTABLE_TOTAL +ROCKSDB_MEMTABLE_UNFLUSHED +ROCKSDB_QUERIES_POINT +ROCKSDB_QUERIES_RANGE ROCKSDB_BLOCK_CACHE_ADD ROCKSDB_BLOCK_CACHE_DATA_HIT ROCKSDB_BLOCK_CACHE_DATA_MISS @@ -1604,9 +1623,6 @@ ROCKSDB_FLUSH_WRITE_BYTES ROCKSDB_GETUPDATESSINCE_CALLS ROCKSDB_GIT_DATE ROCKSDB_GIT_HASH -ROCKSDB_L0_NUM_FILES_STALL_MICROS -ROCKSDB_L0_SLOWDOWN_MICROS -ROCKSDB_MEMTABLE_COMPACTION_MICROS ROCKSDB_MEMTABLE_HIT ROCKSDB_MEMTABLE_MISS ROCKSDB_NO_FILE_CLOSES @@ -1634,6 +1650,7 @@ ROCKSDB_NUMBER_SUPERVERSION_CLEANUPS ROCKSDB_NUMBER_SUPERVERSION_RELEASES ROCKSDB_RATE_LIMIT_DELAY_MILLIS ROCKSDB_SNAPSHOT_CONFLICT_ERRORS +ROCKSDB_STALL_MICROS ROCKSDB_WAL_BYTES ROCKSDB_WAL_GROUP_SYNCS ROCKSDB_WAL_SYNCED @@ -1742,6 +1759,7 @@ INSERT INTO t1 SET id=123, blob_col='' ON DUPLICATE KEY UPDATE bl DROP TABLE t1; # # Issue #17: Automatic per-index column families +# (Now deprecated) # create table t1 ( id int not null, @@ -1749,40 +1767,7 @@ key1 int, PRIMARY KEY (id), index (key1) comment '$per_index_cf' ) engine=rocksdb; -#Same CF ids with different CF flags -create table t1_err ( -id int not null, -key1 int, -PRIMARY KEY (id), -index (key1) comment 'test.t1.key1' -) engine=rocksdb; -ERROR HY000: Column family ('test.t1.key1') flag (0) is different from an existing flag (2). Assign a new CF flag, or do not change existing CF flag. 
-create table t1_err ( -id int not null, -key1 int, -PRIMARY KEY (id), -index (key1) comment 'test.t1.key2' -) engine=rocksdb; -drop table t1_err; -# Unfortunately there is no way to check which column family everything goes to -insert into t1 values (1,1); -select * from t1; -id key1 -1 1 -# Check that ALTER and RENAME are disallowed -alter table t1 add col2 int; -ERROR 42000: This version of MySQL doesn't yet support 'ALTER TABLE on table with per-index CF' -rename table t1 to t2; -ERROR 42000: This version of MySQL doesn't yet support 'ALTER TABLE on table with per-index CF' -drop table t1; -# Check detection of typos in $per_index_cf -create table t1 ( -id int not null, -key1 int, -PRIMARY KEY (id), -index (key1) comment '$per_idnex_cf' -)engine=rocksdb; -ERROR 42000: This version of MySQL doesn't yet support 'column family name looks like a typo of $per_index_cf.' +ERROR HY000: The per-index column family option has been deprecated # # Issue #22: SELECT ... FOR UPDATE takes a long time # @@ -2256,6 +2241,7 @@ INSERT INTO t1 VALUES(1, 2); INSERT INTO t1 VALUES(1, 3); SELECT * FROM t1; id value +1 3 REPLACE INTO t1 VALUES(4, 4); ERROR HY000: When unique checking is disabled in MyRocks, INSERT,UPDATE,LOAD statements with clauses that update or replace the key (i.e. INSERT ON DUPLICATE KEY UPDATE, REPLACE) are not allowed. 
Query: REPLACE INTO t1 VALUES(4, 4) INSERT INTO t1 VALUES(5, 5) ON DUPLICATE KEY UPDATE value=value+1; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_cf_per_partition.result b/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_cf_per_partition.result index 05ac3f4f62dc6..991861537969f 100644 --- a/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_cf_per_partition.result +++ b/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_cf_per_partition.result @@ -348,6 +348,9 @@ ALTER TABLE t2 ADD KEY (`col3`, `col4`) COMMENT 'custom_p5_cfname=another_cf_for SELECT DISTINCT(cf_name) FROM information_schema.rocksdb_cfstats WHERE cf_name='another_cf_for_p5'; cf_name another_cf_for_p5 +ANALYZE TABLE t2; +Table Op Msg_type Msg_text +test.t2 analyze status OK EXPLAIN PARTITIONS SELECT * FROM t2 WHERE col3 = 0x4 AND col2 = 0x34567; id select_type table partitions type possible_keys key key_len ref rows Extra 1 SIMPLE t2 custom_p2 ref col3 col3 258 const 1 Using where @@ -407,3 +410,14 @@ cf_name notsharedcf DROP TABLE IF EXISTS t1; DROP TABLE IF EXISTS t2; +CREATE TABLE t1 ( +a INT NOT NULL, +PRIMARY KEY (a) COMMENT 'p1_cfname=foo;' +) ENGINE=ROCKSDB +PARTITION BY LIST COLUMNS(a) +(PARTITION p1 VALUES IN (1) ENGINE = ROCKSDB); +INSERT INTO t1 values (1); +TRUNCATE TABLE t1; +SELECT * FROM t1; +a +DROP TABLE t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_locks.result b/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_locks.result index a39fa0a429e20..744963b534fa5 100644 --- a/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_locks.result +++ b/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_locks.result @@ -57,7 +57,7 @@ UPDATE t1 SET value=30 WHERE id=3; COMMIT; connection con1; SELECT * FROM t1 WHERE id=3 FOR UPDATE; -ERROR 40001: Deadlock found when trying to get lock; try restarting transaction +ERROR 40001: Deadlock found when trying to get lock; try restarting transaction (snapshot conflict) ROLLBACK; disconnect con1; connection default; diff --git 
a/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_parts.result b/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_parts.result index acf62d0bb700c..6d4b3e7c33935 100644 --- a/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_parts.result +++ b/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_parts.result @@ -94,7 +94,7 @@ drop table t1, t2; CREATE TABLE t1 (c1 INT NOT NULL, c2 CHAR(5)) PARTITION BY HASH(c1) PARTITIONS 4; INSERT INTO t1 VALUES(1,'a'); RENAME TABLE t1 TO db3.t3; -ERROR HY000: Error on rename of './test/t1' to './db3/t3' (errno: 122 - Internal (unspecified) error in handler) +ERROR HY000: Error on rename of './test/t1' to './db3/t3' (errno: -1 - Unknown error -1) SELECT * FROM t1; c1 c2 1 a diff --git a/storage/rocksdb/mysql-test/rocksdb/r/rqg_runtime.result b/storage/rocksdb/mysql-test/rocksdb/r/rqg_runtime.result index b0a1c4080066b..1b872f82c587e 100644 --- a/storage/rocksdb/mysql-test/rocksdb/r/rqg_runtime.result +++ b/storage/rocksdb/mysql-test/rocksdb/r/rqg_runtime.result @@ -1,5 +1,6 @@ call mtr.add_suppression("Did not write failed "); call mtr.add_suppression("Can't open and lock privilege tables"); +call mtr.add_suppression("Attempt to delete the trigger file"); SET @ORIG_EVENT_SCHEDULER = @@EVENT_SCHEDULER; CREATE TABLE mysql.user_temp LIKE mysql.user; INSERT mysql.user_temp SELECT * FROM mysql.user; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/show_engine.result b/storage/rocksdb/mysql-test/rocksdb/r/show_engine.result index 19d794da848eb..d1072eee4ad14 100644 --- a/storage/rocksdb/mysql-test/rocksdb/r/show_engine.result +++ b/storage/rocksdb/mysql-test/rocksdb/r/show_engine.result @@ -7,14 +7,17 @@ CREATE TABLE t2 (j INT, PRIMARY KEY (j) COMMENT 'rev:cf_t2') ENGINE = ROCKSDB; CREATE TABLE t3 (k INT, PRIMARY KEY (k) COMMENT 'cf_t1') ENGINE = ROCKSDB; CREATE TABLE t4 (l INT, PRIMARY KEY (l) COMMENT 'cf_t4') ENGINE = ROCKSDB PARTITION BY KEY(l) PARTITIONS 4; +SET GLOBAL rocksdb_force_flush_memtable_now=1; SHOW ENGINE rocksdb STATUS; Type Name Status 
-DBSTATS rocksdb # -CF_COMPACTION __system__ # -CF_COMPACTION cf_t1 # -CF_COMPACTION default # -CF_COMPACTION rev:cf_t2 # -Memory_Stats rocksdb # +STATISTICS # # +DBSTATS # # +CF_COMPACTION # # +CF_COMPACTION # # +CF_COMPACTION # # +CF_COMPACTION # # +MEMORY_STATS # # +BG_THREADS # # INSERT INTO t1 VALUES (1), (2), (3); SELECT COUNT(*) FROM t1; COUNT(*) @@ -125,19 +128,30 @@ __system__ PREFIX_EXTRACTOR # __system__ COMPACTION_STYLE # __system__ COMPACTION_OPTIONS_UNIVERSAL # __system__ COMPACTION_OPTION_FIFO::MAX_TABLE_FILES_SIZE # -__system__ BLOCK_BASED_TABLE_FACTORY::CACHE_INDEX_AND_FILTER_BLOCKS # -__system__ BLOCK_BASED_TABLE_FACTORY::INDEX_TYPE # -__system__ BLOCK_BASED_TABLE_FACTORY::HASH_INDEX_ALLOW_COLLISION # -__system__ BLOCK_BASED_TABLE_FACTORY::CHECKSUM # -__system__ BLOCK_BASED_TABLE_FACTORY::NO_BLOCK_CACHE # -__system__ BLOCK_BASED_TABLE_FACTORY::FILTER_POLICY # -__system__ BLOCK_BASED_TABLE_FACTORY::WHOLE_KEY_FILTERING # -__system__ BLOCK_BASED_TABLE_FACTORY::BLOCK_CACHE # -__system__ BLOCK_BASED_TABLE_FACTORY::BLOCK_CACHE_COMPRESSED # -__system__ BLOCK_BASED_TABLE_FACTORY::BLOCK_SIZE # -__system__ BLOCK_BASED_TABLE_FACTORY::BLOCK_SIZE_DEVIATION # -__system__ BLOCK_BASED_TABLE_FACTORY::BLOCK_RESTART_INTERVAL # -__system__ BLOCK_BASED_TABLE_FACTORY::FORMAT_VERSION # +__system__ TABLE_FACTORY::FLUSH_BLOCK_POLICY_FACTORY # +__system__ TABLE_FACTORY::CACHE_INDEX_AND_FILTER_BLOCKS # +__system__ TABLE_FACTORY::CACHE_INDEX_AND_FILTER_BLOCKS_WITH_HIGH_PRIORITY # +__system__ TABLE_FACTORY::PIN_L0_FILTER_AND_INDEX_BLOCKS_IN_CACHE # +__system__ TABLE_FACTORY::INDEX_TYPE # +__system__ TABLE_FACTORY::HASH_INDEX_ALLOW_COLLISION # +__system__ TABLE_FACTORY::CHECKSUM # +__system__ TABLE_FACTORY::NO_BLOCK_CACHE # +__system__ TABLE_FACTORY::BLOCK_CACHE # +__system__ TABLE_FACTORY::BLOCK_CACHE_NAME # +__system__ TABLE_FACTORY::BLOCK_CACHE_OPTIONS # +__system__ TABLE_FACTORY::CAPACITY # +__system__ TABLE_FACTORY::NUM_SHARD_BITS # +__system__ 
TABLE_FACTORY::STRICT_CAPACITY_LIMIT # +__system__ TABLE_FACTORY::HIGH_PRI_POOL_RATIO # +__system__ TABLE_FACTORY::BLOCK_CACHE_COMPRESSED # +__system__ TABLE_FACTORY::PERSISTENT_CACHE # +__system__ TABLE_FACTORY::BLOCK_SIZE # +__system__ TABLE_FACTORY::BLOCK_SIZE_DEVIATION # +__system__ TABLE_FACTORY::BLOCK_RESTART_INTERVAL # +__system__ TABLE_FACTORY::INDEX_BLOCK_RESTART_INTERVAL # +__system__ TABLE_FACTORY::FILTER_POLICY # +__system__ TABLE_FACTORY::WHOLE_KEY_FILTERING # +__system__ TABLE_FACTORY::FORMAT_VERSION # cf_t1 COMPARATOR # cf_t1 MERGE_OPERATOR # cf_t1 COMPACTION_FILTER # @@ -179,19 +193,30 @@ cf_t1 PREFIX_EXTRACTOR # cf_t1 COMPACTION_STYLE # cf_t1 COMPACTION_OPTIONS_UNIVERSAL # cf_t1 COMPACTION_OPTION_FIFO::MAX_TABLE_FILES_SIZE # -cf_t1 BLOCK_BASED_TABLE_FACTORY::CACHE_INDEX_AND_FILTER_BLOCKS # -cf_t1 BLOCK_BASED_TABLE_FACTORY::INDEX_TYPE # -cf_t1 BLOCK_BASED_TABLE_FACTORY::HASH_INDEX_ALLOW_COLLISION # -cf_t1 BLOCK_BASED_TABLE_FACTORY::CHECKSUM # -cf_t1 BLOCK_BASED_TABLE_FACTORY::NO_BLOCK_CACHE # -cf_t1 BLOCK_BASED_TABLE_FACTORY::FILTER_POLICY # -cf_t1 BLOCK_BASED_TABLE_FACTORY::WHOLE_KEY_FILTERING # -cf_t1 BLOCK_BASED_TABLE_FACTORY::BLOCK_CACHE # -cf_t1 BLOCK_BASED_TABLE_FACTORY::BLOCK_CACHE_COMPRESSED # -cf_t1 BLOCK_BASED_TABLE_FACTORY::BLOCK_SIZE # -cf_t1 BLOCK_BASED_TABLE_FACTORY::BLOCK_SIZE_DEVIATION # -cf_t1 BLOCK_BASED_TABLE_FACTORY::BLOCK_RESTART_INTERVAL # -cf_t1 BLOCK_BASED_TABLE_FACTORY::FORMAT_VERSION # +cf_t1 TABLE_FACTORY::FLUSH_BLOCK_POLICY_FACTORY # +cf_t1 TABLE_FACTORY::CACHE_INDEX_AND_FILTER_BLOCKS # +cf_t1 TABLE_FACTORY::CACHE_INDEX_AND_FILTER_BLOCKS_WITH_HIGH_PRIORITY # +cf_t1 TABLE_FACTORY::PIN_L0_FILTER_AND_INDEX_BLOCKS_IN_CACHE # +cf_t1 TABLE_FACTORY::INDEX_TYPE # +cf_t1 TABLE_FACTORY::HASH_INDEX_ALLOW_COLLISION # +cf_t1 TABLE_FACTORY::CHECKSUM # +cf_t1 TABLE_FACTORY::NO_BLOCK_CACHE # +cf_t1 TABLE_FACTORY::BLOCK_CACHE # +cf_t1 TABLE_FACTORY::BLOCK_CACHE_NAME # +cf_t1 TABLE_FACTORY::BLOCK_CACHE_OPTIONS # +cf_t1 
TABLE_FACTORY::CAPACITY # +cf_t1 TABLE_FACTORY::NUM_SHARD_BITS # +cf_t1 TABLE_FACTORY::STRICT_CAPACITY_LIMIT # +cf_t1 TABLE_FACTORY::HIGH_PRI_POOL_RATIO # +cf_t1 TABLE_FACTORY::BLOCK_CACHE_COMPRESSED # +cf_t1 TABLE_FACTORY::PERSISTENT_CACHE # +cf_t1 TABLE_FACTORY::BLOCK_SIZE # +cf_t1 TABLE_FACTORY::BLOCK_SIZE_DEVIATION # +cf_t1 TABLE_FACTORY::BLOCK_RESTART_INTERVAL # +cf_t1 TABLE_FACTORY::INDEX_BLOCK_RESTART_INTERVAL # +cf_t1 TABLE_FACTORY::FILTER_POLICY # +cf_t1 TABLE_FACTORY::WHOLE_KEY_FILTERING # +cf_t1 TABLE_FACTORY::FORMAT_VERSION # default COMPARATOR # default MERGE_OPERATOR # default COMPACTION_FILTER # @@ -233,19 +258,30 @@ default PREFIX_EXTRACTOR # default COMPACTION_STYLE # default COMPACTION_OPTIONS_UNIVERSAL # default COMPACTION_OPTION_FIFO::MAX_TABLE_FILES_SIZE # -default BLOCK_BASED_TABLE_FACTORY::CACHE_INDEX_AND_FILTER_BLOCKS # -default BLOCK_BASED_TABLE_FACTORY::INDEX_TYPE # -default BLOCK_BASED_TABLE_FACTORY::HASH_INDEX_ALLOW_COLLISION # -default BLOCK_BASED_TABLE_FACTORY::CHECKSUM # -default BLOCK_BASED_TABLE_FACTORY::NO_BLOCK_CACHE # -default BLOCK_BASED_TABLE_FACTORY::FILTER_POLICY # -default BLOCK_BASED_TABLE_FACTORY::WHOLE_KEY_FILTERING # -default BLOCK_BASED_TABLE_FACTORY::BLOCK_CACHE # -default BLOCK_BASED_TABLE_FACTORY::BLOCK_CACHE_COMPRESSED # -default BLOCK_BASED_TABLE_FACTORY::BLOCK_SIZE # -default BLOCK_BASED_TABLE_FACTORY::BLOCK_SIZE_DEVIATION # -default BLOCK_BASED_TABLE_FACTORY::BLOCK_RESTART_INTERVAL # -default BLOCK_BASED_TABLE_FACTORY::FORMAT_VERSION # +default TABLE_FACTORY::FLUSH_BLOCK_POLICY_FACTORY # +default TABLE_FACTORY::CACHE_INDEX_AND_FILTER_BLOCKS # +default TABLE_FACTORY::CACHE_INDEX_AND_FILTER_BLOCKS_WITH_HIGH_PRIORITY # +default TABLE_FACTORY::PIN_L0_FILTER_AND_INDEX_BLOCKS_IN_CACHE # +default TABLE_FACTORY::INDEX_TYPE # +default TABLE_FACTORY::HASH_INDEX_ALLOW_COLLISION # +default TABLE_FACTORY::CHECKSUM # +default TABLE_FACTORY::NO_BLOCK_CACHE # +default TABLE_FACTORY::BLOCK_CACHE # +default 
TABLE_FACTORY::BLOCK_CACHE_NAME # +default TABLE_FACTORY::BLOCK_CACHE_OPTIONS # +default TABLE_FACTORY::CAPACITY # +default TABLE_FACTORY::NUM_SHARD_BITS # +default TABLE_FACTORY::STRICT_CAPACITY_LIMIT # +default TABLE_FACTORY::HIGH_PRI_POOL_RATIO # +default TABLE_FACTORY::BLOCK_CACHE_COMPRESSED # +default TABLE_FACTORY::PERSISTENT_CACHE # +default TABLE_FACTORY::BLOCK_SIZE # +default TABLE_FACTORY::BLOCK_SIZE_DEVIATION # +default TABLE_FACTORY::BLOCK_RESTART_INTERVAL # +default TABLE_FACTORY::INDEX_BLOCK_RESTART_INTERVAL # +default TABLE_FACTORY::FILTER_POLICY # +default TABLE_FACTORY::WHOLE_KEY_FILTERING # +default TABLE_FACTORY::FORMAT_VERSION # rev:cf_t2 COMPARATOR # rev:cf_t2 MERGE_OPERATOR # rev:cf_t2 COMPACTION_FILTER # @@ -287,19 +323,30 @@ rev:cf_t2 PREFIX_EXTRACTOR # rev:cf_t2 COMPACTION_STYLE # rev:cf_t2 COMPACTION_OPTIONS_UNIVERSAL # rev:cf_t2 COMPACTION_OPTION_FIFO::MAX_TABLE_FILES_SIZE # -rev:cf_t2 BLOCK_BASED_TABLE_FACTORY::CACHE_INDEX_AND_FILTER_BLOCKS # -rev:cf_t2 BLOCK_BASED_TABLE_FACTORY::INDEX_TYPE # -rev:cf_t2 BLOCK_BASED_TABLE_FACTORY::HASH_INDEX_ALLOW_COLLISION # -rev:cf_t2 BLOCK_BASED_TABLE_FACTORY::CHECKSUM # -rev:cf_t2 BLOCK_BASED_TABLE_FACTORY::NO_BLOCK_CACHE # -rev:cf_t2 BLOCK_BASED_TABLE_FACTORY::FILTER_POLICY # -rev:cf_t2 BLOCK_BASED_TABLE_FACTORY::WHOLE_KEY_FILTERING # -rev:cf_t2 BLOCK_BASED_TABLE_FACTORY::BLOCK_CACHE # -rev:cf_t2 BLOCK_BASED_TABLE_FACTORY::BLOCK_CACHE_COMPRESSED # -rev:cf_t2 BLOCK_BASED_TABLE_FACTORY::BLOCK_SIZE # -rev:cf_t2 BLOCK_BASED_TABLE_FACTORY::BLOCK_SIZE_DEVIATION # -rev:cf_t2 BLOCK_BASED_TABLE_FACTORY::BLOCK_RESTART_INTERVAL # -rev:cf_t2 BLOCK_BASED_TABLE_FACTORY::FORMAT_VERSION # +rev:cf_t2 TABLE_FACTORY::FLUSH_BLOCK_POLICY_FACTORY # +rev:cf_t2 TABLE_FACTORY::CACHE_INDEX_AND_FILTER_BLOCKS # +rev:cf_t2 TABLE_FACTORY::CACHE_INDEX_AND_FILTER_BLOCKS_WITH_HIGH_PRIORITY # +rev:cf_t2 TABLE_FACTORY::PIN_L0_FILTER_AND_INDEX_BLOCKS_IN_CACHE # +rev:cf_t2 TABLE_FACTORY::INDEX_TYPE # +rev:cf_t2 
TABLE_FACTORY::HASH_INDEX_ALLOW_COLLISION # +rev:cf_t2 TABLE_FACTORY::CHECKSUM # +rev:cf_t2 TABLE_FACTORY::NO_BLOCK_CACHE # +rev:cf_t2 TABLE_FACTORY::BLOCK_CACHE # +rev:cf_t2 TABLE_FACTORY::BLOCK_CACHE_NAME # +rev:cf_t2 TABLE_FACTORY::BLOCK_CACHE_OPTIONS # +rev:cf_t2 TABLE_FACTORY::CAPACITY # +rev:cf_t2 TABLE_FACTORY::NUM_SHARD_BITS # +rev:cf_t2 TABLE_FACTORY::STRICT_CAPACITY_LIMIT # +rev:cf_t2 TABLE_FACTORY::HIGH_PRI_POOL_RATIO # +rev:cf_t2 TABLE_FACTORY::BLOCK_CACHE_COMPRESSED # +rev:cf_t2 TABLE_FACTORY::PERSISTENT_CACHE # +rev:cf_t2 TABLE_FACTORY::BLOCK_SIZE # +rev:cf_t2 TABLE_FACTORY::BLOCK_SIZE_DEVIATION # +rev:cf_t2 TABLE_FACTORY::BLOCK_RESTART_INTERVAL # +rev:cf_t2 TABLE_FACTORY::INDEX_BLOCK_RESTART_INTERVAL # +rev:cf_t2 TABLE_FACTORY::FILTER_POLICY # +rev:cf_t2 TABLE_FACTORY::WHOLE_KEY_FILTERING # +rev:cf_t2 TABLE_FACTORY::FORMAT_VERSION # DROP TABLE t1; DROP TABLE t2; DROP TABLE t3; @@ -336,6 +383,7 @@ LIST OF SNAPSHOTS FOR EACH SESSION: MySQL thread id TID, OS thread handle PTR, query id QID localhost root ACTION SHOW ENGINE rocksdb TRANSACTION STATUS lock count 0, write count 0 +insert count 0, update count 0, delete count 0 ----------------------------------------- END OF ROCKSDB TRANSACTION MONITOR OUTPUT ========================================= diff --git a/storage/rocksdb/mysql-test/rocksdb/r/singledelete.result b/storage/rocksdb/mysql-test/rocksdb/r/singledelete.result index ef9fafc852a04..da29f32541028 100644 --- a/storage/rocksdb/mysql-test/rocksdb/r/singledelete.result +++ b/storage/rocksdb/mysql-test/rocksdb/r/singledelete.result @@ -63,4 +63,24 @@ true select case when variable_value-@d < 10 then 'true' else 'false' end from information_schema.global_status where variable_name='rocksdb_number_sst_entry_delete'; case when variable_value-@d < 10 then 'true' else 'false' end true -DROP TABLE t1, t2, t3, t4, t5; +CREATE TABLE t6 ( +pk VARCHAR(64) COLLATE latin1_swedish_ci PRIMARY KEY +) ENGINE=RocksDB; +INSERT INTO t6 VALUES ('a'); +SET GLOBAL 
rocksdb_force_flush_memtable_now=1; +SELECT * FROM t6; +pk +a +UPDATE t6 SET pk='A' WHERE pk='a'; +SELECT * FROM t6; +pk +A +DELETE FROM t6 where pk='A'; +SELECT should return nothing; +SELECT * FROM t6; +pk +SET GLOBAL rocksdb_force_flush_memtable_now=1; +SELECT should return nothing; +SELECT * FROM t6; +pk +DROP TABLE t1, t2, t3, t4, t5, t6; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/table_stats.result b/storage/rocksdb/mysql-test/rocksdb/r/table_stats.result index e0520f5a31b29..bbc14cf200b9d 100644 --- a/storage/rocksdb/mysql-test/rocksdb/r/table_stats.result +++ b/storage/rocksdb/mysql-test/rocksdb/r/table_stats.result @@ -4,6 +4,6 @@ SELECT COUNT(*) FROM t1; COUNT(*) 1000 SELECT * FROM INFORMATION_SCHEMA.TABLE_STATISTICS WHERE TABLE_NAME = "t1"; -TABLE_SCHEMA TABLE_NAME TABLE_ENGINE ROWS_INSERTED ROWS_UPDATED ROWS_DELETED ROWS_READ ROWS_REQUESTED COMPRESSED_PAGE_SIZE COMPRESS_PADDING COMPRESS_OPS COMPRESS_OPS_OK COMPRESS_PRIMARY_OPS COMPRESS_PRIMARY_OPS_OK COMPRESS_USECS COMPRESS_OK_USECS COMPRESS_PRIMARY_USECS COMPRESS_PRIMARY_OK_USECS UNCOMPRESS_OPS UNCOMPRESS_USECS ROWS_INDEX_FIRST ROWS_INDEX_NEXT IO_READ_BYTES IO_READ_REQUESTS IO_READ_SVC_USECS IO_READ_SVC_USECS_MAX IO_READ_WAIT_USECS IO_READ_WAIT_USECS_MAX IO_READ_SLOW_IOS IO_WRITE_BYTES IO_WRITE_REQUESTS IO_WRITE_SVC_USECS IO_WRITE_SVC_USECS_MAX IO_WRITE_WAIT_USECS IO_WRITE_WAIT_USECS_MAX IO_WRITE_SLOW_IOS IO_READ_BYTES_BLOB IO_READ_REQUESTS_BLOB IO_READ_SVC_USECS_BLOB IO_READ_SVC_USECS_MAX_BLOB IO_READ_WAIT_USECS_BLOB IO_READ_WAIT_USECS_MAX_BLOB IO_READ_SLOW_IOS_BLOB IO_READ_BYTES_PRIMARY IO_READ_REQUESTS_PRIMARY IO_READ_SVC_USECS_PRIMARY IO_READ_SVC_USECS_MAX_PRIMARY IO_READ_WAIT_USECS_PRIMARY IO_READ_WAIT_USECS_MAX_PRIMARY IO_READ_SLOW_IOS_PRIMARY IO_READ_BYTES_SECONDARY IO_READ_REQUESTS_SECONDARY IO_READ_SVC_USECS_SECONDARY IO_READ_SVC_USECS_MAX_SECONDARY IO_READ_WAIT_USECS_SECONDARY IO_READ_WAIT_USECS_MAX_SECONDARY IO_READ_SLOW_IOS_SECONDARY IO_INDEX_INSERTS QUERIES_USED QUERIES_EMPTY 
COMMENT_BYTES INNODB_ROW_LOCK_WAITS INNODB_ROW_LOCK_WAIT_TIMEOUTS INNODB_PAGES_READ INNODB_PAGES_READ_INDEX INNODB_PAGES_READ_BLOB INNODB_PAGES_WRITTEN INNODB_PAGES_WRITTEN_INDEX INNODB_PAGES_WRITTEN_BLOB -test t1 ROCKSDB 1000 0 0 1000 1001 0 0 0 0 0 0 0 0 0 0 0 0 1 999 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1001 0 0 0 0 0 0 0 0 0 0 +TABLE_SCHEMA TABLE_NAME TABLE_ENGINE ROWS_INSERTED ROWS_UPDATED ROWS_DELETED ROWS_READ ROWS_REQUESTED COMPRESSED_PAGE_SIZE COMPRESS_PADDING COMPRESS_OPS COMPRESS_OPS_OK COMPRESS_PRIMARY_OPS COMPRESS_PRIMARY_OPS_OK COMPRESS_USECS COMPRESS_OK_USECS COMPRESS_PRIMARY_USECS COMPRESS_PRIMARY_OK_USECS UNCOMPRESS_OPS UNCOMPRESS_USECS ROWS_INDEX_FIRST ROWS_INDEX_NEXT IO_READ_BYTES IO_READ_REQUESTS IO_READ_SVC_USECS IO_READ_SVC_USECS_MAX IO_READ_WAIT_USECS IO_READ_WAIT_USECS_MAX IO_READ_SLOW_IOS IO_WRITE_BYTES IO_WRITE_REQUESTS IO_WRITE_SVC_USECS IO_WRITE_SVC_USECS_MAX IO_WRITE_WAIT_USECS IO_WRITE_WAIT_USECS_MAX IO_WRITE_SLOW_IOS IO_READ_BYTES_BLOB IO_READ_REQUESTS_BLOB IO_READ_SVC_USECS_BLOB IO_READ_SVC_USECS_MAX_BLOB IO_READ_WAIT_USECS_BLOB IO_READ_WAIT_USECS_MAX_BLOB IO_READ_SLOW_IOS_BLOB IO_READ_BYTES_PRIMARY IO_READ_REQUESTS_PRIMARY IO_READ_SVC_USECS_PRIMARY IO_READ_SVC_USECS_MAX_PRIMARY IO_READ_WAIT_USECS_PRIMARY IO_READ_WAIT_USECS_MAX_PRIMARY IO_READ_SLOW_IOS_PRIMARY IO_READ_BYTES_SECONDARY IO_READ_REQUESTS_SECONDARY IO_READ_SVC_USECS_SECONDARY IO_READ_SVC_USECS_MAX_SECONDARY IO_READ_WAIT_USECS_SECONDARY IO_READ_WAIT_USECS_MAX_SECONDARY IO_READ_SLOW_IOS_SECONDARY IO_INDEX_INSERTS QUERIES_USED QUERIES_EMPTY COMMENT_BYTES ROW_LOCK_WAITS ROW_LOCK_WAIT_TIMEOUTS ROW_LOCK_DEADLOCKS INNODB_PAGES_READ INNODB_PAGES_READ_INDEX INNODB_PAGES_READ_BLOB INNODB_PAGES_WRITTEN INNODB_PAGES_WRITTEN_INDEX INNODB_PAGES_WRITTEN_BLOB +test t1 ROCKSDB 1000 0 0 1000 1001 0 0 0 0 0 0 0 0 0 0 0 0 1 999 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1001 0 0 0 0 0 0 0 0 0 0 0 DROP TABLE t1; diff --git 
a/storage/rocksdb/mysql-test/rocksdb/r/tbl_opt_data_index_dir.result b/storage/rocksdb/mysql-test/rocksdb/r/tbl_opt_data_index_dir.result index bbdd604097f07..f072e5a494f2e 100644 --- a/storage/rocksdb/mysql-test/rocksdb/r/tbl_opt_data_index_dir.result +++ b/storage/rocksdb/mysql-test/rocksdb/r/tbl_opt_data_index_dir.result @@ -1,8 +1,8 @@ DROP TABLE IF EXISTS t1; CREATE TABLE t1 (a INT PRIMARY KEY, b CHAR(8)) ENGINE=rocksdb DATA DIRECTORY = '/foo/bar/data'; -ERROR HY000: Got error 197 'Specifying DATA DIRECTORY for an individual table is not supported.' from ROCKSDB +ERROR HY000: Got error 195 'Specifying DATA DIRECTORY for an individual table is not supported.' from ROCKSDB CREATE TABLE t1 (a INT PRIMARY KEY, b CHAR(8)) ENGINE=rocksdb INDEX DIRECTORY = '/foo/bar/index'; -ERROR HY000: Got error 198 'Specifying INDEX DIRECTORY for an individual table is not supported.' from ROCKSDB +ERROR HY000: Got error 196 'Specifying INDEX DIRECTORY for an individual table is not supported.' from ROCKSDB CREATE TABLE t1 (id INT NOT NULL PRIMARY KEY) ENGINE=rocksdb PARTITION BY RANGE (id) ( PARTITION P0 VALUES LESS THAN (1000) @@ -11,7 +11,7 @@ PARTITION P1 VALUES LESS THAN (2000) DATA DIRECTORY = '/foo/bar/data/', PARTITION P2 VALUES LESS THAN (MAXVALUE) ); -ERROR HY000: Got error 197 'Specifying DATA DIRECTORY for an individual table is not supported.' from ROCKSDB +ERROR HY000: Got error 195 'Specifying DATA DIRECTORY for an individual table is not supported.' from ROCKSDB CREATE TABLE t1 (id int not null primary key) ENGINE=rocksdb PARTITION BY RANGE (id) ( PARTITION P0 VALUES LESS THAN (1000) @@ -20,4 +20,4 @@ PARTITION P1 VALUES LESS THAN (2000) INDEX DIRECTORY = '/foo/bar/data/', PARTITION P2 VALUES LESS THAN (MAXVALUE) ); -ERROR HY000: Got error 198 'Specifying INDEX DIRECTORY for an individual table is not supported.' from ROCKSDB +ERROR HY000: Got error 196 'Specifying INDEX DIRECTORY for an individual table is not supported.' 
from ROCKSDB diff --git a/storage/rocksdb/mysql-test/rocksdb/r/ttl_primary.result b/storage/rocksdb/mysql-test/rocksdb/r/ttl_primary.result new file mode 100644 index 0000000000000..79ed7ec039609 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/ttl_primary.result @@ -0,0 +1,491 @@ +CREATE TABLE t1 ( +`a` binary(8) NOT NULL, +`b` varbinary(64) NOT NULL, +`c` varbinary(256) NOT NULL, +`ts` bigint(20) UNSIGNED NOT NULL, +`value` mediumblob NOT NULL, +PRIMARY KEY (`b`,`a`,`c`) +) ENGINE=ROCKSDB DEFAULT CHARSET=latin1 +COMMENT='ttl_duration=1;ttl_col=ts;'; +set global rocksdb_debug_ttl_rec_ts = -100; +INSERT INTO t1 values ('a', 'b', 'c', UNIX_TIMESTAMP(), 'd'); +INSERT INTO t1 values ('d', 'e', 'f', UNIX_TIMESTAMP(), 'g'); +set global rocksdb_debug_ttl_rec_ts = 0; +SELECT COUNT(*) FROM t1; +COUNT(*) +2 +set global rocksdb_force_flush_memtable_now=1; +set global rocksdb_compact_cf='default'; +SELECT COUNT(*) FROM t1; +COUNT(*) +0 +DROP TABLE t1; +CREATE TABLE t1 ( +a bigint(20) NOT NULL, +b int NOT NULL, +ts bigint(20) UNSIGNED NOT NULL, +c int NOT NULL, +PRIMARY KEY (a) +) ENGINE=rocksdb +COMMENT='ttl_duration=1;ttl_col=ts;'; +set global rocksdb_debug_ttl_rec_ts = -100; +INSERT INTO t1 values (1, 3, UNIX_TIMESTAMP(), 5); +INSERT INTO t1 values (2, 4, UNIX_TIMESTAMP(), 6); +set global rocksdb_debug_ttl_rec_ts = 0; +SELECT COUNT(*) FROM t1; +COUNT(*) +2 +set global rocksdb_force_flush_memtable_now=1; +set global rocksdb_compact_cf='default'; +SELECT COUNT(*) FROM t1; +COUNT(*) +0 +DROP TABLE t1; +CREATE TABLE t1 ( +a bigint(20) NOT NULL, +b int NOT NULL, +c int NOT NULL, +ts bigint(20) UNSIGNED NOT NULL, +PRIMARY KEY (a,c) +) ENGINE=rocksdb +COMMENT='ttl_duration=1;ttl_col=ts;'; +set global rocksdb_debug_ttl_rec_ts = -100; +INSERT INTO t1 values (1, 3, 5, UNIX_TIMESTAMP()); +INSERT INTO t1 values (2, 4, 6, UNIX_TIMESTAMP()); +set global rocksdb_debug_ttl_rec_ts = 0; +SELECT COUNT(*) FROM t1; +COUNT(*) +2 +set global rocksdb_force_flush_memtable_now=1; +set global 
rocksdb_compact_cf='default'; +SELECT COUNT(*) FROM t1; +COUNT(*) +0 +DROP TABLE t1; +CREATE TABLE t1 ( +a bigint(20) NOT NULL, +b int NOT NULL, +c int NOT NULL, +ts bigint(20) UNSIGNED NOT NULL, +PRIMARY KEY (a,c) +) ENGINE=rocksdb +COMMENT='ttl_duration=1;ttl_col=ts;'; +set global rocksdb_debug_ttl_rec_ts = -100; +INSERT INTO t1 values (1, 3, 5, UNIX_TIMESTAMP()); +INSERT INTO t1 values (2, 4, 6, UNIX_TIMESTAMP()); +set global rocksdb_debug_ttl_rec_ts = 0; +SELECT COUNT(*) FROM t1; +COUNT(*) +2 +set global rocksdb_force_flush_memtable_now=1; +set global rocksdb_compact_cf='default'; +SELECT COUNT(*) FROM t1; +COUNT(*) +0 +DROP TABLE t1; +CREATE TABLE t1 ( +a bigint(20) NOT NULL, +b int, +c int, +ts bigint(20) UNSIGNED NOT NULL, +PRIMARY KEY (a) +) ENGINE=rocksdb +COMMENT='ttl_duration=1;ttl_col=ts;'; +set global rocksdb_debug_ttl_rec_ts = -100; +INSERT INTO t1 values (1, NULL, NULL, UNIX_TIMESTAMP()); +INSERT INTO t1 values (2, NULL, NULL, UNIX_TIMESTAMP()); +set global rocksdb_debug_ttl_rec_ts = 0; +SELECT COUNT(*) FROM t1; +COUNT(*) +2 +set global rocksdb_force_flush_memtable_now=1; +set global rocksdb_compact_cf='default'; +SELECT COUNT(*) FROM t1; +COUNT(*) +0 +DROP TABLE t1; +CREATE TABLE t1 ( +`a` binary(8) NOT NULL, +`b` varbinary(64), +`c` varbinary(256), +`ts` bigint(20) UNSIGNED NOT NULL, +`value` mediumblob NOT NULL, +PRIMARY KEY (`a`) +) ENGINE=ROCKSDB DEFAULT CHARSET=latin1 +COMMENT='ttl_duration=1;ttl_col=ts;'; +set global rocksdb_debug_ttl_rec_ts = -100; +INSERT INTO t1 values ('a', NULL, 'bc', UNIX_TIMESTAMP(), 'd'); +INSERT INTO t1 values ('d', 'efghijk', NULL, UNIX_TIMESTAMP(), 'l'); +set global rocksdb_debug_ttl_rec_ts = 0; +SELECT COUNT(*) FROM t1; +COUNT(*) +2 +set global rocksdb_force_flush_memtable_now=1; +set global rocksdb_compact_cf='default'; +SELECT COUNT(*) FROM t1; +COUNT(*) +0 +DROP TABLE t1; +CREATE TABLE t1 ( +a bigint(20) NOT NULL, +b int NOT NULL, +c int NOT NULL, +PRIMARY KEY (a) +) ENGINE=rocksdb +COMMENT='ttl_duration=1;'; 
+set global rocksdb_debug_ttl_rec_ts = -100; +INSERT INTO t1 values (1, 3, 5); +INSERT INTO t1 values (2, 4, 6); +set global rocksdb_debug_ttl_rec_ts = 0; +SELECT COUNT(*) FROM t1; +COUNT(*) +2 +set global rocksdb_force_flush_memtable_now=1; +set global rocksdb_compact_cf='default'; +SELECT COUNT(*) FROM t1; +COUNT(*) +0 +DROP TABLE t1; +CREATE TABLE t1 ( +a int, +ts bigint(20) UNSIGNED NOT NULL, +PRIMARY KEY (a, ts) +) ENGINE=rocksdb +COMMENT='ttl_duration=5;ttl_col=ts;'; +INSERT INTO t1 values (1, UNIX_TIMESTAMP()); +INSERT INTO t1 values (2, UNIX_TIMESTAMP()); +SELECT COUNT(*) FROM t1; +COUNT(*) +2 +set global rocksdb_debug_ttl_snapshot_ts = -10; +set global rocksdb_force_flush_memtable_now=1; +set global rocksdb_compact_cf='default'; +set global rocksdb_debug_ttl_snapshot_ts = 0; +SELECT COUNT(*) FROM t1; +COUNT(*) +2 +set global rocksdb_debug_ttl_snapshot_ts = 10; +set global rocksdb_compact_cf='default'; +set global rocksdb_debug_ttl_snapshot_ts = 0; +SELECT COUNT(*) FROM t1; +COUNT(*) +0 +DROP TABLE t1; +CREATE TABLE t1 ( +a bigint(20) NOT NULL, +b int NOT NULL, +ts bigint(20) UNSIGNED NOT NULL, +c int NOT NULL, +PRIMARY KEY (a, ts) +) ENGINE=rocksdb +COMMENT='ttl_duration=1;ttl_col=ts;'; +set global rocksdb_debug_ttl_rec_ts = -100; +INSERT INTO t1 values (1, 3, UNIX_TIMESTAMP(), 5); +INSERT INTO t1 values (2, 4, UNIX_TIMESTAMP(), 6); +set global rocksdb_debug_ttl_rec_ts = 0; +SELECT COUNT(*) FROM t1; +COUNT(*) +2 +set global rocksdb_force_flush_memtable_now=1; +set global rocksdb_compact_cf='default'; +SELECT COUNT(*) FROM t1; +COUNT(*) +0 +DROP TABLE t1; +CREATE TABLE t1 ( +`a` binary(8) NOT NULL, +`b` varbinary(64), +`c` varbinary(256), +`ts` bigint(20) UNSIGNED NOT NULL, +`value` mediumblob NOT NULL, +PRIMARY KEY (`a`, `ts`) +) ENGINE=ROCKSDB DEFAULT CHARSET=latin1 +COMMENT='ttl_duration=1;ttl_col=ts;'; +set global rocksdb_debug_ttl_rec_ts = -100; +INSERT INTO t1 values ('a', NULL, 'bc', UNIX_TIMESTAMP(), 'd'); +INSERT INTO t1 values ('de', 'fghijk', 
NULL, UNIX_TIMESTAMP(), 'l'); +set global rocksdb_debug_ttl_rec_ts = 0; +SELECT COUNT(*) FROM t1; +COUNT(*) +2 +set global rocksdb_force_flush_memtable_now=1; +set global rocksdb_compact_cf='default'; +SELECT COUNT(*) FROM t1; +COUNT(*) +0 +DROP TABLE t1; +CREATE TABLE t1 ( +a INT NOT NULL, +b varbinary(64) NOT NULL, +c varbinary(256) NOT NULL, +ts bigint(20) UNSIGNED NOT NULL, +value mediumblob NOT NULL, +PRIMARY KEY (b,a,c) +) ENGINE=ROCKSDB DEFAULT CHARSET=latin1 +COMMENT='ttl_duration=10;ttl_col=ts;'; +set global rocksdb_debug_ttl_rec_ts = -300; +INSERT INTO t1 values (1, 'b', 'c', UNIX_TIMESTAMP(), 'd'); +INSERT INTO t1 values (2, 'e', 'f', UNIX_TIMESTAMP(), 'g'); +set global rocksdb_debug_ttl_rec_ts = 300; +INSERT INTO t1 values (3, 'i', 'j', UNIX_TIMESTAMP(), 'k'); +INSERT INTO t1 values (4, 'm', 'n', UNIX_TIMESTAMP(), 'o'); +set global rocksdb_debug_ttl_rec_ts = 0; +set global rocksdb_debug_ttl_snapshot_ts = -3600; +set global rocksdb_force_flush_memtable_now=1; +set global rocksdb_compact_cf='default'; +set global rocksdb_debug_ttl_snapshot_ts = 0; +SELECT a FROM t1; +a +1 +2 +3 +4 +set global rocksdb_compact_cf='default'; +SELECT a FROM t1; +a +3 +4 +set global rocksdb_debug_ttl_snapshot_ts = 3600; +set global rocksdb_compact_cf='default'; +set global rocksdb_debug_ttl_snapshot_ts = 0; +SELECT a FROM t1; +a +DROP TABLE t1; +CREATE TABLE t1 ( +a bigint(20) UNSIGNED NOT NULL, +b int NOT NULL, +c int NOT NULL, +ts bigint(20), +PRIMARY KEY (a,c) +) ENGINE=rocksdb +COMMENT='ttl_duration=1;ttl_col=ts;'; +ERROR HY000: TTL column (ts) in MyRocks must be an unsigned non-null 64-bit integer, exist inside the table, and have an accompanying ttl duration. 
+CREATE TABLE t1 ( +a bigint(20) UNSIGNED NOT NULL, +b int NOT NULL, +c int NOT NULL, +ts int, +PRIMARY KEY (a,c) +) ENGINE=rocksdb +COMMENT='ttl_duration=1;ttl_col=ts;'; +ERROR HY000: TTL column (ts) in MyRocks must be an unsigned non-null 64-bit integer, exist inside the table, and have an accompanying ttl duration. +CREATE TABLE t1 ( +a bigint(20) UNSIGNED NOT NULL, +b int NOT NULL, +c int NOT NULL, +PRIMARY KEY (a,c) +) ENGINE=rocksdb +COMMENT='ttl_duration=abc;'; +ERROR HY000: TTL duration (abc) in MyRocks must be an unsigned non-null 64-bit integer. +CREATE TABLE t1 ( +a bigint(20) UNSIGNED NOT NULL, +b int NOT NULL, +c int NOT NULL, +PRIMARY KEY (a,c) +) ENGINE=rocksdb +COMMENT='ttl_duration=1;ttl_col=abc;'; +ERROR HY000: TTL column (abc) in MyRocks must be an unsigned non-null 64-bit integer, exist inside the table, and have an accompanying ttl duration. +CREATE TABLE t1 ( +a bigint(20) UNSIGNED NOT NULL, +b int NOT NULL, +c int NOT NULL, +PRIMARY KEY (a,c) +) ENGINE=rocksdb +COMMENT='ttl_col=abc;'; +ERROR HY000: TTL column (abc) in MyRocks must be an unsigned non-null 64-bit integer, exist inside the table, and have an accompanying ttl duration. 
+CREATE TABLE t1 ( +a bigint(20) NOT NULL, +PRIMARY KEY (a) +) ENGINE=rocksdb +COMMENT='ttl_duration=500;'; +INSERT INTO t1 values (1); +SELECT COUNT(*) FROM t1; +COUNT(*) +1 +set global rocksdb_force_flush_memtable_now=1; +set global rocksdb_compact_cf='default'; +SELECT COUNT(*) FROM t1; +COUNT(*) +1 +DROP TABLE t1; +CREATE TABLE t1 ( +a INT PRIMARY KEY +) ENGINE=rocksdb +COMMENT='ttl_duration=100;'; +INSERT INTO t1 values (1); +SELECT * FROM t1; +a +1 +set global rocksdb_debug_ttl_rec_ts = -300; +ALTER TABLE t1 COMMENT = 'ttl_duration=1'; +set global rocksdb_debug_ttl_rec_ts = 0; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` int(11) NOT NULL, + PRIMARY KEY (`a`) +) ENGINE=ROCKSDB DEFAULT CHARSET=latin1 COMMENT='ttl_duration=1' +set global rocksdb_force_flush_memtable_now=1; +set global rocksdb_compact_cf='default'; +SELECT COUNT(*) FROM t1; +COUNT(*) +0 +DROP TABLE t1; +CREATE TABLE t1 ( +a INT PRIMARY KEY, +b INT +) ENGINE=rocksdb +COMMENT='ttl_duration=100;'; +ALTER TABLE t1 DROP PRIMARY KEY; +ERROR HY000: TTL support is currently disabled when table has secondary indexes or hidden PK. +ALTER TABLE t1 ADD INDEX kb(b), ALGORITHM=INPLACE; +ERROR HY000: TTL support is currently disabled when table has secondary indexes or hidden PK. 
+DROP TABLE t1; +CREATE TABLE t1 ( +a INT PRIMARY KEY, +b INT +) ENGINE=rocksdb +COMMENT='ttl_duration=5;'; +INSERT INTO t1 VALUES (1,1); +INSERT INTO t1 VALUES (2,2); +ALTER TABLE t1 DROP PRIMARY KEY, ADD PRIMARY KEY(b); +set global rocksdb_debug_ttl_snapshot_ts = -3600; +set global rocksdb_force_flush_memtable_now=1; +set @@global.rocksdb_compact_cf = 'default'; +set global rocksdb_debug_ttl_snapshot_ts = 0; +SELECT COUNT(*) FROM t1; +COUNT(*) +2 +set global rocksdb_debug_ttl_snapshot_ts = 3600; +set @@global.rocksdb_compact_cf = 'default'; +set global rocksdb_debug_ttl_snapshot_ts = 0; +SELECT COUNT(*) FROM t1; +COUNT(*) +0 +DROP TABLE t1; +CREATE TABLE t1 ( +a bigint(20) UNSIGNED NOT NULL, +b int, +PRIMARY KEY (a,b) +) ENGINE=rocksdb +COMMENT='asdadfasdfsadfadf ;ttl_duration=1; asfasdfasdfadfa'; +INSERT INTO t1 values (UNIX_TIMESTAMP(), 1); +SELECT COUNT(*) FROM t1; +COUNT(*) +1 +set global rocksdb_debug_ttl_snapshot_ts = 3600; +set global rocksdb_force_flush_memtable_now=1; +set global rocksdb_compact_cf='default'; +set global rocksdb_debug_ttl_snapshot_ts = 0; +SELECT COUNT(*) FROM t1; +COUNT(*) +0 +ALTER TABLE t1 COMMENT = 'adsf;;ttl_duration=5;asfasdfa;ttl_col=a;asdfasdf;'; +set global rocksdb_debug_ttl_rec_ts = 300; +INSERT INTO t1 values (UNIX_TIMESTAMP(), 2); +set global rocksdb_debug_ttl_rec_ts = 0; +set global rocksdb_force_flush_memtable_now=1; +set global rocksdb_compact_cf='default'; +SELECT COUNT(*) FROM t1; +COUNT(*) +1 +set global rocksdb_debug_ttl_snapshot_ts = 3600; +set global rocksdb_compact_cf='default'; +set global rocksdb_debug_ttl_snapshot_ts = 0; +SELECT COUNT(*) FROM t1; +COUNT(*) +0 +DROP TABLE t1; +CREATE TABLE t1 ( +a bigint(20) NOT NULL, +PRIMARY KEY (a) +) ENGINE=rocksdb +COMMENT='ttl_duration=5;'; +set global rocksdb_debug_ttl_rec_ts = -300; +INSERT INTO t1 values (1); +INSERT INTO t1 values (3); +INSERT INTO t1 values (5); +set global rocksdb_debug_ttl_rec_ts = 300; +INSERT INTO t1 values (7); +INSERT INTO t1 values (9); +set 
global rocksdb_debug_ttl_rec_ts = 0; +UPDATE t1 SET a=a+1; +SELECT * FROM t1; +a +10 +2 +4 +6 +8 +set global rocksdb_force_flush_memtable_now=1; +set global rocksdb_compact_cf='default'; +SELECT * FROM t1; +a +10 +8 +DROP TABLE t1; +CREATE TABLE t1 ( +a INT, +b bigint(20) UNSIGNED NOT NULL, +PRIMARY KEY (a) +) ENGINE=rocksdb +COMMENT='ttl_duration=5;ttl_col=b;'; +set global rocksdb_debug_ttl_rec_ts = -300; +INSERT INTO t1 values (1, UNIX_TIMESTAMP()); +INSERT INTO t1 values (3, UNIX_TIMESTAMP()); +INSERT INTO t1 values (5, UNIX_TIMESTAMP()); +INSERT INTO t1 values (7, UNIX_TIMESTAMP()); +set global rocksdb_debug_ttl_rec_ts = 300; +UPDATE t1 SET b=UNIX_TIMESTAMP() WHERE a < 4; +set global rocksdb_debug_ttl_rec_ts = 0; +SELECT a FROM t1; +a +1 +3 +5 +7 +set global rocksdb_force_flush_memtable_now=1; +set global rocksdb_compact_cf='default'; +SELECT a FROM t1; +a +1 +3 +DROP TABLE t1; +CREATE TABLE t1 ( +a bigint(20) NOT NULL, +PRIMARY KEY (a) +) ENGINE=rocksdb +COMMENT='ttl_duration=1;'; +set global rocksdb_debug_ttl_rec_ts = -100; +INSERT INTO t1 values (1); +INSERT INTO t1 values (2); +INSERT INTO t1 values (3); +set global rocksdb_debug_ttl_rec_ts = 0; +set global rocksdb_enable_ttl=0; +set global rocksdb_force_flush_memtable_now=1; +set global rocksdb_compact_cf='default'; +select variable_value into @c from information_schema.global_status where variable_name='rocksdb_rows_expired'; +set global rocksdb_enable_ttl=1; +set global rocksdb_compact_cf='default'; +select variable_value-@c from information_schema.global_status where variable_name='rocksdb_rows_expired'; +variable_value-@c +3 +SELECT COUNT(*) FROM t1; +COUNT(*) +0 +DROP TABLE t1; +CREATE TABLE t1 ( +a bigint(20) NOT NULL, +PRIMARY KEY (a) +) ENGINE=rocksdb +COMMENT='ttl_duration=100;'; +INSERT INTO t1 values (1); +INSERT INTO t1 values (2); +INSERT INTO t1 values (3); +select variable_value into @c from information_schema.global_status where variable_name='rocksdb_rows_expired'; +set global 
rocksdb_force_flush_memtable_now=1; +set global rocksdb_compact_cf='default'; +select variable_value-@c from information_schema.global_status where variable_name='rocksdb_rows_expired'; +variable_value-@c +0 +DROP TABLE t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/ttl_primary_read_filtering.result b/storage/rocksdb/mysql-test/rocksdb/r/ttl_primary_read_filtering.result new file mode 100644 index 0000000000000..0a91fe3fcbd85 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/ttl_primary_read_filtering.result @@ -0,0 +1,238 @@ +CREATE TABLE t1 ( +a int PRIMARY KEY +) ENGINE=rocksdb +COMMENT='ttl_duration=1;'; +set global rocksdb_debug_ttl_rec_ts = -100; +INSERT INTO t1 values (1); +INSERT INTO t1 values (2); +set global rocksdb_debug_ttl_rec_ts = 0; +set global rocksdb_force_flush_memtable_now=1; +SELECT * FROM t1; +a +select variable_value into @c from information_schema.global_status where variable_name='rocksdb_rows_expired'; +set global rocksdb_compact_cf='default'; +select variable_value-@c from information_schema.global_status where variable_name='rocksdb_rows_expired'; +variable_value-@c +2 +DROP TABLE t1; +CREATE TABLE t1 ( +a int PRIMARY KEY, +b BIGINT UNSIGNED NOT NULL +) ENGINE=rocksdb +COMMENT='ttl_duration=10;'; +set global rocksdb_debug_ttl_rec_ts = -300; +INSERT INTO t1 values (1, UNIX_TIMESTAMP()); +set global rocksdb_debug_ttl_rec_ts = 300; +INSERT INTO t1 values (2, UNIX_TIMESTAMP()); +INSERT INTO t1 values (3, UNIX_TIMESTAMP()); +set global rocksdb_debug_ttl_rec_ts = 0; +set global rocksdb_force_flush_memtable_now=1; +SELECT a FROM t1; +a +2 +3 +set global rocksdb_compact_cf='default'; +SELECT a FROM t1; +a +2 +3 +set global rocksdb_debug_ttl_read_filter_ts = -310; +SELECT a FROM t1; +a +set global rocksdb_debug_ttl_read_filter_ts = 0; +DROP TABLE t1; +CREATE TABLE t1 ( +a int PRIMARY KEY +) ENGINE=rocksdb +COMMENT='ttl_duration=1;'; +set global rocksdb_debug_ttl_rec_ts = -100; +INSERT INTO t1 values (1); +INSERT INTO t1 values (3); 
+INSERT INTO t1 values (5); +INSERT INTO t1 values (7); +set global rocksdb_debug_ttl_rec_ts = 0; +SELECT * FROM t1; +a +set global rocksdb_enable_ttl_read_filtering=0; +SELECT * FROM t1; +a +1 +3 +5 +7 +set global rocksdb_enable_ttl_read_filtering=1; +SELECT * FROM t1; +a +DROP TABLE t1; +CREATE TABLE t1 ( +a int, +b int, +c int, +PRIMARY KEY (a,b,c) +) ENGINE=rocksdb +COMMENT='ttl_duration=1;'; +set global rocksdb_debug_ttl_rec_ts = -100; +INSERT INTO t1 values (0,0,0); +INSERT INTO t1 values (0,0,1); +INSERT INTO t1 values (0,1,0); +INSERT INTO t1 values (0,1,1); +INSERT INTO t1 values (1,1,2); +INSERT INTO t1 values (1,2,1); +INSERT INTO t1 values (1,2,2); +INSERT INTO t1 values (1,2,3); +set global rocksdb_debug_ttl_rec_ts = 0; +select variable_value into @c from information_schema.global_status where variable_name='rocksdb_rows_expired'; +set global rocksdb_force_flush_memtable_now=1; +SELECT * FROM t1 WHERE a=1 AND b=2 AND c=2; +a b c +SELECT * FROM t1 WHERE a = 1; +a b c +SELECT max(a) from t1 where a < 3; +max(a) +NULL +SELECT max(a) from t1 where a < 2 AND b = 1 AND c < 3; +max(a) +NULL +SELECT min(a) from t1 where a >= 1; +min(a) +NULL +SELECT min(a) from t1 where a > 1; +min(a) +NULL +select * from t1 where a=1 and b in (1) order by c desc; +a b c +select max(a) from t1 where a <=10; +max(a) +NULL +select a from t1 where a > 0 and a <= 2; +a +select variable_value-@c from information_schema.global_status where variable_name='rocksdb_rows_expired'; +variable_value-@c +0 +set global rocksdb_compact_cf='default'; +select variable_value-@c from information_schema.global_status where variable_name='rocksdb_rows_expired'; +variable_value-@c +8 +DROP TABLE t1; +CREATE TABLE t1 ( +a int PRIMARY KEY +) ENGINE=rocksdb +COMMENT='ttl_duration=100;'; +set global rocksdb_debug_ttl_rec_ts = -110; +INSERT INTO t1 values (1); +set global rocksdb_debug_ttl_rec_ts = 0; +SELECT * FROM t1; +a +INSERT INTO t1 values (1); +SELECT * FROM t1; +a +1 +DROP TABLE t1; +set global 
rocksdb_force_flush_memtable_now=1; +CREATE TABLE t1 ( +a int PRIMARY KEY +) ENGINE=rocksdb +COMMENT='ttl_duration=1;'; +set global rocksdb_debug_ttl_rec_ts = -100; +INSERT INTO t1 values (1); +set global rocksdb_debug_ttl_rec_ts = 0; +SELECT * FROM t1; +a +UPDATE t1 set a = 1; +DROP TABLE t1; +set global rocksdb_force_flush_memtable_now=1; +set global rocksdb_compact_cf='default'; +CREATE TABLE t1 ( +a int PRIMARY KEY, +b int +) ENGINE=rocksdb +COMMENT='ttl_duration=100;'; +set global rocksdb_debug_ttl_rec_ts = -110; +INSERT INTO t1 values (1,1); +INSERT INTO t1 values (3,3); +set global rocksdb_debug_ttl_rec_ts = 0; +INSERT INTO t1 values (5,5); +UPDATE t1 set a = 1; +SELECT * FROM t1; +a b +1 5 +set global rocksdb_enable_ttl_read_filtering=0; +SELECT * FROM t1; +a b +1 5 +3 3 +set global rocksdb_enable_ttl_read_filtering=1; +UPDATE t1 set a = 999 where a = 1; +SELECT * FROM t1; +a b +999 5 +UPDATE t1 set a = a - 1; +SELECT * FROM t1; +a b +998 5 +DROP TABLE t1; +CREATE TABLE t1 ( +a int PRIMARY KEY +) ENGINE=rocksdb +COMMENT='ttl_duration=5;'; +INSERT INTO t1 values (1); +# Creating Snapshot (start transaction) +BEGIN; +SELECT * FROM t1; +a +1 +SELECT * FROM t1; +a +1 +# Switching to connection 2 +set global rocksdb_force_flush_memtable_now=1; +set global rocksdb_compact_cf='default'; +SELECT * FROM t1; +a +# Switching to connection 1 +SELECT * FROM t1; +a +1 +UPDATE t1 set a = a + 1; +SELECT * FROM t1; +a +2 +COMMIT; +SELECT * FROM t1; +a +DROP TABLE t1; +set global rocksdb_force_flush_memtable_now=1; +set global rocksdb_compact_cf='default'; +CREATE TABLE t1 ( +a int PRIMARY KEY +) ENGINE=rocksdb +COMMENT='ttl_duration=1;'; +# On Connection 1 +# Creating Snapshot (start transaction) +BEGIN; +SELECT * FROM t1; +a +# On Connection 2 +set global rocksdb_debug_ttl_rec_ts = -2; +INSERT INTO t1 values (1); +INSERT INTO t1 values (3); +INSERT INTO t1 values (5); +INSERT INTO t1 values (7); +set global rocksdb_debug_ttl_rec_ts = 0; +set global 
rocksdb_force_flush_memtable_now=1; +set global rocksdb_compact_cf='default'; +# On Connection 1 +SELECT * FROM t1; +a +# On Connection 2 +SELECT * FROM t1; +a +set global rocksdb_enable_ttl_read_filtering=0; +SELECT * FROM t1; +a +1 +3 +5 +7 +set global rocksdb_enable_ttl_read_filtering=1; +DROP TABLE t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/ttl_primary_with_partitions.result b/storage/rocksdb/mysql-test/rocksdb/r/ttl_primary_with_partitions.result new file mode 100644 index 0000000000000..3816accad8cb1 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/ttl_primary_with_partitions.result @@ -0,0 +1,256 @@ +CREATE TABLE t1 ( +c1 INT, +PRIMARY KEY (`c1`) +) ENGINE=ROCKSDB +COMMENT="custom_p0_ttl_duration=1;" +PARTITION BY LIST(c1) ( +PARTITION custom_p0 VALUES IN (1, 4, 7), +PARTITION custom_p1 VALUES IN (2, 5, 8), +PARTITION custom_p2 VALUES IN (3, 6, 9) +); +set global rocksdb_debug_ttl_rec_ts = -3600; +INSERT INTO t1 values (1); +INSERT INTO t1 values (2); +INSERT INTO t1 values (3); +INSERT INTO t1 values (4); +INSERT INTO t1 values (5); +INSERT INTO t1 values (6); +INSERT INTO t1 values (7); +INSERT INTO t1 values (8); +INSERT INTO t1 values (9); +set global rocksdb_debug_ttl_rec_ts = 0; +SELECT * FROM t1; +c1 +1 +2 +3 +4 +5 +6 +7 +8 +9 +set global rocksdb_force_flush_memtable_now=1; +set global rocksdb_compact_cf='default'; +SELECT * FROM t1; +c1 +2 +3 +5 +6 +8 +9 +DROP TABLE t1; +CREATE TABLE t1 ( +c1 INT, +c2 INT, +name VARCHAR(25) NOT NULL, +PRIMARY KEY (`c1`, `c2`) COMMENT 'custom_p0_cfname=foo;custom_p1_cfname=my_custom_cf;custom_p2_cfname=baz' +) ENGINE=ROCKSDB +COMMENT="custom_p0_ttl_duration=1;custom_p1_ttl_duration=7;" +PARTITION BY LIST(c1) ( +PARTITION custom_p0 VALUES IN (1, 4, 7), +PARTITION custom_p1 VALUES IN (2, 5, 8), +PARTITION custom_p2 VALUES IN (3, 6, 9) +); +set global rocksdb_debug_ttl_rec_ts = -1200; +INSERT INTO t1 values (1,1,'a'); +INSERT INTO t1 values (4,4,'aaaa'); +INSERT INTO t1 values (7,7,'aaaaaaa'); +set 
global rocksdb_debug_ttl_rec_ts = 1200; +INSERT INTO t1 values (2,2,'aa'); +INSERT INTO t1 values (3,3,'aaa'); +INSERT INTO t1 values (5,5,'aaaaa'); +INSERT INTO t1 values (6,6,'aaaaaa'); +INSERT INTO t1 values (8,8,'aaaaaaaa'); +INSERT INTO t1 values (9,9,'aaaaaaaaa'); +set global rocksdb_debug_ttl_rec_ts = 0; +SELECT * FROM t1; +c1 c2 name +1 1 a +2 2 aa +3 3 aaa +4 4 aaaa +5 5 aaaaa +6 6 aaaaaa +7 7 aaaaaaa +8 8 aaaaaaaa +9 9 aaaaaaaaa +set global rocksdb_force_flush_memtable_now=1; +set @@global.rocksdb_compact_cf = 'foo'; +set @@global.rocksdb_compact_cf = 'my_custom_cf'; +SELECT * FROM t1; +c1 c2 name +2 2 aa +3 3 aaa +5 5 aaaaa +6 6 aaaaaa +8 8 aaaaaaaa +9 9 aaaaaaaaa +set global rocksdb_debug_ttl_snapshot_ts = 3600; +set @@global.rocksdb_compact_cf = 'foo'; +SELECT * FROM t1; +c1 c2 name +2 2 aa +3 3 aaa +5 5 aaaaa +6 6 aaaaaa +8 8 aaaaaaaa +9 9 aaaaaaaaa +set @@global.rocksdb_compact_cf = 'my_custom_cf'; +set global rocksdb_debug_ttl_snapshot_ts = 0; +SELECT * FROM t1; +c1 c2 name +3 3 aaa +6 6 aaaaaa +9 9 aaaaaaaaa +DROP TABLE t1; +CREATE TABLE t1 ( +c1 INT, +c2 INT, +name VARCHAR(25) NOT NULL, +event DATE, +PRIMARY KEY (`c1`, `c2`) COMMENT 'custom_p0_cfname=foo;custom_p1_cfname=bar;custom_p2_cfname=baz;' +) ENGINE=ROCKSDB +COMMENT="custom_p0_ttl_duration=9999;custom_p2_ttl_duration=5;" +PARTITION BY LIST(c1) ( +PARTITION custom_p0 VALUES IN (1, 2, 3), +PARTITION custom_p1 VALUES IN (4, 5, 6), +PARTITION custom_p2 VALUES IN (7, 8, 9) +); +INSERT INTO t1 VALUES (1, 1, "one", null); +INSERT INTO t1 VALUES (2, 2, "two", null); +INSERT INTO t1 VALUES (3, 3, "three", null); +INSERT INTO t1 VALUES (4, 4, "four", null); +INSERT INTO t1 VALUES (5, 5, "five", null); +INSERT INTO t1 VALUES (6, 6, "six", null); +INSERT INTO t1 VALUES (7, 7, "seven", null); +INSERT INTO t1 VALUES (8, 8, "eight", null); +INSERT INTO t1 VALUES (9, 9, "nine", null); +SELECT * FROM t1; +c1 c2 name event +1 1 one NULL +2 2 two NULL +3 3 three NULL +4 4 four NULL +5 5 five NULL +6 6 six 
NULL +7 7 seven NULL +8 8 eight NULL +9 9 nine NULL +set global rocksdb_debug_ttl_rec_ts = 600; +ALTER TABLE t1 DROP PRIMARY KEY, ADD PRIMARY KEY(`c2`,`c1`) COMMENT 'custom_p0_cfname=foo;custom_p1_cfname=bar;custom_p2_cfname=baz;'; +set global rocksdb_debug_ttl_rec_ts = 0; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `c1` int(11) NOT NULL DEFAULT '0', + `c2` int(11) NOT NULL DEFAULT '0', + `name` varchar(25) NOT NULL, + `event` date DEFAULT NULL, + PRIMARY KEY (`c2`,`c1`) COMMENT 'custom_p0_cfname=foo;custom_p1_cfname=bar;custom_p2_cfname=baz;' +) ENGINE=ROCKSDB DEFAULT CHARSET=latin1 COMMENT='custom_p0_ttl_duration=9999;custom_p2_ttl_duration=5;' +/*!50100 PARTITION BY LIST (c1) +(PARTITION custom_p0 VALUES IN (1,2,3) ENGINE = ROCKSDB, + PARTITION custom_p1 VALUES IN (4,5,6) ENGINE = ROCKSDB, + PARTITION custom_p2 VALUES IN (7,8,9) ENGINE = ROCKSDB) */ +set global rocksdb_debug_ttl_snapshot_ts = 100; +set global rocksdb_force_flush_memtable_now=1; +set @@global.rocksdb_compact_cf = 'baz'; +set global rocksdb_debug_ttl_snapshot_ts = 0; +SELECT * FROM t1; +c1 c2 name event +1 1 one NULL +2 2 two NULL +3 3 three NULL +4 4 four NULL +5 5 five NULL +6 6 six NULL +7 7 seven NULL +8 8 eight NULL +9 9 nine NULL +set global rocksdb_debug_ttl_snapshot_ts = 1200; +set @@global.rocksdb_compact_cf = 'foo'; +set @@global.rocksdb_compact_cf = 'baz'; +set global rocksdb_debug_ttl_snapshot_ts = 0; +SELECT * FROM t1; +c1 c2 name event +1 1 one NULL +2 2 two NULL +3 3 three NULL +4 4 four NULL +5 5 five NULL +6 6 six NULL +DROP TABLE t1; +CREATE TABLE t1 ( +c1 BIGINT, +c2 BIGINT UNSIGNED NOT NULL, +name VARCHAR(25) NOT NULL, +event DATE, +PRIMARY KEY (`c1`) COMMENT 'custom_p0_cfname=foo;custom_p1_cfname=bar;custom_p2_cfname=baz;' +) ENGINE=ROCKSDB +COMMENT="ttl_duration=1;custom_p1_ttl_duration=100;custom_p1_ttl_col=c2;custom_p2_ttl_duration=5000;" +PARTITION BY LIST(c1) ( +PARTITION custom_p0 VALUES IN (1, 2, 3), +PARTITION custom_p1 VALUES IN (4, 5, 6), 
+PARTITION custom_p2 VALUES IN (7, 8, 9) +); +set global rocksdb_debug_ttl_rec_ts = -300; +INSERT INTO t1 VALUES (1, UNIX_TIMESTAMP(), "one", null); +INSERT INTO t1 VALUES (2, UNIX_TIMESTAMP(), "two", null); +INSERT INTO t1 VALUES (3, UNIX_TIMESTAMP(), "three", null); +set global rocksdb_debug_ttl_rec_ts = 0; +INSERT INTO t1 VALUES (4, UNIX_TIMESTAMP(), "four", null); +INSERT INTO t1 VALUES (5, UNIX_TIMESTAMP(), "five", null); +INSERT INTO t1 VALUES (6, UNIX_TIMESTAMP(), "six", null); +INSERT INTO t1 VALUES (7, UNIX_TIMESTAMP(), "seven", null); +INSERT INTO t1 VALUES (8, UNIX_TIMESTAMP(), "eight", null); +INSERT INTO t1 VALUES (9, UNIX_TIMESTAMP(), "nine", null); +set global rocksdb_force_flush_memtable_now=1; +set @@global.rocksdb_compact_cf = 'foo'; +set @@global.rocksdb_compact_cf = 'baz'; +set @@global.rocksdb_compact_cf = 'bar'; +SELECT c1 FROM t1; +c1 +4 +5 +6 +7 +8 +9 +set global rocksdb_debug_ttl_snapshot_ts = 600; +set @@global.rocksdb_compact_cf = 'bar'; +set global rocksdb_debug_ttl_snapshot_ts = 0; +SELECT c1 FROM t1; +c1 +7 +8 +9 +DROP TABLE t1; +CREATE TABLE t1 ( +c1 BIGINT, +c2 BIGINT UNSIGNED NOT NULL, +PRIMARY KEY (`c1`, `c2`) +) ENGINE=ROCKSDB +COMMENT="ttl_duration=100;ttl_col=c2;" +PARTITION BY LIST(c1) ( +PARTITION custom_p0 VALUES IN (1), +PARTITION custom_p1 VALUES IN (2), +PARTITION custom_p2 VALUES IN (3) +); +INSERT INTO t1 values (1, UNIX_TIMESTAMP()); +INSERT INTO t1 values (2, UNIX_TIMESTAMP()); +INSERT INTO t1 values (3, UNIX_TIMESTAMP()); +set global rocksdb_force_flush_memtable_now=1; +set global rocksdb_compact_cf='default'; +SELECT c1 FROM t1; +c1 +1 +2 +3 +set global rocksdb_debug_ttl_snapshot_ts = 300; +set global rocksdb_compact_cf='default'; +set global rocksdb_debug_ttl_snapshot_ts = 0; +SELECT c1 FROM t1; +c1 +DROP TABLE t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/type_set_indexes.result b/storage/rocksdb/mysql-test/rocksdb/r/type_set_indexes.result index 62a3004e5844e..391649e0e3b10 100644 --- 
a/storage/rocksdb/mysql-test/rocksdb/r/type_set_indexes.result +++ b/storage/rocksdb/mysql-test/rocksdb/r/type_set_indexes.result @@ -77,4 +77,39 @@ a Africa,Europe,Asia S.America,Europe DROP TABLE t1; +CREATE TABLE t1 ( +a SET('N.America','S.America','Africa','Antarctica','Australia','Europe','Asia'), +b SET('test1','test2','test3','test4','test5'), +c SET('01','22','23','33','34','39','40','44','50','63','64'), +pk SET('1','2','3','4','5','6','7','8','9') PRIMARY KEY, +UNIQUE INDEX b_a (b,a) +) ENGINE=rocksdb; +SHOW INDEX IN t1; +Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment +t1 0 PRIMARY 1 pk A 1000 NULL NULL LSMTREE +t1 0 b_a 1 b A 500 NULL NULL YES LSMTREE +t1 0 b_a 2 a A 1000 NULL NULL YES LSMTREE +INSERT INTO t1 (a,b,c,pk) VALUES +('','test2,test3','01,34,44,23',1), +('',5,2,2), +('N.America,Asia','test4,test2','',3), +('Africa,Europe,Asia','test2,test3','01',4), +('Antarctica','test3','34,44',5), +('Asia','test5','50',6), +('Europe,S.America','test1,','39',7); +Warnings: +Warning 1265 Data truncated for column 'b' at row 7 +EXPLAIN SELECT DISTINCT a, b FROM t1 ORDER BY b DESC, a; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 ALL NULL NULL NULL NULL # Using temporary; Using filesort +SELECT DISTINCT a, b FROM t1 ORDER BY b DESC, a; +a b + test1,test3 + test2,test3 +Africa,Europe,Asia test2,test3 +Antarctica test3 +Asia test5 +N.America,Asia test2,test4 +S.America,Europe test1 +DROP TABLE t1; SET GLOBAL ROCKSDB_PAUSE_BACKGROUND_WORK = @ORIG_PAUSE_BACKGROUND_WORK; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/type_varchar.result b/storage/rocksdb/mysql-test/rocksdb/r/type_varchar.result index 3cb06bc3c9c5c..9784b1a1d94d5 100644 --- a/storage/rocksdb/mysql-test/rocksdb/r/type_varchar.result +++ b/storage/rocksdb/mysql-test/rocksdb/r/type_varchar.result @@ -190,6 +190,9 @@ insert into t1 values (0, 'ab', 'a-b'); insert into t1 values (1, 'a ', 
'a-space'); insert into t1 values (2, 'a', 'a'); insert into t1 values (3, 'a \t', 'a-tab'); +analyze table t1; +Table Op Msg_type Msg_text +test.t1 analyze status OK # Must show 'using index' for latin1_bin and utf8_bin: explain select col1, hex(col1) from t1; @@ -302,6 +305,9 @@ insert into t1 values (0, 'ab', 'a-b'); insert into t1 values (1, 'a ', 'a-space'); insert into t1 values (2, 'a', 'a'); insert into t1 values (3, 'a \t', 'a-tab'); +analyze table t1; +Table Op Msg_type Msg_text +test.t1 analyze status OK # Must show 'using index' for latin1_bin and utf8_bin: explain select col1, hex(col1) from t1; @@ -414,6 +420,9 @@ insert into t1 values (0, 'ab', 'a-b'); insert into t1 values (1, 'a ', 'a-space'); insert into t1 values (2, 'a', 'a'); insert into t1 values (3, 'a \t', 'a-tab'); +analyze table t1; +Table Op Msg_type Msg_text +test.t1 analyze status OK # Must show 'using index' for latin1_bin and utf8_bin: explain select col1, hex(col1) from t1; @@ -526,6 +535,9 @@ insert into t1 values (0, 'ab', 'a-b'); insert into t1 values (1, 'a ', 'a-space'); insert into t1 values (2, 'a', 'a'); insert into t1 values (3, 'a \t', 'a-tab'); +analyze table t1; +Table Op Msg_type Msg_text +test.t1 analyze status OK # Must show 'using index' for latin1_bin and utf8_bin: explain select col1, hex(col1) from t1; @@ -638,6 +650,9 @@ insert into t1 values (0, 'ab', 'a-b'); insert into t1 values (1, 'a ', 'a-space'); insert into t1 values (2, 'a', 'a'); insert into t1 values (3, 'a \t', 'a-tab'); +analyze table t1; +Table Op Msg_type Msg_text +test.t1 analyze status OK # Must show 'using index' for latin1_bin and utf8_bin: explain select col1, hex(col1) from t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/use_direct_reads_writes.result b/storage/rocksdb/mysql-test/rocksdb/r/use_direct_reads_writes.result index d5cfdee4f07c5..c106f0e77f637 100644 --- a/storage/rocksdb/mysql-test/rocksdb/r/use_direct_reads_writes.result +++ 
b/storage/rocksdb/mysql-test/rocksdb/r/use_direct_reads_writes.result @@ -1,2 +1,2 @@ RocksDB: Can't enable both use_direct_reads and allow_mmap_reads - RocksDB: Can't enable both use_direct_writes and allow_mmap_writes + RocksDB: Can't enable both use_direct_io_for_flush_and_compaction and allow_mmap_writes diff --git a/storage/rocksdb/mysql-test/rocksdb/r/varbinary_format.result b/storage/rocksdb/mysql-test/rocksdb/r/varbinary_format.result new file mode 100644 index 0000000000000..9362d42515cf6 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/varbinary_format.result @@ -0,0 +1,260 @@ +CREATE TABLE t1( +vb VARBINARY(64) primary key +) ENGINE=rocksdb; +INSERT INTO t1 values(0x00); +INSERT INTO t1 values(0x0000); +INSERT INTO t1 values(0x0000000000000000); +INSERT INTO t1 values(0x000000); +INSERT INTO t1 values(0x000000000000000000000000000000000000000000000000000000000000000000); +INSERT INTO t1 values(0x00000000000000000000000000000000000000000000000000000000000000); +INSERT INTO t1 values(0x0000000000000000000000000000000000000000000000000000000000000000); +INSERT INTO t1 values(0x00000000); +INSERT INTO t1 values(0x0000000000); +INSERT INTO t1 values(0x00000000000000000000); +INSERT INTO t1 values(0x000000000000); +INSERT INTO t1 values(0x00000000000000); +INSERT INTO t1 values(0x000000000000000000); +SELECT hex(vb) FROM t1; +hex(vb) +00 +0000 +000000 +00000000 +0000000000 +000000000000 +00000000000000 +0000000000000000 +000000000000000000 +00000000000000000000 +00000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +000000000000000000000000000000000000000000000000000000000000000000 +BEGIN; +SELECT hex(vb) FROM t1 FOR UPDATE; +hex(vb) +00 +0000 +000000 +00000000 +0000000000 +000000000000 +00000000000000 +0000000000000000 +000000000000000000 +00000000000000000000 +00000000000000000000000000000000000000000000000000000000000000 
+0000000000000000000000000000000000000000000000000000000000000000 +000000000000000000000000000000000000000000000000000000000000000000 +SELECT SUBSTRING(a.key,9) FROM information_schema.rocksdb_locks AS a ORDER BY a.key; +SUBSTRING(a.key,9) +000000000000000001 +000000000000000002 +000000000000000003 +000000000000000004 +000000000000000005 +000000000000000006 +000000000000000007 +000000000000000008 +000000000000000009000000000000000001 +000000000000000009000000000000000002 +000000000000000009000000000000000009000000000000000009000000000000000007 +000000000000000009000000000000000009000000000000000009000000000000000008 +000000000000000009000000000000000009000000000000000009000000000000000009000000000000000001 +ROLLBACK; +DROP TABLE t1; +set session debug= '+d,MYROCKS_LEGACY_VARBINARY_FORMAT'; +CREATE TABLE t1( +vb VARBINARY(64) primary key +) ENGINE=rocksdb; +set session debug= '-d,MYROCKS_LEGACY_VARBINARY_FORMAT'; +INSERT INTO t1 values(0x00); +INSERT INTO t1 values(0x0000); +INSERT INTO t1 values(0x0000000000000000); +INSERT INTO t1 values(0x000000); +INSERT INTO t1 values(0x000000000000000000000000000000000000000000000000000000000000000000); +INSERT INTO t1 values(0x00000000000000000000000000000000000000000000000000000000000000); +INSERT INTO t1 values(0x0000000000000000000000000000000000000000000000000000000000000000); +INSERT INTO t1 values(0x00000000); +INSERT INTO t1 values(0x0000000000); +INSERT INTO t1 values(0x00000000000000000000); +INSERT INTO t1 values(0x000000000000); +INSERT INTO t1 values(0x00000000000000); +INSERT INTO t1 values(0x000000000000000000); +SELECT hex(vb) FROM t1; +hex(vb) +00 +0000 +000000 +00000000 +0000000000 +000000000000 +00000000000000 +0000000000000000 +000000000000000000 +00000000000000000000 +00000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +000000000000000000000000000000000000000000000000000000000000000000 +BEGIN; +SELECT hex(vb) FROM t1 FOR UPDATE; 
+hex(vb) +00 +0000 +000000 +00000000 +0000000000 +000000000000 +00000000000000 +0000000000000000 +000000000000000000 +00000000000000000000 +00000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +000000000000000000000000000000000000000000000000000000000000000000 +SELECT SUBSTRING(a.key,9) FROM information_schema.rocksdb_locks AS a ORDER BY a.key; +SUBSTRING(a.key,9) +0000000000000000f8 +0000000000000000f9 +0000000000000000fa +0000000000000000fb +0000000000000000fc +0000000000000000fd +0000000000000000fe +0000000000000000ff0000000000000000f7 +0000000000000000ff0000000000000000f8 +0000000000000000ff0000000000000000f9 +0000000000000000ff0000000000000000ff0000000000000000ff0000000000000000fe +0000000000000000ff0000000000000000ff0000000000000000ff0000000000000000ff0000000000000000f7 +0000000000000000ff0000000000000000ff0000000000000000ff0000000000000000ff0000000000000000f8 +ROLLBACK; +DROP TABLE t1; +CREATE TABLE t1( +vc VARCHAR(64) collate 'binary' primary key +) ENGINE=rocksdb; +INSERT INTO t1 values('a'); +INSERT INTO t1 values('aa'); +INSERT INTO t1 values('aaaaaaaa'); +INSERT INTO t1 values('aaa'); +INSERT INTO t1 values('aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa'); +INSERT INTO t1 values('aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa'); +INSERT INTO t1 values('aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa'); +INSERT INTO t1 values('aaaa'); +INSERT INTO t1 values('aaaaa'); +INSERT INTO t1 values('aaaaaaaaaa'); +INSERT INTO t1 values('aaaaaa'); +INSERT INTO t1 values('aaaaaaa'); +INSERT INTO t1 values('aaaaaaaaa'); +SELECT * FROM t1; +vc +a +aa +aaa +aaaa +aaaaa +aaaaaa +aaaaaaa +aaaaaaaa +aaaaaaaaa +aaaaaaaaaa +aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa +aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa +aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa +BEGIN; +SELECT * FROM t1 FOR UPDATE; +vc +a +aa +aaa +aaaa +aaaaa +aaaaaa +aaaaaaa +aaaaaaaa +aaaaaaaaa +aaaaaaaaaa +aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa +aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa +aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa 
+SELECT SUBSTRING(a.key,9) FROM information_schema.rocksdb_locks AS a ORDER BY a.key; +SUBSTRING(a.key,9) +610000000000000001 +616100000000000002 +616161000000000003 +616161610000000004 +616161616100000005 +616161616161000006 +616161616161610007 +616161616161616108 +616161616161616109610000000000000001 +616161616161616109616100000000000002 +616161616161616109616161616161616109616161616161616109616161616161610007 +616161616161616109616161616161616109616161616161616109616161616161616108 +616161616161616109616161616161616109616161616161616109616161616161616109610000000000000001 +ROLLBACK; +DROP TABLE t1; +set session debug= '+d,MYROCKS_LEGACY_VARBINARY_FORMAT'; +CREATE TABLE t1( +vc VARCHAR(64) collate 'binary' primary key +) ENGINE=rocksdb; +set session debug= '-d,MYROCKS_LEGACY_VARBINARY_FORMAT'; +INSERT INTO t1 values('a'); +INSERT INTO t1 values('aa'); +INSERT INTO t1 values('aaaaaaaa'); +INSERT INTO t1 values('aaa'); +INSERT INTO t1 values('aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa'); +INSERT INTO t1 values('aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa'); +INSERT INTO t1 values('aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa'); +INSERT INTO t1 values('aaaa'); +INSERT INTO t1 values('aaaaa'); +INSERT INTO t1 values('aaaaaaaaaa'); +INSERT INTO t1 values('aaaaaa'); +INSERT INTO t1 values('aaaaaaa'); +INSERT INTO t1 values('aaaaaaaaa'); +SELECT * FROM t1; +vc +a +aa +aaa +aaaa +aaaaa +aaaaaa +aaaaaaa +aaaaaaaa +aaaaaaaaa +aaaaaaaaaa +aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa +aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa +aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa +BEGIN; +SELECT * FROM t1 FOR UPDATE; +vc +a +aa +aaa +aaaa +aaaaa +aaaaaa +aaaaaaa +aaaaaaaa +aaaaaaaaa +aaaaaaaaaa +aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa +aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa +aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa +SELECT SUBSTRING(a.key,9) FROM information_schema.rocksdb_locks AS a ORDER BY a.key; +SUBSTRING(a.key,9) +6100000000000000f8 +6161000000000000f9 +6161610000000000fa +6161616100000000fb +6161616161000000fc +6161616161610000fd +6161616161616100fe 
+6161616161616161ff0000000000000000f7 +6161616161616161ff6100000000000000f8 +6161616161616161ff6161000000000000f9 +6161616161616161ff6161616161616161ff6161616161616161ff6161616161616100fe +6161616161616161ff6161616161616161ff6161616161616161ff6161616161616161ff0000000000000000f7 +6161616161616161ff6161616161616161ff6161616161616161ff6161616161616161ff6100000000000000f8 +ROLLBACK; +DROP TABLE t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/write_sync.result b/storage/rocksdb/mysql-test/rocksdb/r/write_sync.result index 6ba50a3796fdd..9eed611a97000 100644 --- a/storage/rocksdb/mysql-test/rocksdb/r/write_sync.result +++ b/storage/rocksdb/mysql-test/rocksdb/r/write_sync.result @@ -1,7 +1,8 @@ SET GLOBAL rocksdb_write_disable_wal=false; SET GLOBAL rocksdb_write_ignore_missing_column_families=true; create table aaa (id int primary key, i int) engine rocksdb; -SET LOCAL rocksdb_flush_log_at_trx_commit=0; +set @save_rocksdb_flush_log_at_trx_commit=@@global.rocksdb_flush_log_at_trx_commit; +SET GLOBAL rocksdb_flush_log_at_trx_commit=0; select variable_value into @a from information_schema.global_status where variable_name='rocksdb_wal_synced'; insert aaa(id, i) values(1,1); select variable_value-@a from information_schema.global_status where variable_name='rocksdb_wal_synced'; @@ -15,7 +16,7 @@ insert aaa(id, i) values(3,1); select variable_value-@a from information_schema.global_status where variable_name='rocksdb_wal_synced'; variable_value-@a 0 -SET LOCAL rocksdb_flush_log_at_trx_commit=1; +SET GLOBAL rocksdb_flush_log_at_trx_commit=1; insert aaa(id, i) values(4,1); select variable_value-@a from information_schema.global_status where variable_name='rocksdb_wal_synced'; variable_value-@a @@ -28,12 +29,10 @@ insert aaa(id, i) values(6,1); select variable_value-@a from information_schema.global_status where variable_name='rocksdb_wal_synced'; variable_value-@a 3 -SET GLOBAL rocksdb_background_sync=on; -SET LOCAL rocksdb_flush_log_at_trx_commit=0; +SET GLOBAL 
rocksdb_flush_log_at_trx_commit=2; insert aaa(id, i) values(7,1); truncate table aaa; drop table aaa; -SET GLOBAL rocksdb_flush_log_at_trx_commit=1; +set @@global.rocksdb_flush_log_at_trx_commit=@save_rocksdb_flush_log_at_trx_commit; SET GLOBAL rocksdb_write_disable_wal=false; SET GLOBAL rocksdb_write_ignore_missing_column_families=false; -SET GLOBAL rocksdb_background_sync=off; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/add_index_inplace.test b/storage/rocksdb/mysql-test/rocksdb/t/add_index_inplace.test index e5abc7e5b34be..5216e84646ac5 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/add_index_inplace.test +++ b/storage/rocksdb/mysql-test/rocksdb/t/add_index_inplace.test @@ -170,7 +170,7 @@ DROP TABLE t1; # test failure in prepare phase (due to collation) CREATE TABLE t1 (a INT, b TEXT); ---error 1105 +--error ER_UNSUPPORTED_COLLATION ALTER TABLE t1 ADD KEY kb(b(10)); ALTER TABLE t1 ADD PRIMARY KEY(a); DROP TABLE t1; @@ -341,3 +341,48 @@ while ($i <= $max) { #SHOW TABLE STATUS WHERE name LIKE 't1'; DROP TABLE t1; + +# https://github.com/facebook/mysql-5.6/issues/602 +# Add then drop same index should be optimized out. +CREATE TABLE t1 ( +a INT PRIMARY KEY, +b INT, +c INT, +KEY kbc(b,c)) ENGINE = ROCKSDB; +INSERT INTO t1 (a,b,c) VALUES (1,1,1); +INSERT INTO t1 (a,b,c) VALUES (2,2,2); +INSERT INTO t1 (a,b,c) VALUES (3,3,3); +SHOW CREATE TABLE t1; + +ALTER TABLE t1 DROP INDEX kbc, ADD INDEX kbc(b,c), ALGORITHM=INPLACE; +ALTER TABLE t1 DROP INDEX kbc; +DROP TABLE t1; + +# Make sure changing key part prefix length causes index rebuild as well. 
+CREATE TABLE t1 ( +a INT PRIMARY KEY, +b varchar(10), +index kb(b(5)) +) ENGINE = ROCKSDB charset utf8 collate utf8_bin; + +INSERT INTO t1 (a,b) VALUES (1,'1111122222'); +INSERT INTO t1 (a,b) VALUES (2,'2222233333'); +INSERT INTO t1 (a,b) VALUES (3,'3333344444'); + +--let $start_max_index_id = query_get_value(SELECT * from INFORMATION_SCHEMA.ROCKSDB_GLOBAL_INFO where type = 'MAX_INDEX_ID', VALUE, 1) + +ALTER TABLE t1 DROP INDEX kb, ADD INDEX kb(b(8)), ALGORITHM=INPLACE; +SELECT * FROM t1 FORCE INDEX(kb); + +--let $end_max_index_id = query_get_value(SELECT * from INFORMATION_SCHEMA.ROCKSDB_GLOBAL_INFO where type = 'MAX_INDEX_ID', VALUE, 1) + +if ($end_max_index_id <= $start_max_index_id) { + echo Max index ID did not increase; +} + +SHOW CREATE TABLE t1; +DROP TABLE t1; + + + + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/add_unique_index_inplace.test b/storage/rocksdb/mysql-test/rocksdb/t/add_unique_index_inplace.test index 375a63c3a3839..cf0546d182cbc 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/add_unique_index_inplace.test +++ b/storage/rocksdb/mysql-test/rocksdb/t/add_unique_index_inplace.test @@ -72,11 +72,30 @@ INSERT INTO t1 (a, b) VALUES (1, 5); INSERT INTO t1 (a, b) VALUES (2, 6); INSERT INTO t1 (a, b) VALUES (3, 7); -# should fail, can't add unique index on table w/ no pk ---error 1105 +# should succeed, we can add unique index on table w/ no pk ALTER TABLE t1 ADD UNIQUE INDEX kb(b); +INSERT INTO t1 (a, b) VALUES (4, 8); +--error ER_DUP_ENTRY +INSERT INTO t1 (a, b) VALUES (5, 5); SHOW CREATE TABLE t1; DROP TABLE t1; +# Test changing key with same name and parts from not unique to unique, should +# fail. 
+CREATE TABLE t1 ( +a INT PRIMARY KEY, +b INT, +c INT, +KEY kbc(b,c)) ENGINE = ROCKSDB; +INSERT INTO t1 (a,b,c) VALUES (1,1,1); +INSERT INTO t1 (a,b,c) VALUES (2,2,2); +INSERT INTO t1 (a,b,c) VALUES (3,2,2); + +# should cause error here, duplicate value 2-2 +--error 1062 +ALTER TABLE t1 DROP INDEX kbc, ADD UNIQUE INDEX kbc(b,c), ALGORITHM=INPLACE; + +DROP TABLE t1; + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/allow_no_primary_key.test b/storage/rocksdb/mysql-test/rocksdb/t/allow_no_primary_key.test index d1fe15b98fee4..2a064dc3b0039 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/allow_no_primary_key.test +++ b/storage/rocksdb/mysql-test/rocksdb/t/allow_no_primary_key.test @@ -71,9 +71,16 @@ CHECK TABLE t1 CHANGED; DROP TABLE t1, t2; -# test disabling unique keys ---error 1105 +# test unique keys with no primary key CREATE TABLE t1 (a INT, b CHAR(8), UNIQUE INDEX(a)) ENGINE=rocksdb; +INSERT INTO t1 (a,b) VALUES (1,'a'),(2,'b'); +--error ER_DUP_ENTRY +INSERT INTO t1 (a,b) VALUES (1,'c'); +SELECT * FROM t1; +SELECT * FROM t1 WHERE a = 2; +EXPLAIN SELECT * FROM t1 WHERE a = 2; + +DROP TABLE t1; ## test restarting a table that has no data CREATE TABLE t1 (a INT, b CHAR(8)) ENGINE=rocksdb; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/bloomfilter-master.opt b/storage/rocksdb/mysql-test/rocksdb/t/bloomfilter-master.opt index 8600e9e415ca0..bf9d0624522b5 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/bloomfilter-master.opt +++ b/storage/rocksdb/mysql-test/rocksdb/t/bloomfilter-master.opt @@ -1,2 +1,2 @@ --rocksdb_default_cf_options=write_buffer_size=256k;block_based_table_factory={filter_policy=bloomfilter:10:false;whole_key_filtering=0;};prefix_extractor=capped:20 ---rocksdb_override_cf_options=cf_short_prefix={prefix_extractor=capped:4};cf_long_prefix={prefix_extractor=capped:240} 
+--rocksdb_override_cf_options=cf_short_prefix={prefix_extractor=capped:4};rev:cf_short_prefix={prefix_extractor=capped:4};cf_long_prefix={prefix_extractor=capped:240};rev:cf_long_prefix={prefix_extractor=capped:240} diff --git a/storage/rocksdb/mysql-test/rocksdb/t/bloomfilter.inc b/storage/rocksdb/mysql-test/rocksdb/t/bloomfilter.inc index acc1a9f236581..11fcb808f7643 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/bloomfilter.inc +++ b/storage/rocksdb/mysql-test/rocksdb/t/bloomfilter.inc @@ -26,12 +26,20 @@ DELIMITER ;// --source $ddl --source suite/rocksdb/t/bloomfilter_load_select.inc +--exec sed s/##CF##/" COMMENT 'rev:cf_short_prefix'"/g $tmpl_ddl > $ddl +--source $ddl +--source suite/rocksdb/t/bloomfilter_load_select.inc #BF is most of the time invoked and useful --exec sed s/##CF##/" COMMENT 'cf_long_prefix'"/g $tmpl_ddl > $ddl --source $ddl --source suite/rocksdb/t/bloomfilter_load_select.inc +--exec sed s/##CF##/" COMMENT 'rev:cf_long_prefix'"/g $tmpl_ddl > $ddl +--source $ddl +--source suite/rocksdb/t/bloomfilter_load_select.inc + + # BUG: Prev() with prefix lookup should not use prefix bloom filter create table r1 (id1 bigint, id2 bigint, id3 bigint, v1 int, v2 text, primary key (id1, id2, id3)) engine=rocksdb DEFAULT CHARSET=latin1 collate latin1_bin; --disable_query_log diff --git a/storage/rocksdb/mysql-test/rocksdb/t/bulk_load.inc b/storage/rocksdb/mysql-test/rocksdb/t/bulk_load.inc new file mode 100644 index 0000000000000..42cab5ad8c11c --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/bulk_load.inc @@ -0,0 +1,156 @@ +--disable_warnings +DROP TABLE IF EXISTS t1, t2, t3; +--enable_warnings + +if ($data_order_desc) +{ + --echo Data will be ordered in descending order +} + +if (!$data_order_desc) +{ + --echo Data will be ordered in ascending order +} + +# Create a table with a primary key and one secondary key as well as one +# more column +eval CREATE TABLE t1( + pk CHAR(5), + a CHAR(30), + b CHAR(30), + PRIMARY KEY(pk) COMMENT "$pk_cf", + 
KEY(a) +) COLLATE 'latin1_bin'; + +# Create a second identical table to validate that bulk loading different +# tables in the same session works +eval CREATE TABLE t2( + pk CHAR(5), + a CHAR(30), + b CHAR(30), + PRIMARY KEY(pk) COMMENT "$pk_cf", + KEY(a) +) COLLATE 'latin1_bin'; + +# Create a third table using partitions to validate that bulk loading works +# across a partitioned table +eval CREATE TABLE t3( + pk CHAR(5), + a CHAR(30), + b CHAR(30), + PRIMARY KEY(pk) COMMENT "$pk_cf", + KEY(a) +) COLLATE 'latin1_bin' PARTITION BY KEY() PARTITIONS 4; + +--let $file = `SELECT CONCAT(@@datadir, "test_loadfile.txt")` + +--let MTR_DATA_ORDER_DESC = $data_order_desc; + +# Create a text file with data to import into the table. +# The primary key is in sorted order and the secondary keys are randomly generated +--let ROCKSDB_INFILE = $file +perl; +my $fn = $ENV{'ROCKSDB_INFILE'}; +open(my $fh, '>', $fn) || die "perl open($fn): $!"; +my $max = 5000000; +my $desc = $ENV{'MTR_DATA_ORDER_DESC'}; +my @chars = ("A".."Z", "a".."z", "0".."9"); +my @powers_of_26 = (26 * 26 * 26 * 26, 26 * 26 * 26, 26 * 26, 26, 1); +for (my $ii = 0; $ii < $max; $ii++) +{ + my $pk; + my $tmp = $ii; + foreach (@powers_of_26) + { + if ($desc == 1) + { + $pk .= chr(ord('z') - int($tmp / $_)); + } + else + { + $pk .= chr(ord('a') + int($tmp / $_)); + } + + $tmp = $tmp % $_; + } + + my $num = int(rand(25)) + 6; + my $a; + $a .= $chars[rand(@chars)] for 1..$num; + + $num = int(rand(25)) + 6; + my $b; + $b .= $chars[rand(@chars)] for 1..$num; + print $fh "$pk\t$a\t$b\n"; +} +close($fh); +EOF + +--file_exists $file + +# Make sure a snapshot held by another user doesn't block the bulk load +connect (other,localhost,root,,); +set session transaction isolation level repeatable read; +select * from information_schema.rocksdb_dbstats where stat_type='DB_NUM_SNAPSHOTS'; +start transaction with consistent snapshot; +select * from information_schema.rocksdb_dbstats where stat_type='DB_NUM_SNAPSHOTS'; + +connection 
default; +set rocksdb_bulk_load=1; +set rocksdb_bulk_load_size=100000; +--disable_query_log +--echo LOAD DATA INFILE INTO TABLE t1; +eval LOAD DATA INFILE '$file' INTO TABLE t1; +--echo LOAD DATA INFILE INTO TABLE t2; +eval LOAD DATA INFILE '$file' INTO TABLE t2; +--echo LOAD DATA INFILE INTO TABLE t3; +eval LOAD DATA INFILE '$file' INTO TABLE t3; +--enable_query_log +set rocksdb_bulk_load=0; + +--remove_file $file + +# Make sure row count index stats are correct +--replace_column 6 # 7 # 8 # 9 # +SHOW TABLE STATUS WHERE name LIKE 't%'; + +ANALYZE TABLE t1, t2, t3; + +--replace_column 6 # 7 # 8 # 9 # +SHOW TABLE STATUS WHERE name LIKE 't%'; + +# Make sure all the data is there. +select count(pk) from t1; +select count(a) from t1; +select count(b) from t1; +select count(pk) from t2; +select count(a) from t2; +select count(b) from t2; +select count(pk) from t3; +select count(a) from t3; +select count(b) from t3; + +# Create a dummy file with a bulk load extesion. It should be removed when +# the server starts +--let $tmpext = .bulk_load.tmp +--let $MYSQLD_DATADIR= `SELECT @@datadir` +--let $datadir = $MYSQLD_DATADIR/.rocksdb +--write_file $datadir/test$tmpext +dummy data +EOF +--write_file $datadir/longfilenamethatvalidatesthatthiswillgetdeleted$tmpext +dummy data +EOF + +# Show the files exists +--list_files $datadir *$tmpext + +# Now restart the server and make sure it automatically removes this test file +--source include/restart_mysqld.inc + +# Show the files do not exist +--list_files $datadir *$tmpext + +# Cleanup +disconnect other; +DROP TABLE t1, t2, t3; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/bulk_load.test b/storage/rocksdb/mysql-test/rocksdb/t/bulk_load.test index de332baa46315..bc63fbb549ae7 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/bulk_load.test +++ b/storage/rocksdb/mysql-test/rocksdb/t/bulk_load.test @@ -1,119 +1,6 @@ --source include/have_rocksdb.inc ---disable_warnings -DROP TABLE IF EXISTS t1, t2, t3; ---enable_warnings +--let 
pk_cf=cf1 +--let data_order_desc=0 -# Create a table with a primary key and one secondary key as well as one -# more column -CREATE TABLE t1(pk CHAR(5) PRIMARY KEY, a char(30), b char(30), key(a)) COLLATE 'latin1_bin'; - -# Create a second identical table to validate that bulk loading different -# tables in the same session works -CREATE TABLE t2(pk CHAR(5) PRIMARY KEY, a char(30), b char(30), key(a)) COLLATE 'latin1_bin'; - -# Create a third table using partitions to validate that bulk loading works -# across a partitioned table -CREATE TABLE t3(pk CHAR(5) PRIMARY KEY, a char(30), b char(30), key(a)) COLLATE 'latin1_bin' - PARTITION BY KEY() PARTITIONS 4; - ---let $file = `SELECT CONCAT(@@datadir, "test_loadfile.txt")` - -# Create a text file with data to import into the table. -# The primary key is in sorted order and the secondary keys are randomly generated ---let ROCKSDB_INFILE = $file -perl; -my $fn = $ENV{'ROCKSDB_INFILE'}; -open(my $fh, '>>', $fn) || die "perl open($fn): $!"; -my $max = 5000000; -my @chars = ("A".."Z", "a".."z", "0".."9"); -my @lowerchars = ("a".."z"); -my @powers_of_26 = (26 * 26 * 26 * 26, 26 * 26 * 26, 26 * 26, 26, 1); -for (my $ii = 0; $ii < $max; $ii++) -{ - my $pk; - my $tmp = $ii; - foreach (@powers_of_26) - { - $pk .= $lowerchars[$tmp / $_]; - $tmp = $tmp % $_; - } - - my $num = int(rand(25)) + 6; - my $a; - $a .= $chars[rand(@chars)] for 1..$num; - - $num = int(rand(25)) + 6; - my $b; - $b .= $chars[rand(@chars)] for 1..$num; - print $fh "$pk\t$a\t$b\n"; -} -close($fh); -EOF - ---file_exists $file - -# Make sure a snapshot held by another user doesn't block the bulk load -connect (other,localhost,root,,); -set session transaction isolation level repeatable read; -select * from information_schema.rocksdb_dbstats where stat_type='DB_NUM_SNAPSHOTS'; -start transaction with consistent snapshot; -select * from information_schema.rocksdb_dbstats where stat_type='DB_NUM_SNAPSHOTS'; - -connection default; -set rocksdb_bulk_load=1; -set 
rocksdb_bulk_load_size=100000; ---disable_query_log ---echo LOAD DATA INFILE INTO TABLE t1; -eval LOAD DATA INFILE '$file' INTO TABLE t1; ---echo LOAD DATA INFILE INTO TABLE t2; -eval LOAD DATA INFILE '$file' INTO TABLE t2; ---echo LOAD DATA INFILE INTO TABLE t3; -eval LOAD DATA INFILE '$file' INTO TABLE t3; ---enable_query_log -set rocksdb_bulk_load=0; - -# Make sure row count index stats are correct ---replace_column 6 # 7 # 8 # 9 # -SHOW TABLE STATUS WHERE name LIKE 't%'; - -ANALYZE TABLE t1, t2, t3; - ---replace_column 6 # 7 # 8 # 9 # -SHOW TABLE STATUS WHERE name LIKE 't%'; - -# Make sure all the data is there. -select count(pk) from t1; -select count(a) from t1; -select count(b) from t1; -select count(pk) from t2; -select count(a) from t2; -select count(b) from t2; -select count(pk) from t3; -select count(a) from t3; -select count(b) from t3; - -# Create a dummy file with a bulk load extesion. It should be removed when -# the server starts ---let $tmpext = .bulk_load.tmp ---let $MYSQLD_DATADIR= `SELECT @@datadir` ---let $datadir = $MYSQLD_DATADIR/.rocksdb ---write_file $datadir/test$tmpext -dummy data -EOF ---write_file $datadir/longfilenamethatvalidatesthatthiswillgetdeleted$tmpext -dummy data -EOF - -# Show the files exists ---list_files $datadir *$tmpext - -# Now restart the server and make sure it automatically removes this test file ---source include/restart_mysqld.inc - -# Show the files do not exist ---list_files $datadir *$tmpext - -# Cleanup -disconnect other; -DROP TABLE t1, t2, t3; +--source bulk_load.inc diff --git a/storage/rocksdb/mysql-test/rocksdb/t/bulk_load_errors.test b/storage/rocksdb/mysql-test/rocksdb/t/bulk_load_errors.test new file mode 100644 index 0000000000000..284e29d1f5a98 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/bulk_load_errors.test @@ -0,0 +1,39 @@ +--source include/have_rocksdb.inc + +CREATE TABLE t1(pk INT, PRIMARY KEY(pk)); + +# Make sure we get an error with out of order keys during bulk load +SET 
rocksdb_bulk_load=1; +INSERT INTO t1 VALUES(10); +INSERT INTO t1 VALUES(11); +--error ER_KEYS_OUT_OF_ORDER +INSERT INTO t1 VALUES(9); +SET rocksdb_bulk_load=0; + +# Make sure only 10 and 11 got into the table +SELECT * FROM t1; + +# Make sure we get an error with overlapping data +SET rocksdb_bulk_load=1; +INSERT INTO t1 VALUES(1); +INSERT INTO t1 VALUES(2); +INSERT INTO t1 VALUES(20); +INSERT INTO t1 VALUES(21); + +# This last crashes the server (intentionally) because we can't return any +# error information from a SET = +--exec echo "wait" > $MYSQLTEST_VARDIR/tmp/mysqld.1.expect +--error 2013 +SET rocksdb_bulk_load=0; + +--exec grep "RocksDB: Error 197 finalizing last SST file while setting bulk loading variable" $MYSQLTEST_VARDIR/log/mysqld.1.err | cut -d] -f2 +--exec echo "" >$MYSQLTEST_VARDIR/log/mysqld.1.err + +# restart the crashed server +--exec echo "restart" > $MYSQLTEST_VARDIR/tmp/mysqld.1.expect + +# Make sure the error exists in the .err log and then restart the server +--enable_reconnect +--source include/wait_until_connected_again.inc + +DROP TABLE t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/bulk_load_rev_cf.test b/storage/rocksdb/mysql-test/rocksdb/t/bulk_load_rev_cf.test new file mode 100644 index 0000000000000..5aec6ff5e99ac --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/bulk_load_rev_cf.test @@ -0,0 +1,6 @@ +--source include/have_rocksdb.inc + +--let pk_cf=rev:cf1 +--let data_order_desc=0 + +--source bulk_load.inc diff --git a/storage/rocksdb/mysql-test/rocksdb/t/bulk_load_rev_cf_and_data.test b/storage/rocksdb/mysql-test/rocksdb/t/bulk_load_rev_cf_and_data.test new file mode 100644 index 0000000000000..83006f9e446bd --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/bulk_load_rev_cf_and_data.test @@ -0,0 +1,6 @@ +--source include/have_rocksdb.inc + +--let pk_cf=rev:cf1 +--let data_order_desc=1 + +--source bulk_load.inc diff --git a/storage/rocksdb/mysql-test/rocksdb/t/bulk_load_rev_data.test 
b/storage/rocksdb/mysql-test/rocksdb/t/bulk_load_rev_data.test new file mode 100644 index 0000000000000..df7b6a7c821be --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/bulk_load_rev_data.test @@ -0,0 +1,6 @@ +--source include/have_rocksdb.inc + +--let pk_cf=cf1 +--let data_order_desc=1 + +--source bulk_load.inc diff --git a/storage/rocksdb/mysql-test/rocksdb/t/collation.test b/storage/rocksdb/mysql-test/rocksdb/t/collation.test index 22bd5db705088..9d20f7168dcc4 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/collation.test +++ b/storage/rocksdb/mysql-test/rocksdb/t/collation.test @@ -12,9 +12,9 @@ CREATE TABLE t1 (id INT primary key, value varchar(50), value2 varbinary(50), va DROP TABLE t1; # ci indexed column is not allowed ---error ER_UNKNOWN_ERROR +--error ER_UNSUPPORTED_COLLATION CREATE TABLE t1 (id INT primary key, value varchar(50), value2 varbinary(50), value3 text, index(value)) engine=rocksdb charset utf8; ---error ER_UNKNOWN_ERROR +--error ER_UNSUPPORTED_COLLATION CREATE TABLE t1 (id INT primary key, value varchar(50), value2 varbinary(50), value3 text, index(value3(50))) engine=rocksdb charset utf8; # ci indexed column with rocksdb_strict_collation_check=OFF is allowed. 
SET GLOBAL rocksdb_strict_collation_check=0; @@ -42,20 +42,20 @@ DROP TABLE t1; SET GLOBAL rocksdb_strict_collation_exceptions=t1; CREATE TABLE t1 (id INT primary key, value varchar(50), index(value)) engine=rocksdb charset utf8; DROP TABLE t1; ---error ER_UNKNOWN_ERROR +--error ER_UNSUPPORTED_COLLATION CREATE TABLE t2 (id INT primary key, value varchar(50), index(value)) engine=rocksdb charset utf8; # test regex for exception list SET GLOBAL rocksdb_strict_collation_exceptions="t.*"; CREATE TABLE t123 (id INT primary key, value varchar(50), index(value)) engine=rocksdb charset utf8; DROP TABLE t123; ---error ER_UNKNOWN_ERROR +--error ER_UNSUPPORTED_COLLATION CREATE TABLE s123 (id INT primary key, value varchar(50), index(value)) engine=rocksdb charset utf8; SET GLOBAL rocksdb_strict_collation_exceptions=".t.*"; CREATE TABLE xt123 (id INT primary key, value varchar(50), index(value)) engine=rocksdb charset utf8; DROP TABLE xt123; ---error ER_UNKNOWN_ERROR +--error ER_UNSUPPORTED_COLLATION CREATE TABLE t123 (id INT primary key, value varchar(50), index(value)) engine=rocksdb charset utf8; # test multiple entries in the list with commas @@ -64,7 +64,7 @@ CREATE TABLE s1 (id INT primary key, value varchar(50), index(value)) engine=roc DROP TABLE s1; CREATE TABLE t1 (id INT primary key, value varchar(50), index(value)) engine=rocksdb charset utf8; DROP TABLE t1; ---error ER_UNKNOWN_ERROR +--error ER_UNSUPPORTED_COLLATION CREATE TABLE u1 (id INT primary key, value varchar(50), index(value)) engine=rocksdb charset utf8; # test multiple entries in the list with vertical bar @@ -73,7 +73,7 @@ CREATE TABLE s1 (id INT primary key, value varchar(50), index(value)) engine=roc DROP TABLE s1; CREATE TABLE t1 (id INT primary key, value varchar(50), index(value)) engine=rocksdb charset utf8; DROP TABLE t1; ---error ER_UNKNOWN_ERROR +--error ER_UNSUPPORTED_COLLATION CREATE TABLE u1 (id INT primary key, value varchar(50), index(value)) engine=rocksdb charset utf8; # test multiple 
entries in the list and extra comma at the front @@ -82,7 +82,7 @@ CREATE TABLE s1 (id INT primary key, value varchar(50), index(value)) engine=roc DROP TABLE s1; CREATE TABLE t1 (id INT primary key, value varchar(50), index(value)) engine=rocksdb charset utf8; DROP TABLE t1; ---error ER_UNKNOWN_ERROR +--error ER_UNSUPPORTED_COLLATION CREATE TABLE u1 (id INT primary key, value varchar(50), index(value)) engine=rocksdb charset utf8; # test multiple entries in the list and extra vertical bar at the front @@ -91,7 +91,7 @@ CREATE TABLE s1 (id INT primary key, value varchar(50), index(value)) engine=roc DROP TABLE s1; CREATE TABLE t1 (id INT primary key, value varchar(50), index(value)) engine=rocksdb charset utf8; DROP TABLE t1; ---error ER_UNKNOWN_ERROR +--error ER_UNSUPPORTED_COLLATION CREATE TABLE u1 (id INT primary key, value varchar(50), index(value)) engine=rocksdb charset utf8; # test multiple entries in the list and extra comma in the middle @@ -100,7 +100,7 @@ CREATE TABLE s1 (id INT primary key, value varchar(50), index(value)) engine=roc DROP TABLE s1; CREATE TABLE t1 (id INT primary key, value varchar(50), index(value)) engine=rocksdb charset utf8; DROP TABLE t1; ---error ER_UNKNOWN_ERROR +--error ER_UNSUPPORTED_COLLATION CREATE TABLE u1 (id INT primary key, value varchar(50), index(value)) engine=rocksdb charset utf8; # test multiple entries in the list and extra vertical bar in the middle @@ -109,7 +109,7 @@ CREATE TABLE s1 (id INT primary key, value varchar(50), index(value)) engine=roc DROP TABLE s1; CREATE TABLE t1 (id INT primary key, value varchar(50), index(value)) engine=rocksdb charset utf8; DROP TABLE t1; ---error ER_UNKNOWN_ERROR +--error ER_UNSUPPORTED_COLLATION CREATE TABLE u1 (id INT primary key, value varchar(50), index(value)) engine=rocksdb charset utf8; # test multiple entries in the list and extra comma at the end @@ -118,7 +118,7 @@ CREATE TABLE s1 (id INT primary key, value varchar(50), index(value)) engine=roc DROP TABLE s1; CREATE 
TABLE t1 (id INT primary key, value varchar(50), index(value)) engine=rocksdb charset utf8; DROP TABLE t1; ---error ER_UNKNOWN_ERROR +--error ER_UNSUPPORTED_COLLATION CREATE TABLE u1 (id INT primary key, value varchar(50), index(value)) engine=rocksdb charset utf8; # test multiple entries in the list and extra vertical bar at the end @@ -127,7 +127,7 @@ CREATE TABLE s1 (id INT primary key, value varchar(50), index(value)) engine=roc DROP TABLE s1; CREATE TABLE t1 (id INT primary key, value varchar(50), index(value)) engine=rocksdb charset utf8; DROP TABLE t1; ---error ER_UNKNOWN_ERROR +--error ER_UNSUPPORTED_COLLATION CREATE TABLE u1 (id INT primary key, value varchar(50), index(value)) engine=rocksdb charset utf8; # test multiple entries in the list and tons of commas and vertical bars just for the fun of it @@ -136,7 +136,7 @@ CREATE TABLE s1 (id INT primary key, value varchar(50), index(value)) engine=roc DROP TABLE s1; CREATE TABLE t1 (id INT primary key, value varchar(50), index(value)) engine=rocksdb charset utf8; DROP TABLE t1; ---error ER_UNKNOWN_ERROR +--error ER_UNSUPPORTED_COLLATION CREATE TABLE u1 (id INT primary key, value varchar(50), index(value)) engine=rocksdb charset utf8; # test allowing alters to create temporary tables @@ -144,10 +144,10 @@ SET GLOBAL rocksdb_strict_collation_exceptions='t1'; CREATE TABLE t1 (id INT primary key, value varchar(50), index(value)) engine=rocksdb; ALTER TABLE t1 AUTO_INCREMENT=1; DROP TABLE t1; ---error ER_UNKNOWN_ERROR +--error ER_UNSUPPORTED_COLLATION CREATE TABLE t2 (id INT primary key, value varchar(50), index(value)) engine=rocksdb; CREATE TABLE t2 (id INT primary key, value varchar(50)) engine=rocksdb; ---error ER_UNKNOWN_ERROR +--error ER_UNSUPPORTED_COLLATION ALTER TABLE t2 ADD INDEX(value); DROP TABLE t2; @@ -156,12 +156,12 @@ DROP TABLE t2; --exec echo "" >$MYSQLTEST_VARDIR/log/mysqld.1.err SET GLOBAL rocksdb_strict_collation_exceptions="[a-b"; --exec grep "Invalid pattern" 
$MYSQLTEST_VARDIR/log/mysqld.1.err | cut -d] -f2 ---error ER_UNKNOWN_ERROR +--error ER_UNSUPPORTED_COLLATION CREATE TABLE a (id INT PRIMARY KEY, value varchar(50), index(value)) engine=rocksdb charset utf8; SET GLOBAL rocksdb_strict_collation_exceptions="[a-b]"; CREATE TABLE a (id INT PRIMARY KEY, value varchar(50), index(value)) engine=rocksdb charset utf8; CREATE TABLE b (id INT PRIMARY KEY, value varchar(50), index(value)) engine=rocksdb charset utf8; ---error ER_UNKNOWN_ERROR +--error ER_UNSUPPORTED_COLLATION CREATE TABLE c (id INT PRIMARY KEY, value varchar(50), index(value)) engine=rocksdb charset utf8; DROP TABLE a, b; @@ -169,11 +169,11 @@ DROP TABLE a, b; --exec echo "" >$MYSQLTEST_VARDIR/log/mysqld.1.err SET GLOBAL rocksdb_strict_collation_exceptions="abc\\"; --exec grep "Invalid pattern" $MYSQLTEST_VARDIR/log/mysqld.1.err | cut -d] -f2 ---error ER_UNKNOWN_ERROR +--error ER_UNSUPPORTED_COLLATION CREATE TABLE abc (id INT PRIMARY KEY, value varchar(50), index(value)) engine=rocksdb charset utf8; SET GLOBAL rocksdb_strict_collation_exceptions="abc"; CREATE TABLE abc (id INT PRIMARY KEY, value varchar(50), index(value)) engine=rocksdb charset utf8; ---error ER_UNKNOWN_ERROR +--error ER_UNSUPPORTED_COLLATION CREATE TABLE abcd (id INT PRIMARY KEY, value varchar(50), index(value)) engine=rocksdb charset utf8; DROP TABLE abc; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/collation_exception.test b/storage/rocksdb/mysql-test/rocksdb/t/collation_exception.test index 7f741e286b1f9..334b1bb2750b8 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/collation_exception.test +++ b/storage/rocksdb/mysql-test/rocksdb/t/collation_exception.test @@ -1,3 +1,5 @@ +--source include/have_rocksdb.inc + CREATE TABLE `r1.lol` ( `c1` int(10) NOT NULL DEFAULT '0', `c2` int(11) NOT NULL DEFAULT '0', diff --git a/storage/rocksdb/mysql-test/rocksdb/t/consistent_snapshot.inc b/storage/rocksdb/mysql-test/rocksdb/t/consistent_snapshot.inc index be01338cb850a..ca7510b1253e6 100644 --- 
a/storage/rocksdb/mysql-test/rocksdb/t/consistent_snapshot.inc +++ b/storage/rocksdb/mysql-test/rocksdb/t/consistent_snapshot.inc @@ -23,7 +23,7 @@ eval SET SESSION TRANSACTION ISOLATION LEVEL $trx_isolation; # no external inserts should be visible to the transaction. # But it should only work this way for REPEATABLE-READ and SERIALIZABLE ---error 0,ER_UNKNOWN_ERROR +--error 0,ER_ISOLATION_LEVEL_WITH_CONSISTENT_SNAPSHOT START TRANSACTION WITH CONSISTENT SNAPSHOT; --echo ERROR: $mysql_errno @@ -38,7 +38,7 @@ connection con2; select * from information_schema.rocksdb_dbstats where stat_type='DB_NUM_SNAPSHOTS'; connection con1; ---error 0,ER_UNKNOWN_ERROR +--error 0,ER_ISOLATION_LEVEL_WITH_CONSISTENT_SNAPSHOT START TRANSACTION WITH CONSISTENT SNAPSHOT; --echo ERROR: $mysql_errno @@ -79,7 +79,7 @@ SELECT * FROM r1; # 5 COMMIT; SELECT * FROM r1; # 6 ---error 0,ER_UNKNOWN_ERROR +--error 0,ER_ISOLATION_LEVEL_WITH_CONSISTENT_SNAPSHOT START TRANSACTION WITH CONSISTENT SNAPSHOT; --echo ERROR: $mysql_errno @@ -97,7 +97,7 @@ SELECT * FROM r1; # 6 COMMIT; SELECT * FROM r1; # 8 ---error 0,ER_UNKNOWN_ERROR +--error 0,ER_ISOLATION_LEVEL_WITH_CONSISTENT_SNAPSHOT START TRANSACTION WITH CONSISTENT SNAPSHOT; --echo ERROR: $mysql_errno @@ -105,7 +105,7 @@ connection con2; INSERT INTO r1 values (9,9,9); connection con1; ---error 0,ER_UNKNOWN_ERROR +--error 0,ER_ISOLATION_LEVEL_WITH_CONSISTENT_SNAPSHOT START TRANSACTION WITH CONSISTENT SNAPSHOT; --echo ERROR: $mysql_errno @@ -115,11 +115,11 @@ INSERT INTO r1 values (10,10,10); connection con1; SELECT * FROM r1; # 9 ---error 0,ER_UNKNOWN_ERROR +--error 0,ER_ISOLATION_LEVEL_WITH_CONSISTENT_SNAPSHOT START TRANSACTION WITH CONSISTENT SNAPSHOT; --echo ERROR: $mysql_errno # Succeeds with Read Committed, Fails with Repeatable Read ---error 0,ER_UNKNOWN_ERROR +--error 0,ER_UPDATES_WITH_CONSISTENT_SNAPSHOT INSERT INTO r1 values (11,11,11); --echo ERROR: $mysql_errno SELECT * FROM r1; # self changes should be visible diff --git 
a/storage/rocksdb/mysql-test/rocksdb/t/corrupted_data_reads_debug.test b/storage/rocksdb/mysql-test/rocksdb/t/corrupted_data_reads_debug.test index b0e74221702fa..c8735c346e4bf 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/corrupted_data_reads_debug.test +++ b/storage/rocksdb/mysql-test/rocksdb/t/corrupted_data_reads_debug.test @@ -24,7 +24,7 @@ select * from t1; set @tmp1=@@rocksdb_verify_row_debug_checksums; set rocksdb_verify_row_debug_checksums=1; set session debug= "+d,myrocks_simulate_bad_row_read1"; ---error ER_GET_ERRNO +--error ER_GET_ERRMSG select * from t1 where pk=1; set session debug= "-d,myrocks_simulate_bad_row_read1"; set rocksdb_verify_row_debug_checksums=@tmp1; @@ -32,12 +32,12 @@ set rocksdb_verify_row_debug_checksums=@tmp1; select * from t1 where pk=1; set session debug= "+d,myrocks_simulate_bad_row_read2"; ---error ER_GET_ERRNO +--error ER_GET_ERRMSG select * from t1 where pk=1; set session debug= "-d,myrocks_simulate_bad_row_read2"; set session debug= "+d,myrocks_simulate_bad_row_read3"; ---error ER_GET_ERRNO +--error ER_GET_ERRMSG select * from t1 where pk=1; set session debug= "-d,myrocks_simulate_bad_row_read3"; @@ -58,7 +58,7 @@ create table t2 ( insert into t2 values ('ABCD',1); select * from t2; set session debug= "+d,myrocks_simulate_bad_pk_read1"; ---error ER_GET_ERRNO +--error ER_GET_ERRMSG select * from t2; set session debug= "-d,myrocks_simulate_bad_pk_read1"; @@ -73,7 +73,7 @@ insert into t2 values ('ABCD',1); select * from t2; set session debug= "+d,myrocks_simulate_bad_pk_read1"; ---error ER_GET_ERRNO +--error ER_GET_ERRMSG select * from t2; set session debug= "-d,myrocks_simulate_bad_pk_read1"; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/deadlock_stats.test b/storage/rocksdb/mysql-test/rocksdb/t/deadlock_stats.test new file mode 100644 index 0000000000000..a9b30a4273a54 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/deadlock_stats.test @@ -0,0 +1,3 @@ +let $engine=rocksdb; + +--source 
include/deadlock_stats.inc diff --git a/storage/rocksdb/mysql-test/rocksdb/t/delete.test b/storage/rocksdb/mysql-test/rocksdb/t/delete.test index b1654e606a5c5..c829dd9da3b04 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/delete.test +++ b/storage/rocksdb/mysql-test/rocksdb/t/delete.test @@ -90,9 +90,9 @@ DELETE FROM t1 WHERE a <= 4 ORDER BY b DESC LIMIT 1; SAVEPOINT spt1; DELETE FROM t1; INSERT INTO t1 (a,b) VALUES (1,'a'); ---error ER_UNKNOWN_ERROR +--error ER_ROLLBACK_TO_SAVEPOINT ROLLBACK TO SAVEPOINT spt1; ---error ER_UNKNOWN_ERROR +--error ER_ROLLBACK_ONLY COMMIT; --sorted_result SELECT a,b FROM t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/drop_table-master.opt b/storage/rocksdb/mysql-test/rocksdb/t/drop_table-master.opt index f53a6050e8940..4afcf2caa5d94 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/drop_table-master.opt +++ b/storage/rocksdb/mysql-test/rocksdb/t/drop_table-master.opt @@ -1,3 +1,4 @@ ---rocksdb_max_background_compactions=8 --rocksdb_max_subcompactions=1 +--rocksdb_info_log_level=info_level --rocksdb_default_cf_options=write_buffer_size=512k;target_file_size_base=512k;level0_file_num_compaction_trigger=2;level0_slowdown_writes_trigger=-1;level0_stop_writes_trigger=1000;max_bytes_for_level_base=1m + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/drop_table.test b/storage/rocksdb/mysql-test/rocksdb/t/drop_table.test index 7b28474d9f253..7867caf1c3c9d 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/drop_table.test +++ b/storage/rocksdb/mysql-test/rocksdb/t/drop_table.test @@ -106,10 +106,5 @@ let $wait_condition = select count(*) = 0 where TYPE = 'DDL_DROP_INDEX_ONGOING'; --source include/wait_condition.inc -# Get list of all indices needing to be dropped -# Check total compacted-away rows for all indices -# Check that all indices have been successfully dropped ---exec perl suite/rocksdb/t/drop_table_compactions.pl $MYSQLTEST_VARDIR/log/mysqld.1.err - # Cleanup drop table t1; diff --git 
a/storage/rocksdb/mysql-test/rocksdb/t/drop_table2.test b/storage/rocksdb/mysql-test/rocksdb/t/drop_table2.test index 3742ab0e4440c..ac6b17461e257 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/drop_table2.test +++ b/storage/rocksdb/mysql-test/rocksdb/t/drop_table2.test @@ -87,6 +87,10 @@ let $max = 1000; let $table = t5; --source drop_table_repopulate_table.inc +set @@global.rocksdb_compact_cf = 'cf1'; +set @@global.rocksdb_compact_cf = 'rev:cf2'; +set @@global.rocksdb_compact_cf = 'default'; + let $output= $MYSQLTEST_VARDIR/tmp/size_output; --exec du -c $MYSQLTEST_VARDIR/mysqld.1/data/.rocksdb/*.sst |grep total |sed 's/[\t]total/ before/' > $output @@ -96,6 +100,10 @@ drop table t3; drop table t4; drop table t5; +set @@global.rocksdb_compact_cf = 'cf1'; +set @@global.rocksdb_compact_cf = 'rev:cf2'; +set @@global.rocksdb_compact_cf = 'default'; + let $show_rpl_debug_info= 1; # to force post-failure printout let $wait_timeout= 300; # Override default 30 seconds with 300. let $wait_condition = select count(*) = 0 diff --git a/storage/rocksdb/mysql-test/rocksdb/t/drop_table_compactions.pl b/storage/rocksdb/mysql-test/rocksdb/t/drop_table_compactions.pl deleted file mode 100755 index b123ac5492fca..0000000000000 --- a/storage/rocksdb/mysql-test/rocksdb/t/drop_table_compactions.pl +++ /dev/null @@ -1,37 +0,0 @@ -sub print_array { - $str = shift; - @arr = @_; - $prev= 0; - foreach (@arr) { - if ($prev) { - $dummy_idx = $_ - $prev; - }else { - $dummy_idx = 0; - } - $prev= $_; - print "$str $dummy_idx\n"; - } -} - -while (<>) { - if (/Compacting away elements from dropped index \(\d+,(\d+)\): (\d+)/) { - $a{$1} += $2; - } - if (/Begin filtering dropped index \(\d+,(\d+)\)/) { - push @b, $1; - } - if (/Finished filtering dropped index \(\d+,(\d+)\)/) { - push @c, $1; - } -} -$prev= 0; -foreach (sort {$a <=> $b} keys %a){ - if ($prev) { - $dummy_idx= $_ - $prev; - }else { - $dummy_idx= 0; - } - $prev= $_; -} -print_array("Begin filtering dropped index+", sort {$a <=> $b} 
@b); -print_array("Finished filtering dropped index+", sort {$a <=> $b} @c); diff --git a/storage/rocksdb/mysql-test/rocksdb/t/duplicate_table.test b/storage/rocksdb/mysql-test/rocksdb/t/duplicate_table.test index 781163f34fb1c..98036359fc8c5 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/duplicate_table.test +++ b/storage/rocksdb/mysql-test/rocksdb/t/duplicate_table.test @@ -8,7 +8,7 @@ INSERT INTO t values (1), (2), (3); CREATE TABLE t(id int primary key) engine=rocksdb; FLUSH TABLES; --exec mv $MYSQLTEST_VARDIR/mysqld.1/data/test/t.frm $MYSQLTEST_VARDIR/mysqld.1/data/test/t.frm.tmp ---error ER_UNKNOWN_ERROR +--error ER_METADATA_INCONSISTENCY CREATE TABLE t(id int primary key) engine=rocksdb; --exec mv $MYSQLTEST_VARDIR/mysqld.1/data/test/t.frm.tmp $MYSQLTEST_VARDIR/mysqld.1/data/test/t.frm FLUSH TABLES; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/foreign_key.test b/storage/rocksdb/mysql-test/rocksdb/t/foreign_key.test index bd8071b1b5ea4..675a337c24d28 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/foreign_key.test +++ b/storage/rocksdb/mysql-test/rocksdb/t/foreign_key.test @@ -1,3 +1,5 @@ +--source include/have_rocksdb.inc + --disable_warnings DROP TABLE IF EXISTS t1, t2; --enable_warnings diff --git a/storage/rocksdb/mysql-test/rocksdb/t/index_merge_rocksdb-master.opt b/storage/rocksdb/mysql-test/rocksdb/t/index_merge_rocksdb-master.opt new file mode 100644 index 0000000000000..c07b063f07c1f --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/index_merge_rocksdb-master.opt @@ -0,0 +1 @@ +--rocksdb_strict_collation_check=off --binlog_format=row --log-bin diff --git a/storage/rocksdb/mysql-test/rocksdb/t/index_merge_rocksdb.test b/storage/rocksdb/mysql-test/rocksdb/t/index_merge_rocksdb.test new file mode 100644 index 0000000000000..abf8d71911b1c --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/index_merge_rocksdb.test @@ -0,0 +1,109 @@ +--source include/have_rocksdb.inc + +# +# index_merge_rocksdb test copied over from 
index_merge_ror.inc +# +# Triggers issue # https://github.com/facebook/mysql-5.6/issues/604 + +CREATE TABLE t1 +( + /* fields/keys for row retrieval tests */ + key1 INT, + key2 INT, + key3 INT, + key4 INT, + + /* make rows much bigger then keys */ + filler1 CHAR(200), + + KEY(key1), + KEY(key2) +) ENGINE=ROCKSDB; + +# fill table +CREATE TABLE t0 AS SELECT * FROM t1; +--disable_query_log +--echo # Printing of many insert into t0 values (....) disabled. +let $cnt=100; +while ($cnt) +{ + eval INSERT INTO t0 VALUES (0, 0, 0, 0, 'data1'); + dec $cnt; +} + +--echo # Printing of many insert into t1 select .... from t0 disabled. +let $1=4; +while ($1) +{ + let $2=4; + while ($2) + { + let $3=4; + while ($3) + { + eval INSERT INTO t1 SELECT key1, key2, key3, key4, filler1 FROM t0; + dec $3; + } + dec $2; + } + dec $1; +} + +--echo # Printing of many insert into t1 (...) values (....) disabled. +# Row retrieval tests +# -1 is used for values 'out of any range we are using' +# insert enough rows for index intersection to be used for (key1,key2) +INSERT INTO t1 (key1, key2, key3, key4, filler1) VALUES (100, 100, 100, 100,'key1-key2-key3-key4'); +let $cnt=400; +while ($cnt) +{ + eval INSERT INTO t1 (key1, key2, key3, key4, filler1) VALUES (100, -1, 100, -1,'key1-key3'); + dec $cnt; +} +let $cnt=400; +while ($cnt) +{ + eval INSERT INTO t1 (key1, key2, key3, key4, filler1) VALUES (-1, 100, -1, 100,'key2-key4'); + dec $cnt; +} +--enable_query_log + +SELECT COUNT(*) FROM t1; + +-- disable_query_log +-- disable_result_log +ANALYZE TABLE t1; +-- enable_result_log +-- enable_query_log + +SET GLOBAL rocksdb_force_flush_memtable_now = 1; + +--replace_column 9 # +EXPLAIN UPDATE t1 SET filler1='to be deleted' WHERE key1=100 AND key2=100; +UPDATE t1 SET filler1='to be deleted' WHERE key1=100 and key2=100; + +DROP TABLE t0, t1; + +# Issue624 - MyRocks executes index_merge query plan incorrectly +create table t1 (key1 int, key2 int, key3 int, key (key1), key (key2), key(key3)) 
engine=rocksdb; +insert into t1 values (1, 100, 100), (1, 200, 200), (1, 300, 300); +--disable_query_log +let $i = 1; +while ($i <= 1000) { + let $insert = INSERT INTO t1 VALUES(1000,1000,1000); + inc $i; + eval $insert; +} +--enable_query_log +analyze table t1; +set global rocksdb_force_flush_memtable_now=1; + +--replace_column 9 # +explain select * from t1 where key1 = 1; +--replace_column 9 # +explain select key1,key2 from t1 where key1 = 1 or key2 = 1; +select * from t1 where key1 = 1; +select key1,key2 from t1 where key1 = 1 or key2 = 1; + +drop table t1; + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/index_merge_rocksdb2-master.opt b/storage/rocksdb/mysql-test/rocksdb/t/index_merge_rocksdb2-master.opt new file mode 100644 index 0000000000000..c07b063f07c1f --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/index_merge_rocksdb2-master.opt @@ -0,0 +1 @@ +--rocksdb_strict_collation_check=off --binlog_format=row --log-bin diff --git a/storage/rocksdb/mysql-test/rocksdb/t/index_merge_rocksdb2.test b/storage/rocksdb/mysql-test/rocksdb/t/index_merge_rocksdb2.test new file mode 100644 index 0000000000000..a4d26cf773909 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/index_merge_rocksdb2.test @@ -0,0 +1,70 @@ +# Skiping this test from Valgrind execution as per Bug-14627884 +--source include/not_valgrind.inc +# Adding big test option for this test. 
+--source include/big_test.inc + +# t/index_merge_innodb.test +# +# Index merge tests +# +# Last update: +# 2006-08-07 ML test refactored (MySQL 5.1) +# Main code of several index_merge tests +# -> include/index_merge*.inc +# wrapper t/index_merge_innodb.test sources now several +# include/index_merge*.inc files +# + +--source include/have_rocksdb.inc +let $engine_type= RocksDB; +# skipping because too unstable in MyRocks +let $skip_ror_EXPLAIN_for_MyRocks = 1; +let $random_rows_in_EXPLAIN = 1; +let $sorted_result = 1; +# According to Oracle: "InnoDB's estimate for the index cardinality +# depends on a pseudo random number generator (it picks up random +# pages to sample). After an optimization that was made in r2625 two +# EXPLAINs started returning a different number of rows (3 instead of +# 4)", so: +let $index_merge_random_rows_in_EXPLAIN = 1; +# RocksDB does not support Merge tables (affects include/index_merge1.inc) +let $merge_table_support= 0; + +set global rocksdb_force_flush_memtable_now=1; +--source include/index_merge1.inc +set global rocksdb_force_flush_memtable_now=1; +--source include/index_merge_ror.inc +set global rocksdb_force_flush_memtable_now=1; +--source include/index_merge2.inc + +set global rocksdb_force_flush_memtable_now=1; +--source include/index_merge_2sweeps.inc +set global rocksdb_force_flush_memtable_now=1; +--source include/index_merge_ror_cpk.inc + +set global rocksdb_force_flush_memtable_now=1; +--echo # +--echo # Bug#11747423 32254: INDEX MERGE USED UNNECESSARILY +--echo # +CREATE TABLE t1 ( + id INT NOT NULL PRIMARY KEY, + id2 INT NOT NULL, + id3 INT NOT NULL, + KEY (id2), + KEY (id3), + KEY covering_index (id2,id3) +) ENGINE=RocksDB; + +INSERT INTO t1 VALUES (0, 0, 0), (1, 1, 1), (2, 2, 2), (3, 3, 3), (4, 4, 4), (5, 5, 5), (6, 6, 6), (7, 7, 7); +INSERT INTO t1 SELECT id + 8, id2 + 8, id3 +8 FROM t1; +INSERT INTO t1 SELECT id + 16, 7, 0 FROM t1; + +-- disable_query_log +-- disable_result_log +analyze table t1; +-- 
enable_result_log +-- enable_query_log + +EXPLAIN SELECT SQL_NO_CACHE count(*) FROM t1 WHERE id2=7 AND id3=0; + +DROP TABLE t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/information_schema.test b/storage/rocksdb/mysql-test/rocksdb/t/information_schema.test index c20ab17ff6cff..99122a739fe80 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/information_schema.test +++ b/storage/rocksdb/mysql-test/rocksdb/t/information_schema.test @@ -44,12 +44,10 @@ CREATE TABLE t2 ( b int, c int, d int, - e int, PRIMARY KEY (a) COMMENT "cf_a", KEY (b) COMMENT "cf_b", KEY (c) COMMENT "cf_c", - KEY (d) COMMENT "$per_index_cf", - KEY (e) COMMENT "rev:cf_d") ENGINE=ROCKSDB; + KEY (d) COMMENT "rev:cf_d") ENGINE=ROCKSDB; select * from INFORMATION_SCHEMA.ROCKSDB_GLOBAL_INFO where TYPE = 'CF_FLAGS'; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/insert.test b/storage/rocksdb/mysql-test/rocksdb/t/insert.test index 14cfe1cadb86f..aebd3f76e871d 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/insert.test +++ b/storage/rocksdb/mysql-test/rocksdb/t/insert.test @@ -86,10 +86,10 @@ INSERT t1 (a) VALUE (10),(20); SAVEPOINT spt1; INSERT INTO t1 SET a = 11, b = 'f'; INSERT t1 SET b = DEFAULT; ---error ER_UNKNOWN_ERROR +--error ER_ROLLBACK_TO_SAVEPOINT ROLLBACK TO SAVEPOINT spt1; INSERT INTO t1 (b,a) VALUES ('test1',10); ---error ER_UNKNOWN_ERROR +--error ER_ROLLBACK_ONLY COMMIT; --sorted_result SELECT a,b FROM t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/issue243_transactionStatus.test b/storage/rocksdb/mysql-test/rocksdb/t/issue243_transactionStatus.test new file mode 100644 index 0000000000000..0997bde3f49b2 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/issue243_transactionStatus.test @@ -0,0 +1,80 @@ +--source include/have_rocksdb.inc + +--disable_warnings +DROP TABLE IF EXISTS t1; +--enable_warnings + +CREATE TABLE t1 ( + id INT, + val1 INT, + val2 INT, + PRIMARY KEY (id) +) ENGINE=rocksdb; + +# +# DB operations without Transaction , every count should be 0 +# so 
there's no count related output +# +INSERT INTO t1 VALUES(1,1,1),(2,1,2); +SELECT * FROM t1; +UPDATE t1 SET val1=2 WHERE id=2; +SELECT * FROM t1; +--replace_regex /[0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}:[0-9]{2}/TIMESTAMP/ /(ACTIVE) [0-9]+ /\1 NUM / /(thread id) [0-9]+/\1 TID/ /0x[0-9a-f]+/PTR/ /(query id) [0-9]+/\1 QID/ /(root) [a-z ]+/\1 ACTION/ +SHOW ENGINE rocksdb TRANSACTION STATUS; + +# +# DB operations with Tansaction, insert_count, update_count, delete_count +# and total write_count should be printed +# Cases: rollback, commit transaction +# +SET AUTOCOMMIT=0; +START TRANSACTION; +INSERT INTO t1 VALUES(20,1,1),(30,30,30); +SELECT * FROM t1; +UPDATE t1 SET val1=20, val2=20 WHERE id=20; +SELECT * FROM t1; +DELETE FROM t1 WHERE id=30; +--replace_regex /[0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}:[0-9]{2}/TIMESTAMP/ /(ACTIVE) [0-9]+ /\1 NUM / /(thread id) [0-9]+/\1 TID/ /0x[0-9a-f]+/PTR/ /(query id) [0-9]+/\1 QID/ /(root) [a-z ]+/\1 ACTION/ +SHOW ENGINE rocksdb TRANSACTION STATUS; + +ROLLBACK; +--replace_regex /[0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}:[0-9]{2}/TIMESTAMP/ /(ACTIVE) [0-9]+ /\1 NUM / /(thread id) [0-9]+/\1 TID/ /0x[0-9a-f]+/PTR/ /(query id) [0-9]+/\1 QID/ /(root) [a-z ]+/\1 ACTION/ +SHOW ENGINE rocksdb TRANSACTION STATUS; + +START TRANSACTION; +INSERT INTO t1 VALUES(40,40,40); +--replace_regex /[0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}:[0-9]{2}/TIMESTAMP/ /(ACTIVE) [0-9]+ /\1 NUM / /(thread id) [0-9]+/\1 TID/ /0x[0-9a-f]+/PTR/ /(query id) [0-9]+/\1 QID/ /(root) [a-z ]+/\1 ACTION/ +SHOW ENGINE rocksdb TRANSACTION STATUS; +COMMIT; +--replace_regex /[0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}:[0-9]{2}/TIMESTAMP/ /(ACTIVE) [0-9]+ /\1 NUM / /(thread id) [0-9]+/\1 TID/ /0x[0-9a-f]+/PTR/ /(query id) [0-9]+/\1 QID/ /(root) [a-z ]+/\1 ACTION/ +SHOW ENGINE rocksdb TRANSACTION STATUS; + +SET AUTOCOMMIT=1; +DROP TABLE t1; + +# +# Secondary Key Tests +# +--disable_warnings +DROP TABLE IF EXISTS t2; +--enable_warnings + +CREATE TABLE t2 ( + id1 
INT, + id2 INT, + value INT, + PRIMARY KEY (id1), + KEY (id2) +) ENGINE=rocksdb; + +SET AUTOCOMMIT=0; +START TRANSACTION; +INSERT INTO t2 VALUES(1,2,0),(10,20,30); +UPDATE t2 SET value=3 WHERE id2=2; +DELETE FROM t2 WHERE id1=10; + +--replace_regex /[0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}:[0-9]{2}/TIMESTAMP/ /(ACTIVE) [0-9]+ /\1 NUM / /(thread id) [0-9]+/\1 TID/ /0x[0-9a-f]+/PTR/ /(query id) [0-9]+/\1 QID/ /(root) [a-z ]+/\1 ACTION/ +SHOW ENGINE rocksdb TRANSACTION STATUS; +ROLLBACK; +SET AUTOCOMMIT=1; +DROP TABLE t2; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/issue255.test b/storage/rocksdb/mysql-test/rocksdb/t/issue255.test new file mode 100644 index 0000000000000..4f1927d366cc7 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/issue255.test @@ -0,0 +1,19 @@ +--source include/have_rocksdb.inc + +CREATE TABLE t1 (pk BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT); + +INSERT INTO t1 VALUES (5); +--replace_column 3 # 6 # 7 # 8 # 9 # 10 # +SHOW TABLE STATUS LIKE 't1'; + +INSERT INTO t1 VALUES ('538647864786478647864'); +--replace_column 3 # 6 # 7 # 8 # 9 # 10 # +SHOW TABLE STATUS LIKE 't1'; + +--error ER_DUP_ENTRY +INSERT INTO t1 VALUES (); +SELECT * FROM t1; +--replace_column 3 # 6 # 7 # 8 # 9 # 10 # +SHOW TABLE STATUS LIKE 't1'; + +DROP TABLE t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/issue314.test b/storage/rocksdb/mysql-test/rocksdb/t/issue314.test index 2059eef2195b3..822969a380fa5 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/issue314.test +++ b/storage/rocksdb/mysql-test/rocksdb/t/issue314.test @@ -8,7 +8,7 @@ SET SESSION TRANSACTION ISOLATION LEVEL SERIALIZABLE; CREATE TABLE t1(a int); SET TRANSACTION ISOLATION LEVEL READ COMMITTED; INSERT INTO t1 VALUES(1); ---error ER_UNKNOWN_ERROR +--error ER_ISOLATION_MODE_NOT_SUPPORTED select * from t1; SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED; select * from t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/issue495.test b/storage/rocksdb/mysql-test/rocksdb/t/issue495.test 
index bb215ebcd994a..5dcc7c19ba957 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/issue495.test +++ b/storage/rocksdb/mysql-test/rocksdb/t/issue495.test @@ -1,4 +1,9 @@ +--source include/have_rocksdb.inc +--source include/have_partition.inc +--disable_warnings drop table if exists t; +--enable_warnings + create table t ( a int, b int, diff --git a/storage/rocksdb/mysql-test/rocksdb/t/loaddata.inc b/storage/rocksdb/mysql-test/rocksdb/t/loaddata.inc index 5d3678f5f27c3..1d83598a28233 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/loaddata.inc +++ b/storage/rocksdb/mysql-test/rocksdb/t/loaddata.inc @@ -98,7 +98,7 @@ EOF --replace_result $datadir if ($skip_unique_check == 1) { - --error ER_UNKNOWN_ERROR + --error ER_ON_DUPLICATE_DISABLED eval LOAD DATA INFILE '$datadir/se_replacedata.dat' REPLACE INTO TABLE t1; } diff --git a/storage/rocksdb/mysql-test/rocksdb/t/lock_wait_timeout_stats.test b/storage/rocksdb/mysql-test/rocksdb/t/lock_wait_timeout_stats.test new file mode 100644 index 0000000000000..f1777ea3e93a5 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/lock_wait_timeout_stats.test @@ -0,0 +1,34 @@ +create table t (a int primary key) engine=rocksdb; + +begin; +insert into t values (0); + +--source include/count_sessions.inc +--connect (con1,localhost,root,,) +--connection con1 +set @@rocksdb_lock_wait_timeout=1; +select ROW_LOCK_WAIT_TIMEOUTS from information_schema.table_statistics where table_name="t"; +begin; + +--connect (con2,localhost,root,,) +--connection con2 +set @@rocksdb_lock_wait_timeout=1; +begin; + +--connection con1 +--error ER_LOCK_WAIT_TIMEOUT +insert into t values(0); +select ROW_LOCK_WAIT_TIMEOUTS from information_schema.table_statistics where table_name="t"; +select ROW_LOCK_WAIT_TIMEOUTS from information_schema.table_statistics where table_name="t"; + +--connection con2 +--error ER_LOCK_WAIT_TIMEOUT +insert into t values(0); +select ROW_LOCK_WAIT_TIMEOUTS from information_schema.table_statistics where table_name="t"; +select 
ROW_LOCK_WAIT_TIMEOUTS from information_schema.table_statistics where table_name="t"; + +--disconnect con1 +--connection default +--disconnect con2 +drop table t; +--source include/wait_until_count_sessions.inc diff --git a/storage/rocksdb/mysql-test/rocksdb/t/native_procedure-master.opt b/storage/rocksdb/mysql-test/rocksdb/t/native_procedure-master.opt new file mode 100644 index 0000000000000..6c4cea3d4b7c4 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/native_procedure-master.opt @@ -0,0 +1 @@ +$NP_EXAMPLE_LIB_OPT diff --git a/storage/rocksdb/mysql-test/rocksdb/t/native_procedure.test b/storage/rocksdb/mysql-test/rocksdb/t/native_procedure.test new file mode 100644 index 0000000000000..d4f38a607cf41 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/native_procedure.test @@ -0,0 +1,2 @@ +let $engine=rocksdb; +--source include/native_procedure.inc diff --git a/storage/rocksdb/mysql-test/rocksdb/t/optimizer_loose_index_scans.test b/storage/rocksdb/mysql-test/rocksdb/t/optimizer_loose_index_scans.test index beccc8a6b8ea9..db66da3b0dc9c 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/optimizer_loose_index_scans.test +++ b/storage/rocksdb/mysql-test/rocksdb/t/optimizer_loose_index_scans.test @@ -1,3 +1,4 @@ +--source include/have_rocksdb.inc let $engine=rocksdb; --source include/loose_index_scans.inc diff --git a/storage/rocksdb/mysql-test/rocksdb/t/prefix_extractor_override-master.opt b/storage/rocksdb/mysql-test/rocksdb/t/prefix_extractor_override-master.opt new file mode 100644 index 0000000000000..ca7e3636645a1 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/prefix_extractor_override-master.opt @@ -0,0 +1 @@ +--rocksdb_default_cf_options=write_buffer_size=64k;block_based_table_factory={filter_policy=bloomfilter:10:false;whole_key_filtering=0;};prefix_extractor=capped:24;disable_auto_compactions=true diff --git a/storage/rocksdb/mysql-test/rocksdb/t/prefix_extractor_override.test 
b/storage/rocksdb/mysql-test/rocksdb/t/prefix_extractor_override.test new file mode 100644 index 0000000000000..13d76bb5a3f8a --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/prefix_extractor_override.test @@ -0,0 +1,96 @@ +--source include/have_rocksdb.inc + +--disable_warnings +DROP TABLE IF EXISTS t1; +--enable_warnings + +CREATE TABLE t1 (id1 BIGINT, id2 BIGINT, id3 BIGINT, id4 BIGINT, PRIMARY KEY (id1, id2, id3, id4) comment 'cf1') ENGINE=rocksdb collate latin1_bin; +--disable_query_log +let $i = 1; +while ($i <= 100) { + let $insert = INSERT INTO t1 VALUES(1, $i, $i, $i); + eval $insert; + inc $i; +} +--enable_query_log +set global rocksdb_force_flush_memtable_now = 1; + +--echo +--echo Original Prefix Extractor: +--echo +--sorted_result +SELECT * FROM information_schema.rocksdb_cf_options WHERE option_type like '%prefix_extractor%'; + +# BF used (4+8+8+8) +select variable_value into @u from information_schema.global_status where variable_name='rocksdb_bloom_filter_prefix_checked'; +SELECT COUNT(*) FROM t1 WHERE id1=1 AND id2=1 AND id3=1; +select variable_value-@u from information_schema.global_status where variable_name='rocksdb_bloom_filter_prefix_checked'; + +--exec echo "" > $MYSQLTEST_VARDIR/log/mysqld.1.err +--let $_mysqld_option=--rocksdb_override_cf_options=cf1={prefix_extractor=capped:26}; + +--echo +--echo Prefix Extractor (after override_cf_options set, should not be changed): +--echo +--sorted_result +SELECT * FROM information_schema.rocksdb_cf_options WHERE option_type like '%prefix_extractor%'; + +# This should no longer crash. See https://github.com/facebook/mysql-5.6/issues/641 +--echo +--echo Restarting with new Prefix Extractor... 
+--echo +--source include/restart_mysqld_with_option.inc + +--echo +--echo Changed Prefix Extractor (after restart): +--echo +--sorted_result +SELECT * FROM information_schema.rocksdb_cf_options WHERE option_type like '%prefix_extractor%'; + +# Satisfies can_use_bloom_filter (4+8+8+8), but can't use because the old SST +# files have old prefix extractor +select variable_value into @u from information_schema.global_status where variable_name='rocksdb_bloom_filter_prefix_checked'; +SELECT COUNT(*) FROM t1 WHERE id1=1 AND id2=1 AND id3=1; +select variable_value-@u from information_schema.global_status where variable_name='rocksdb_bloom_filter_prefix_checked'; + +# Insert more data into t1, verify it uses new bloom filter +--disable_query_log +let $i = 101; +while ($i <= 200) { + let $insert = INSERT INTO t1 VALUES(1, $i, $i, $i); + eval $insert; + inc $i; +} +--enable_query_log + +set global rocksdb_force_flush_memtable_now = 1; + +# BF used w/ new prefix extractor (4+8+8+8) (still increments once because it +# needs to check the new SST file, but doesnt increment for SST file with old +# extractor) +select variable_value into @u from information_schema.global_status where variable_name='rocksdb_bloom_filter_prefix_checked'; +SELECT COUNT(*) FROM t1 WHERE id1=1 AND id2=1 AND id3=1; +select variable_value-@u from information_schema.global_status where variable_name='rocksdb_bloom_filter_prefix_checked'; + +# should have 2 sst files, one with old prefix extractor and one with new +SELECT COUNT(*) FROM information_schema.rocksdb_index_file_map WHERE COLUMN_FAMILY != 1; + +# update some old data, force compaction, verify that new SST files use +# new bloom filter +UPDATE t1 SET id1=1,id2 = 30,id3 = 30 WHERE id4 >= 0 AND id4 <=10; +set global rocksdb_force_flush_memtable_now = 1; + +# should have 3 sst files, one with old prefix extractor and two with new +SELECT COUNT(*) FROM information_schema.rocksdb_index_file_map WHERE COLUMN_FAMILY != 1; +select variable_value into 
@u from information_schema.global_status where variable_name='rocksdb_bloom_filter_prefix_checked'; +SELECT COUNT(*) FROM t1 WHERE id1=1 AND id2=1 AND id3=1; +select variable_value-@u from information_schema.global_status where variable_name='rocksdb_bloom_filter_prefix_checked'; + +set global rocksdb_compact_cf='cf1'; + +# Select the updated, make sure bloom filter is checked now +select variable_value into @u from information_schema.global_status where variable_name='rocksdb_bloom_filter_prefix_checked'; +SELECT COUNT(*) FROM t1 WHERE id1=1 AND id2=30 AND id3=30; +select variable_value-@u from information_schema.global_status where variable_name='rocksdb_bloom_filter_prefix_checked'; + +DROP TABLE t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/read_only_tx.test b/storage/rocksdb/mysql-test/rocksdb/t/read_only_tx.test index 56070652618b3..52f65095d338f 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/read_only_tx.test +++ b/storage/rocksdb/mysql-test/rocksdb/t/read_only_tx.test @@ -37,7 +37,7 @@ connection con1; select case when variable_value-@p < 1000 then 'true' else variable_value-@p end from information_schema.global_status where variable_name='rocksdb_number_sst_entry_put'; select case when variable_value-@s < 100 then 'true' else variable_value-@s end from information_schema.global_status where variable_name='rocksdb_number_sst_entry_singledelete'; SELECT * FROM t1; ---error ER_UNKNOWN_ERROR +--error ER_UPDATES_WITH_CONSISTENT_SNAPSHOT INSERT INTO t1 values (2, 2); ROLLBACK; SELECT * FROM t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/rocksdb.test b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb.test index 5581ed3f95a58..adf829a1e37a3 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/rocksdb.test +++ b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb.test @@ -1135,7 +1135,7 @@ set @a=-1; insert into t1 select (@a:=@a+1), 1234 from information_schema.session_variables limit 100; set @tmp1= @@rocksdb_max_row_locks; set rocksdb_max_row_locks= 20; 
---error ER_GET_ERRMSG +--error ER_RDB_STATUS_GENERAL update t1 set a=a+10; DROP TABLE t1; @@ -1350,7 +1350,9 @@ DROP TABLE t1; --echo # --echo # Issue #17: Automatic per-index column families +--echo # (Now deprecated) --echo # +--error ER_PER_INDEX_CF_DEPRECATED create table t1 ( id int not null, key1 int, @@ -1358,44 +1360,6 @@ create table t1 ( index (key1) comment '$per_index_cf' ) engine=rocksdb; ---echo #Same CF ids with different CF flags ---error ER_UNKNOWN_ERROR -create table t1_err ( - id int not null, - key1 int, - PRIMARY KEY (id), - index (key1) comment 'test.t1.key1' -) engine=rocksdb; - -create table t1_err ( - id int not null, - key1 int, - PRIMARY KEY (id), - index (key1) comment 'test.t1.key2' -) engine=rocksdb; -drop table t1_err; - ---echo # Unfortunately there is no way to check which column family everything goes to -insert into t1 values (1,1); -select * from t1; ---echo # Check that ALTER and RENAME are disallowed ---error ER_NOT_SUPPORTED_YET -alter table t1 add col2 int; - ---error ER_NOT_SUPPORTED_YET -rename table t1 to t2; - -drop table t1; - ---echo # Check detection of typos in \$per_index_cf ---error ER_NOT_SUPPORTED_YET -create table t1 ( - id int not null, - key1 int, - PRIMARY KEY (id), - index (key1) comment '$per_idnex_cf' -)engine=rocksdb; - --echo # --echo # Issue #22: SELECT ... 
FOR UPDATE takes a long time @@ -1787,9 +1751,9 @@ INSERT INTO t1 VALUES(1, 1); INSERT INTO t1 VALUES(1, 2); INSERT INTO t1 VALUES(1, 3); SELECT * FROM t1; ---error ER_UNKNOWN_ERROR +--error ER_ON_DUPLICATE_DISABLED REPLACE INTO t1 VALUES(4, 4); ---error ER_UNKNOWN_ERROR +--error ER_ON_DUPLICATE_DISABLED INSERT INTO t1 VALUES(5, 5) ON DUPLICATE KEY UPDATE value=value+1; TRUNCATE TABLE t1; SET @save_rocksdb_bulk_load_size= @@rocksdb_bulk_load_size; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_cf_per_partition.test b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_cf_per_partition.test index 7cffa2e62a683..f2d566674963f 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_cf_per_partition.test +++ b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_cf_per_partition.test @@ -418,6 +418,7 @@ ALTER TABLE t2 ADD KEY (`col3`, `col4`) COMMENT 'custom_p5_cfname=another_cf_for SELECT DISTINCT(cf_name) FROM information_schema.rocksdb_cfstats WHERE cf_name='another_cf_for_p5'; # Verify that correct partition and key are used when searching. +ANALYZE TABLE t2; EXPLAIN PARTITIONS SELECT * FROM t2 WHERE col3 = 0x4 AND col2 = 0x34567; DROP TABLE t2; @@ -492,3 +493,18 @@ SELECT DISTINCT(cf_name) FROM information_schema.rocksdb_cfstats WHERE cf_name=' DROP TABLE IF EXISTS t1; DROP TABLE IF EXISTS t2; + + +# Test that truncating table actually removes rows. 
+CREATE TABLE t1 ( + a INT NOT NULL, + PRIMARY KEY (a) COMMENT 'p1_cfname=foo;' +) ENGINE=ROCKSDB +PARTITION BY LIST COLUMNS(a) +(PARTITION p1 VALUES IN (1) ENGINE = ROCKSDB); + +INSERT INTO t1 values (1); +TRUNCATE TABLE t1; +SELECT * FROM t1; +DROP TABLE t1; + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/rollback_savepoint.test b/storage/rocksdb/mysql-test/rocksdb/t/rollback_savepoint.test index 8543ce81de403..c46d8b0763751 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/rollback_savepoint.test +++ b/storage/rocksdb/mysql-test/rocksdb/t/rollback_savepoint.test @@ -1,3 +1,5 @@ +--source include/have_rocksdb.inc + --disable_warnings DROP TABLE IF EXISTS t1, t2; --enable_warnings diff --git a/storage/rocksdb/mysql-test/rocksdb/t/rpl_savepoint.test b/storage/rocksdb/mysql-test/rocksdb/t/rpl_savepoint.test index 0f26c24c27d6a..07d4d938b3b07 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/rpl_savepoint.test +++ b/storage/rocksdb/mysql-test/rocksdb/t/rpl_savepoint.test @@ -16,9 +16,9 @@ begin; insert into t1 values (11, 1); savepoint a; insert into t1 values (12, 1); ---error ER_UNKNOWN_ERROR +--error ER_ROLLBACK_TO_SAVEPOINT rollback to savepoint a; ---error ER_UNKNOWN_ERROR +--error ER_ROLLBACK_ONLY commit; commit; select * from t1; @@ -33,11 +33,11 @@ begin; insert into t1 values (21, 1); savepoint a; insert into t1 values (22, 1); ---error ER_UNKNOWN_ERROR +--error ER_ROLLBACK_TO_SAVEPOINT rollback to savepoint a; ---error ER_UNKNOWN_ERROR +--error ER_ROLLBACK_ONLY insert into t1 values (23, 1); ---error ER_UNKNOWN_ERROR +--error ER_ROLLBACK_ONLY commit; commit; select * from t1; @@ -54,9 +54,9 @@ savepoint a; insert into t1 values (32, 1); savepoint b; insert into t1 values (33, 1); ---error ER_UNKNOWN_ERROR +--error ER_ROLLBACK_TO_SAVEPOINT rollback to savepoint a; ---error ER_UNKNOWN_ERROR +--error ER_ROLLBACK_ONLY insert into t1 values (34, 1); rollback; select * from t1; @@ -73,9 +73,9 @@ SAVEPOINT A; select * from t1; SAVEPOINT A; insert into t1 
values (35, 35); ---error ER_UNKNOWN_ERROR +--error ER_ROLLBACK_TO_SAVEPOINT ROLLBACK TO SAVEPOINT A; ---error ER_UNKNOWN_ERROR +--error ER_ROLLBACK_ONLY START TRANSACTION; select * from t1; --source include/sync_slave_sql_with_master.inc diff --git a/storage/rocksdb/mysql-test/rocksdb/t/rpl_statement.test b/storage/rocksdb/mysql-test/rocksdb/t/rpl_statement.test index b412626695607..9ee3efbb2972d 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/rpl_statement.test +++ b/storage/rocksdb/mysql-test/rocksdb/t/rpl_statement.test @@ -10,7 +10,7 @@ connection master; select @@binlog_format; create table t1 (pk int primary key) engine=rocksdb; ---error ER_UNKNOWN_ERROR +--error ER_REQUIRE_ROW_BINLOG_FORMAT insert into t1 values (1),(2),(3); set session rocksdb_unsafe_for_binlog=on; @@ -19,7 +19,7 @@ select * from t1; delete from t1; set session rocksdb_unsafe_for_binlog=off; ---error ER_UNKNOWN_ERROR +--error ER_REQUIRE_ROW_BINLOG_FORMAT insert into t1 values (1),(2),(3); set binlog_format=row; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/rqg_runtime.test b/storage/rocksdb/mysql-test/rocksdb/t/rqg_runtime.test index d5914745219e5..16d978c71b768 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/rqg_runtime.test +++ b/storage/rocksdb/mysql-test/rocksdb/t/rqg_runtime.test @@ -2,6 +2,7 @@ call mtr.add_suppression("Did not write failed "); call mtr.add_suppression("Can't open and lock privilege tables"); +call mtr.add_suppression("Attempt to delete the trigger file"); SET @ORIG_EVENT_SCHEDULER = @@EVENT_SCHEDULER; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/select_for_update_skip_locked_nowait.test b/storage/rocksdb/mysql-test/rocksdb/t/select_for_update_skip_locked_nowait.test index c8548d968888a..c6ebbfa3f01f1 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/select_for_update_skip_locked_nowait.test +++ b/storage/rocksdb/mysql-test/rocksdb/t/select_for_update_skip_locked_nowait.test @@ -1,3 +1,5 @@ +--source include/have_rocksdb.inc + 
############################################################################## ## SKIP LOCKED | NOWAIT are *not* supported for SELECT...FOR UPDATE in RocksDB diff --git a/storage/rocksdb/mysql-test/rocksdb/t/set_checkpoint.inc b/storage/rocksdb/mysql-test/rocksdb/t/set_checkpoint.inc index a8d8ed53cba58..c4e69a341eef9 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/set_checkpoint.inc +++ b/storage/rocksdb/mysql-test/rocksdb/t/set_checkpoint.inc @@ -20,7 +20,7 @@ if (!$succeeds) { --disable_result_log --disable_query_log - --error ER_UNKNOWN_ERROR + --error ER_RDB_STATUS_GENERAL eval SET GLOBAL ROCKSDB_CREATE_CHECKPOINT = '$checkpoint'; --enable_query_log --enable_result_log diff --git a/storage/rocksdb/mysql-test/rocksdb/t/show_engine.test b/storage/rocksdb/mysql-test/rocksdb/t/show_engine.test index 434cfe91248d5..1ae34cc08300f 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/show_engine.test +++ b/storage/rocksdb/mysql-test/rocksdb/t/show_engine.test @@ -23,7 +23,9 @@ CREATE TABLE t3 (k INT, PRIMARY KEY (k) COMMENT 'cf_t1') ENGINE = ROCKSDB; CREATE TABLE t4 (l INT, PRIMARY KEY (l) COMMENT 'cf_t4') ENGINE = ROCKSDB PARTITION BY KEY(l) PARTITIONS 4; ---replace_column 3 # +SET GLOBAL rocksdb_force_flush_memtable_now=1; + +--replace_column 2 # 3 # SHOW ENGINE rocksdb STATUS; INSERT INTO t1 VALUES (1), (2), (3); @@ -49,7 +51,7 @@ GROUP BY TABLE_NAME, PARTITION_NAME; --replace_column 3 # SELECT * FROM INFORMATION_SCHEMA.ROCKSDB_CF_OPTIONS; - + DROP TABLE t1; DROP TABLE t2; DROP TABLE t3; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/singledelete-master.opt b/storage/rocksdb/mysql-test/rocksdb/t/singledelete-master.opt index 72b3af6bcf739..a3d2d07ec7969 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/singledelete-master.opt +++ b/storage/rocksdb/mysql-test/rocksdb/t/singledelete-master.opt @@ -1 +1 @@ ---rocksdb_default_cf_options=write_buffer_size=16k +--rocksdb_default_cf_options=write_buffer_size=16k --rocksdb_strict_collation_check=0 diff --git 
a/storage/rocksdb/mysql-test/rocksdb/t/singledelete.test b/storage/rocksdb/mysql-test/rocksdb/t/singledelete.test index 718f6b7202eeb..5a9d17e0255af 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/singledelete.test +++ b/storage/rocksdb/mysql-test/rocksdb/t/singledelete.test @@ -86,4 +86,20 @@ optimize table t5; select case when variable_value-@s > 5 and variable_value-@s < 100 then 'true' else 'false' end from information_schema.global_status where variable_name='rocksdb_number_sst_entry_singledelete'; select case when variable_value-@d < 10 then 'true' else 'false' end from information_schema.global_status where variable_name='rocksdb_number_sst_entry_delete'; -DROP TABLE t1, t2, t3, t4, t5; +# SingleDelete used for PK. Verify old PK is always deleted. +CREATE TABLE t6 ( + pk VARCHAR(64) COLLATE latin1_swedish_ci PRIMARY KEY +) ENGINE=RocksDB; +INSERT INTO t6 VALUES ('a'); +SET GLOBAL rocksdb_force_flush_memtable_now=1; +SELECT * FROM t6; +UPDATE t6 SET pk='A' WHERE pk='a'; +SELECT * FROM t6; +DELETE FROM t6 where pk='A'; +--echo SELECT should return nothing; +SELECT * FROM t6; +SET GLOBAL rocksdb_force_flush_memtable_now=1; +--echo SELECT should return nothing; +SELECT * FROM t6; + +DROP TABLE t1, t2, t3, t4, t5, t6; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/sst_count_rows.sh b/storage/rocksdb/mysql-test/rocksdb/t/sst_count_rows.sh index 72442fa1e3e05..7f6320ea5225a 100755 --- a/storage/rocksdb/mysql-test/rocksdb/t/sst_count_rows.sh +++ b/storage/rocksdb/mysql-test/rocksdb/t/sst_count_rows.sh @@ -15,10 +15,10 @@ while : ; do # excluding system cf DELETED=`$sst_dump --command=scan --output_hex --file=$f | \ perl -ne 'print if(/''(\d\d\d\d\d\d\d\d)/ && $1 >= 8)' | \ - grep -e ": 0" -e ": 7" | wc -l` + grep -e ", type:0" -e ", type:7" | wc -l` EXISTS=`$sst_dump --command=scan --output_hex --file=$f | \ perl -ne 'print if(/''(\d\d\d\d\d\d\d\d)/ && $1 >= 8)' | \ - grep ": 1" | wc -l` + grep ", type:1" | wc -l` TOTAL_D=$(($TOTAL_D+$DELETED)) 
TOTAL_E=$(($TOTAL_E+$EXISTS)) # echo "${f##*/} $DELETED $EXISTS" diff --git a/storage/rocksdb/mysql-test/rocksdb/t/trx_info_rpl.test b/storage/rocksdb/mysql-test/rocksdb/t/trx_info_rpl.test index 452a7989b0bd9..c03ab6b5e737c 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/trx_info_rpl.test +++ b/storage/rocksdb/mysql-test/rocksdb/t/trx_info_rpl.test @@ -1,3 +1,4 @@ +--source include/have_rocksdb.inc --source include/master-slave.inc --disable_warnings diff --git a/storage/rocksdb/mysql-test/rocksdb/t/ttl_primary-master.opt b/storage/rocksdb/mysql-test/rocksdb/t/ttl_primary-master.opt new file mode 100644 index 0000000000000..b991f718a33fe --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/ttl_primary-master.opt @@ -0,0 +1,2 @@ +--rocksdb_enable_ttl_read_filtering=0 +--rocksdb_default_cf_options=disable_auto_compactions=true diff --git a/storage/rocksdb/mysql-test/rocksdb/t/ttl_primary.test b/storage/rocksdb/mysql-test/rocksdb/t/ttl_primary.test new file mode 100644 index 0000000000000..92200e859ed0b --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/ttl_primary.test @@ -0,0 +1,547 @@ +--source include/have_debug.inc +--source include/have_rocksdb.inc + +# Basic TTL test +CREATE TABLE t1 ( +`a` binary(8) NOT NULL, +`b` varbinary(64) NOT NULL, +`c` varbinary(256) NOT NULL, +`ts` bigint(20) UNSIGNED NOT NULL, +`value` mediumblob NOT NULL, +PRIMARY KEY (`b`,`a`,`c`) +) ENGINE=ROCKSDB DEFAULT CHARSET=latin1 +COMMENT='ttl_duration=1;ttl_col=ts;'; + +set global rocksdb_debug_ttl_rec_ts = -100; +INSERT INTO t1 values ('a', 'b', 'c', UNIX_TIMESTAMP(), 'd'); +INSERT INTO t1 values ('d', 'e', 'f', UNIX_TIMESTAMP(), 'g'); +set global rocksdb_debug_ttl_rec_ts = 0; +SELECT COUNT(*) FROM t1; +set global rocksdb_force_flush_memtable_now=1; +set global rocksdb_compact_cf='default'; + +# should have filtered the rows out since ttl is passed in compaction filter +SELECT COUNT(*) FROM t1; +DROP TABLE t1; + +# column before TTL in value +CREATE TABLE t1 ( + a bigint(20) 
NOT NULL, + b int NOT NULL, + ts bigint(20) UNSIGNED NOT NULL, + c int NOT NULL, + PRIMARY KEY (a) +) ENGINE=rocksdb +COMMENT='ttl_duration=1;ttl_col=ts;'; + +set global rocksdb_debug_ttl_rec_ts = -100; +INSERT INTO t1 values (1, 3, UNIX_TIMESTAMP(), 5); +INSERT INTO t1 values (2, 4, UNIX_TIMESTAMP(), 6); +set global rocksdb_debug_ttl_rec_ts = 0; +SELECT COUNT(*) FROM t1; + +set global rocksdb_force_flush_memtable_now=1; +set global rocksdb_compact_cf='default'; + +# should have filtered the rows out since ttl is passed in compaction filter +SELECT COUNT(*) FROM t1; +DROP TABLE t1; + +# multi-part PK w/ TTL +CREATE TABLE t1 ( + a bigint(20) NOT NULL, + b int NOT NULL, + c int NOT NULL, + ts bigint(20) UNSIGNED NOT NULL, + PRIMARY KEY (a,c) +) ENGINE=rocksdb +COMMENT='ttl_duration=1;ttl_col=ts;'; + +set global rocksdb_debug_ttl_rec_ts = -100; +INSERT INTO t1 values (1, 3, 5, UNIX_TIMESTAMP()); +INSERT INTO t1 values (2, 4, 6, UNIX_TIMESTAMP()); +set global rocksdb_debug_ttl_rec_ts = 0; +SELECT COUNT(*) FROM t1; + +set global rocksdb_force_flush_memtable_now=1; +set global rocksdb_compact_cf='default'; + +# should have filtered the rows out since ttl is passed in compaction filter +SELECT COUNT(*) FROM t1; +DROP TABLE t1; + +# multi-part PK w/ TTL +CREATE TABLE t1 ( + a bigint(20) NOT NULL, + b int NOT NULL, + c int NOT NULL, + ts bigint(20) UNSIGNED NOT NULL, + PRIMARY KEY (a,c) +) ENGINE=rocksdb +COMMENT='ttl_duration=1;ttl_col=ts;'; + +set global rocksdb_debug_ttl_rec_ts = -100; +INSERT INTO t1 values (1, 3, 5, UNIX_TIMESTAMP()); +INSERT INTO t1 values (2, 4, 6, UNIX_TIMESTAMP()); +set global rocksdb_debug_ttl_rec_ts = 0; +SELECT COUNT(*) FROM t1; + +set global rocksdb_force_flush_memtable_now=1; +set global rocksdb_compact_cf='default'; + +# should have filtered the rows out since ttl is passed in compaction filter +SELECT COUNT(*) FROM t1; +DROP TABLE t1; + +# nullable column(s) before TTL +CREATE TABLE t1 ( + a bigint(20) NOT NULL, + b int, + c int, + ts 
bigint(20) UNSIGNED NOT NULL, + PRIMARY KEY (a) +) ENGINE=rocksdb +COMMENT='ttl_duration=1;ttl_col=ts;'; + +set global rocksdb_debug_ttl_rec_ts = -100; +INSERT INTO t1 values (1, NULL, NULL, UNIX_TIMESTAMP()); +INSERT INTO t1 values (2, NULL, NULL, UNIX_TIMESTAMP()); +set global rocksdb_debug_ttl_rec_ts = 0; +SELECT COUNT(*) FROM t1; + +set global rocksdb_force_flush_memtable_now=1; +set global rocksdb_compact_cf='default'; + +# should have filtered the rows out since ttl is passed in compaction filter +SELECT COUNT(*) FROM t1; +DROP TABLE t1; + +# variable len columns + null column(s) before TTL +CREATE TABLE t1 ( +`a` binary(8) NOT NULL, +`b` varbinary(64), +`c` varbinary(256), +`ts` bigint(20) UNSIGNED NOT NULL, +`value` mediumblob NOT NULL, +PRIMARY KEY (`a`) +) ENGINE=ROCKSDB DEFAULT CHARSET=latin1 +COMMENT='ttl_duration=1;ttl_col=ts;'; + +set global rocksdb_debug_ttl_rec_ts = -100; +INSERT INTO t1 values ('a', NULL, 'bc', UNIX_TIMESTAMP(), 'd'); +INSERT INTO t1 values ('d', 'efghijk', NULL, UNIX_TIMESTAMP(), 'l'); +set global rocksdb_debug_ttl_rec_ts = 0; +SELECT COUNT(*) FROM t1; + +set global rocksdb_force_flush_memtable_now=1; +set global rocksdb_compact_cf='default'; + +# should have filtered the rows out since ttl is passed in compaction filter +SELECT COUNT(*) FROM t1; +DROP TABLE t1; + +# TTL implicitly generated (no ttl column) +CREATE TABLE t1 ( + a bigint(20) NOT NULL, + b int NOT NULL, + c int NOT NULL, + PRIMARY KEY (a) +) ENGINE=rocksdb +COMMENT='ttl_duration=1;'; + +set global rocksdb_debug_ttl_rec_ts = -100; +INSERT INTO t1 values (1, 3, 5); +INSERT INTO t1 values (2, 4, 6); +set global rocksdb_debug_ttl_rec_ts = 0; +SELECT COUNT(*) FROM t1; + +set global rocksdb_force_flush_memtable_now=1; +set global rocksdb_compact_cf='default'; + +# should have filtered the rows out since ttl is passed in compaction filter +SELECT COUNT(*) FROM t1; +DROP TABLE t1; + +# TTL field as the PK +CREATE TABLE t1 ( + a int, + ts bigint(20) UNSIGNED NOT NULL, + 
PRIMARY KEY (a, ts) +) ENGINE=rocksdb +COMMENT='ttl_duration=5;ttl_col=ts;'; + +INSERT INTO t1 values (1, UNIX_TIMESTAMP()); +INSERT INTO t1 values (2, UNIX_TIMESTAMP()); +SELECT COUNT(*) FROM t1; + +set global rocksdb_debug_ttl_snapshot_ts = -10; +set global rocksdb_force_flush_memtable_now=1; +set global rocksdb_compact_cf='default'; +set global rocksdb_debug_ttl_snapshot_ts = 0; +# should all still be there.. +SELECT COUNT(*) FROM t1; + +set global rocksdb_debug_ttl_snapshot_ts = 10; +set global rocksdb_compact_cf='default'; +set global rocksdb_debug_ttl_snapshot_ts = 0; +# should have filtered the rows out since ttl is passed in compaction filter +SELECT COUNT(*) FROM t1; +DROP TABLE t1; + + +# TTL field inside multi-part pk +CREATE TABLE t1 ( + a bigint(20) NOT NULL, + b int NOT NULL, + ts bigint(20) UNSIGNED NOT NULL, + c int NOT NULL, + PRIMARY KEY (a, ts) +) ENGINE=rocksdb +COMMENT='ttl_duration=1;ttl_col=ts;'; + +set global rocksdb_debug_ttl_rec_ts = -100; +INSERT INTO t1 values (1, 3, UNIX_TIMESTAMP(), 5); +INSERT INTO t1 values (2, 4, UNIX_TIMESTAMP(), 6); +set global rocksdb_debug_ttl_rec_ts = 0; +SELECT COUNT(*) FROM t1; + +set global rocksdb_force_flush_memtable_now=1; +set global rocksdb_compact_cf='default'; + +# should have filtered the rows out since ttl is passed in compaction filter +SELECT COUNT(*) FROM t1; +DROP TABLE t1; + +# TTL field inside key with variable length things.. 
+CREATE TABLE t1 ( +`a` binary(8) NOT NULL, +`b` varbinary(64), +`c` varbinary(256), +`ts` bigint(20) UNSIGNED NOT NULL, +`value` mediumblob NOT NULL, +PRIMARY KEY (`a`, `ts`) +) ENGINE=ROCKSDB DEFAULT CHARSET=latin1 +COMMENT='ttl_duration=1;ttl_col=ts;'; + +set global rocksdb_debug_ttl_rec_ts = -100; +INSERT INTO t1 values ('a', NULL, 'bc', UNIX_TIMESTAMP(), 'd'); +INSERT INTO t1 values ('de', 'fghijk', NULL, UNIX_TIMESTAMP(), 'l'); +set global rocksdb_debug_ttl_rec_ts = 0; +SELECT COUNT(*) FROM t1; + +set global rocksdb_force_flush_memtable_now=1; +set global rocksdb_compact_cf='default'; + +# should have filtered the rows out since ttl is passed in compaction filter +SELECT COUNT(*) FROM t1; +DROP TABLE t1; + +# TTL test where you compact (values still exist), real_sleep, then compact again, +# values should now be gone. +CREATE TABLE t1 ( +a INT NOT NULL, +b varbinary(64) NOT NULL, +c varbinary(256) NOT NULL, +ts bigint(20) UNSIGNED NOT NULL, +value mediumblob NOT NULL, +PRIMARY KEY (b,a,c) +) ENGINE=ROCKSDB DEFAULT CHARSET=latin1 +COMMENT='ttl_duration=10;ttl_col=ts;'; + +set global rocksdb_debug_ttl_rec_ts = -300; +INSERT INTO t1 values (1, 'b', 'c', UNIX_TIMESTAMP(), 'd'); +INSERT INTO t1 values (2, 'e', 'f', UNIX_TIMESTAMP(), 'g'); +set global rocksdb_debug_ttl_rec_ts = 300; +INSERT INTO t1 values (3, 'i', 'j', UNIX_TIMESTAMP(), 'k'); +INSERT INTO t1 values (4, 'm', 'n', UNIX_TIMESTAMP(), 'o'); +set global rocksdb_debug_ttl_rec_ts = 0; + +# Nothing should get removed here. +set global rocksdb_debug_ttl_snapshot_ts = -3600; +set global rocksdb_force_flush_memtable_now=1; +set global rocksdb_compact_cf='default'; +set global rocksdb_debug_ttl_snapshot_ts = 0; +--sorted_result +SELECT a FROM t1; + +# 1 and 2 should get removed here. +set global rocksdb_compact_cf='default'; +--sorted_result +SELECT a FROM t1; + +# 3 and 4 should get removed here. 
+set global rocksdb_debug_ttl_snapshot_ts = 3600; +set global rocksdb_compact_cf='default'; +set global rocksdb_debug_ttl_snapshot_ts = 0; +--sorted_result +SELECT a FROM t1; + +DROP TABLE t1; + +# TTL field with nullable ttl column (should fail) +--error 1948 +CREATE TABLE t1 ( + a bigint(20) UNSIGNED NOT NULL, + b int NOT NULL, + c int NOT NULL, + ts bigint(20), + PRIMARY KEY (a,c) +) ENGINE=rocksdb +COMMENT='ttl_duration=1;ttl_col=ts;'; + +# TTL field with non 8-bit integer column (should fail) +--error 1948 +CREATE TABLE t1 ( + a bigint(20) UNSIGNED NOT NULL, + b int NOT NULL, + c int NOT NULL, + ts int, + PRIMARY KEY (a,c) +) ENGINE=rocksdb +COMMENT='ttl_duration=1;ttl_col=ts;'; + +# TTL duration as some random garbage value +--error 1949 +CREATE TABLE t1 ( + a bigint(20) UNSIGNED NOT NULL, + b int NOT NULL, + c int NOT NULL, + PRIMARY KEY (a,c) +) ENGINE=rocksdb +COMMENT='ttl_duration=abc;'; + +# TTL col is some column outside of the table +--error 1948 +CREATE TABLE t1 ( + a bigint(20) UNSIGNED NOT NULL, + b int NOT NULL, + c int NOT NULL, + PRIMARY KEY (a,c) +) ENGINE=rocksdb +COMMENT='ttl_duration=1;ttl_col=abc;'; + +# TTL col must have accompanying duration +--error 1948 +CREATE TABLE t1 ( + a bigint(20) UNSIGNED NOT NULL, + b int NOT NULL, + c int NOT NULL, + PRIMARY KEY (a,c) +) ENGINE=rocksdb +COMMENT='ttl_col=abc;'; + +# Make sure it doesn't filter out things early +CREATE TABLE t1 ( + a bigint(20) NOT NULL, + PRIMARY KEY (a) +) ENGINE=rocksdb +COMMENT='ttl_duration=500;'; + +INSERT INTO t1 values (1); +SELECT COUNT(*) FROM t1; +set global rocksdb_force_flush_memtable_now=1; +set global rocksdb_compact_cf='default'; + +SELECT COUNT(*) FROM t1; +DROP TABLE t1; + +# Testing altering table comment with updated TTL duration +# This should trigger a rebuild of the table +CREATE TABLE t1 ( + a INT PRIMARY KEY +) ENGINE=rocksdb +COMMENT='ttl_duration=100;'; + +INSERT INTO t1 values (1); +SELECT * FROM t1; + +set global rocksdb_debug_ttl_rec_ts = -300; +ALTER 
TABLE t1 COMMENT = 'ttl_duration=1'; +set global rocksdb_debug_ttl_rec_ts = 0; +SHOW CREATE TABLE t1; +set global rocksdb_force_flush_memtable_now=1; +set global rocksdb_compact_cf='default'; + +SELECT COUNT(*) FROM t1; +DROP TABLE t1; + +# Tables with hidden PK and SK disabled +CREATE TABLE t1 ( + a INT PRIMARY KEY, + b INT +) ENGINE=rocksdb +COMMENT='ttl_duration=100;'; + +--error 1947 +ALTER TABLE t1 DROP PRIMARY KEY; +--error 1947 +ALTER TABLE t1 ADD INDEX kb(b), ALGORITHM=INPLACE; + +DROP TABLE t1; + +# Test replacing PK, ttl should still work after +CREATE TABLE t1 ( + a INT PRIMARY KEY, + b INT +) ENGINE=rocksdb +COMMENT='ttl_duration=5;'; + +INSERT INTO t1 VALUES (1,1); +INSERT INTO t1 VALUES (2,2); + +ALTER TABLE t1 DROP PRIMARY KEY, ADD PRIMARY KEY(b); +set global rocksdb_debug_ttl_snapshot_ts = -3600; +set global rocksdb_force_flush_memtable_now=1; +set @@global.rocksdb_compact_cf = 'default'; +set global rocksdb_debug_ttl_snapshot_ts = 0; + +--sorted_result +SELECT COUNT(*) FROM t1; + +set global rocksdb_debug_ttl_snapshot_ts = 3600; +set @@global.rocksdb_compact_cf = 'default'; +set global rocksdb_debug_ttl_snapshot_ts = 0; + +--sorted_result +SELECT COUNT(*) FROM t1; + +DROP TABLE t1; + +# Make sure table comment filled with other text before/after will work +# (basically, it needs semicolon before and after) +CREATE TABLE t1 ( + a bigint(20) UNSIGNED NOT NULL, + b int, + PRIMARY KEY (a,b) +) ENGINE=rocksdb +COMMENT='asdadfasdfsadfadf ;ttl_duration=1; asfasdfasdfadfa'; +INSERT INTO t1 values (UNIX_TIMESTAMP(), 1); +SELECT COUNT(*) FROM t1; + +set global rocksdb_debug_ttl_snapshot_ts = 3600; +set global rocksdb_force_flush_memtable_now=1; +set global rocksdb_compact_cf='default'; +set global rocksdb_debug_ttl_snapshot_ts = 0; + +SELECT COUNT(*) FROM t1; + +ALTER TABLE t1 COMMENT = 'adsf;;ttl_duration=5;asfasdfa;ttl_col=a;asdfasdf;'; + +set global rocksdb_debug_ttl_rec_ts = 300; +INSERT INTO t1 values (UNIX_TIMESTAMP(), 2); +set global 
rocksdb_debug_ttl_rec_ts = 0; +set global rocksdb_force_flush_memtable_now=1; + +# nothing removed here +set global rocksdb_compact_cf='default'; +SELECT COUNT(*) FROM t1; + +# all removed here +set global rocksdb_debug_ttl_snapshot_ts = 3600; +set global rocksdb_compact_cf='default'; +set global rocksdb_debug_ttl_snapshot_ts = 0; +SELECT COUNT(*) FROM t1; + +DROP TABLE t1; + +# Test to make sure that TTL retains original timestamp during update +CREATE TABLE t1 ( + a bigint(20) NOT NULL, + PRIMARY KEY (a) +) ENGINE=rocksdb +COMMENT='ttl_duration=5;'; + +set global rocksdb_debug_ttl_rec_ts = -300; +INSERT INTO t1 values (1); +INSERT INTO t1 values (3); +INSERT INTO t1 values (5); +set global rocksdb_debug_ttl_rec_ts = 300; +INSERT INTO t1 values (7); +INSERT INTO t1 values (9); +set global rocksdb_debug_ttl_rec_ts = 0; + +UPDATE t1 SET a=a+1; +--sorted_result +SELECT * FROM t1; + +set global rocksdb_force_flush_memtable_now=1; +set global rocksdb_compact_cf='default'; + +# 1,3,5 should be dropped +--sorted_result +SELECT * FROM t1; +DROP TABLE t1; + +# test behaviour on update with TTL column, TTL time can be updated here. 
+CREATE TABLE t1 ( + a INT, + b bigint(20) UNSIGNED NOT NULL, + PRIMARY KEY (a) +) ENGINE=rocksdb +COMMENT='ttl_duration=5;ttl_col=b;'; + +set global rocksdb_debug_ttl_rec_ts = -300; +INSERT INTO t1 values (1, UNIX_TIMESTAMP()); +INSERT INTO t1 values (3, UNIX_TIMESTAMP()); +INSERT INTO t1 values (5, UNIX_TIMESTAMP()); +INSERT INTO t1 values (7, UNIX_TIMESTAMP()); + +set global rocksdb_debug_ttl_rec_ts = 300; +UPDATE t1 SET b=UNIX_TIMESTAMP() WHERE a < 4; +set global rocksdb_debug_ttl_rec_ts = 0; + +--sorted_result +SELECT a FROM t1; + +set global rocksdb_force_flush_memtable_now=1; +set global rocksdb_compact_cf='default'; + +# 5 and 7 should be gone here +--sorted_result +SELECT a FROM t1; +DROP TABLE t1; + +# Test rows expired stat variable and disable ttl variable +CREATE TABLE t1 ( + a bigint(20) NOT NULL, + PRIMARY KEY (a) +) ENGINE=rocksdb +COMMENT='ttl_duration=1;'; + +set global rocksdb_debug_ttl_rec_ts = -100; +INSERT INTO t1 values (1); +INSERT INTO t1 values (2); +INSERT INTO t1 values (3); +set global rocksdb_debug_ttl_rec_ts = 0; + +set global rocksdb_enable_ttl=0; +set global rocksdb_force_flush_memtable_now=1; +set global rocksdb_compact_cf='default'; + +select variable_value into @c from information_schema.global_status where variable_name='rocksdb_rows_expired'; +set global rocksdb_enable_ttl=1; +set global rocksdb_compact_cf='default'; + +select variable_value-@c from information_schema.global_status where variable_name='rocksdb_rows_expired'; +SELECT COUNT(*) FROM t1; +DROP TABLE t1; + + +# Table with TTL won't increment rows expired when no records have been +# compacted +CREATE TABLE t1 ( + a bigint(20) NOT NULL, + PRIMARY KEY (a) +) ENGINE=rocksdb +COMMENT='ttl_duration=100;'; + +INSERT INTO t1 values (1); +INSERT INTO t1 values (2); +INSERT INTO t1 values (3); + +select variable_value into @c from information_schema.global_status where variable_name='rocksdb_rows_expired'; +set global rocksdb_force_flush_memtable_now=1; +set global 
rocksdb_compact_cf='default'; +select variable_value-@c from information_schema.global_status where variable_name='rocksdb_rows_expired'; + +DROP TABLE t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/ttl_primary_read_filtering-master.opt b/storage/rocksdb/mysql-test/rocksdb/t/ttl_primary_read_filtering-master.opt new file mode 100644 index 0000000000000..aefc2f5da3406 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/ttl_primary_read_filtering-master.opt @@ -0,0 +1 @@ +--rocksdb_default_cf_options=disable_auto_compactions=true diff --git a/storage/rocksdb/mysql-test/rocksdb/t/ttl_primary_read_filtering.test b/storage/rocksdb/mysql-test/rocksdb/t/ttl_primary_read_filtering.test new file mode 100644 index 0000000000000..5a694b7b2221c --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/ttl_primary_read_filtering.test @@ -0,0 +1,371 @@ +--source include/have_debug.inc +--source include/have_rocksdb.inc + +# The purpose of read filtering for tables with TTL is to ensure that during a +# transaction a key which has expired already but not removed by compaction +# yet, is not returned to the user. +# +# Without this the user might be hit with problems such as disappearing rows +# within a transaction, etc, because the compaction filter ignores snapshots +# when filtering keys. + +# Basic read filtering test +CREATE TABLE t1 ( + a int PRIMARY KEY +) ENGINE=rocksdb +COMMENT='ttl_duration=1;'; + +set global rocksdb_debug_ttl_rec_ts = -100; +INSERT INTO t1 values (1); +INSERT INTO t1 values (2); +set global rocksdb_debug_ttl_rec_ts = 0; +set global rocksdb_force_flush_memtable_now=1; + +--sorted_result +SELECT * FROM t1; + +select variable_value into @c from information_schema.global_status where variable_name='rocksdb_rows_expired'; +set global rocksdb_compact_cf='default'; +select variable_value-@c from information_schema.global_status where variable_name='rocksdb_rows_expired'; + +DROP TABLE t1; + +# Test that some rows are hidden but others aren't... 
+CREATE TABLE t1 ( + a int PRIMARY KEY, + b BIGINT UNSIGNED NOT NULL +) ENGINE=rocksdb +COMMENT='ttl_duration=10;'; + +set global rocksdb_debug_ttl_rec_ts = -300; +INSERT INTO t1 values (1, UNIX_TIMESTAMP()); +set global rocksdb_debug_ttl_rec_ts = 300; +INSERT INTO t1 values (2, UNIX_TIMESTAMP()); +INSERT INTO t1 values (3, UNIX_TIMESTAMP()); +set global rocksdb_debug_ttl_rec_ts = 0; + +set global rocksdb_force_flush_memtable_now=1; + +# 1 should be hidden even though compaction hasn't run. +--sorted_result +SELECT a FROM t1; + +set global rocksdb_compact_cf='default'; + +# none should be hidden yet, compaction runs but records aren't expired +--sorted_result +SELECT a FROM t1; + +# all should be hidden now, even though compaction hasn't run again +set global rocksdb_debug_ttl_read_filter_ts = -310; +--sorted_result +SELECT a FROM t1; +set global rocksdb_debug_ttl_read_filter_ts = 0; + +DROP TABLE t1; + +# Test the filtering code explicitly. +CREATE TABLE t1 ( + a int PRIMARY KEY +) ENGINE=rocksdb +COMMENT='ttl_duration=1;'; + +set global rocksdb_debug_ttl_rec_ts = -100; +INSERT INTO t1 values (1); +INSERT INTO t1 values (3); +INSERT INTO t1 values (5); +INSERT INTO t1 values (7); +set global rocksdb_debug_ttl_rec_ts = 0; + +# should return nothing. +--sorted_result +SELECT * FROM t1; + +# disable filtering +set global rocksdb_enable_ttl_read_filtering=0; + +# should return everything +--sorted_result +SELECT * FROM t1; + +# disable filtering +set global rocksdb_enable_ttl_read_filtering=1; + +# should return nothing. 
+--sorted_result +SELECT * FROM t1; + +DROP TABLE t1; + +# Read filtering index scan tests (None of these queries should return any results) +CREATE TABLE t1 ( + a int, + b int, + c int, + PRIMARY KEY (a,b,c) +) ENGINE=rocksdb +COMMENT='ttl_duration=1;'; + +set global rocksdb_debug_ttl_rec_ts = -100; +INSERT INTO t1 values (0,0,0); +INSERT INTO t1 values (0,0,1); +INSERT INTO t1 values (0,1,0); +INSERT INTO t1 values (0,1,1); +INSERT INTO t1 values (1,1,2); +INSERT INTO t1 values (1,2,1); +INSERT INTO t1 values (1,2,2); +INSERT INTO t1 values (1,2,3); +set global rocksdb_debug_ttl_rec_ts = 0; + +select variable_value into @c from information_schema.global_status where variable_name='rocksdb_rows_expired'; + +set global rocksdb_force_flush_memtable_now=1; + +# HA_READ_KEY_EXACT, using full key +SELECT * FROM t1 WHERE a=1 AND b=2 AND c=2; + +# HA_READ_KEY_EXACT, not using full key +SELECT * FROM t1 WHERE a = 1; + +# HA_READ_BEFORE_KEY, not using full key +SELECT max(a) from t1 where a < 3; + +#HA_READ_BEFORE_KEY, using full key +SELECT max(a) from t1 where a < 2 AND b = 1 AND c < 3; + +# HA_READ_KEY_OR_NEXT +SELECT min(a) from t1 where a >= 1; + +# HA_READ_AFTER_KEY, /* Find next rec. 
after key-record */ +SELECT min(a) from t1 where a > 1; + +# HA_READ_PREFIX_LAST, /* Last key with the same prefix */ +select * from t1 where a=1 and b in (1) order by c desc; + +# HA_READ_PREFIX_LAST_OR_PREV, /* Last or prev key with the same prefix */ +select max(a) from t1 where a <=10; + +# need to test read_range_first() +# calls into read_range_next() and uses compare_keys() to see if its out of +# range +select a from t1 where a > 0 and a <= 2; + +select variable_value-@c from information_schema.global_status where variable_name='rocksdb_rows_expired'; +set global rocksdb_compact_cf='default'; +select variable_value-@c from information_schema.global_status where variable_name='rocksdb_rows_expired'; +DROP TABLE t1; + +# duplicate PK value attempt to be inserted when old one is expired... +# in this case, we pretend the expired key was not found and insert into PK +CREATE TABLE t1 ( + a int PRIMARY KEY +) ENGINE=rocksdb +COMMENT='ttl_duration=100;'; +set global rocksdb_debug_ttl_rec_ts = -110; +INSERT INTO t1 values (1); +set global rocksdb_debug_ttl_rec_ts = 0; + +SELECT * FROM t1; + +# this should work, even if old value is not filtered out yet. +INSERT INTO t1 values (1); + +# should show (1) result +SELECT * FROM t1; + +DROP TABLE t1; + +# Attempt to update expired value, should filter out +set global rocksdb_force_flush_memtable_now=1; +CREATE TABLE t1 ( + a int PRIMARY KEY +) ENGINE=rocksdb +COMMENT='ttl_duration=1;'; +set global rocksdb_debug_ttl_rec_ts = -100; +INSERT INTO t1 values (1); +set global rocksdb_debug_ttl_rec_ts = 0; + +--sorted_result +SELECT * FROM t1; + +# No error is thrown here, under the hood rnd_next_with_direction is +# filtering out the record from being seen in the first place. +UPDATE t1 set a = 1; +DROP TABLE t1; + +## +## More tests on update behaviour with expired keys. 
+## +set global rocksdb_force_flush_memtable_now=1; +set global rocksdb_compact_cf='default'; +CREATE TABLE t1 ( + a int PRIMARY KEY, + b int +) ENGINE=rocksdb +COMMENT='ttl_duration=100;'; + +set global rocksdb_debug_ttl_rec_ts = -110; +INSERT INTO t1 values (1,1); +INSERT INTO t1 values (3,3); +set global rocksdb_debug_ttl_rec_ts = 0; +INSERT INTO t1 values (5,5); + +# expired key (1) is still around under the hood, but +# this time rnd_next_with_direction finds non-expired key (5). So the +# execution flow in the SQL layer moves onto update_write_row, where it then +# finds the duplicate key (1). But the duplicate key is expired, so it allows +# the overwrite. +UPDATE t1 set a = 1; + +--sorted_result +SELECT * FROM t1; + +set global rocksdb_enable_ttl_read_filtering=0; +# 1,1 should be gone, even with read filtering disabled as it has been +# overwritten +--sorted_result +SELECT * FROM t1; +set global rocksdb_enable_ttl_read_filtering=1; + +# get_row_by_rowid tested here via index_read_map_impl +UPDATE t1 set a = 999 where a = 1; +--sorted_result +SELECT * FROM t1; + +UPDATE t1 set a = a - 1; +--sorted_result +SELECT * FROM t1; + +DROP TABLE t1; + +# Ensure no rows can disappear in the middle of long-running transactions +# Also ensure repeatable-read works as expected +--source include/count_sessions.inc +connect (con1,localhost,root,,); +connect (con2,localhost,root,,); + +CREATE TABLE t1 ( + a int PRIMARY KEY +) ENGINE=rocksdb +COMMENT='ttl_duration=5;'; + +INSERT INTO t1 values (1); + +connection con1; +--echo # Creating Snapshot (start transaction) +BEGIN; + +# We need the below snippet in case establishing con1 took an arbitrary +# amount of time. See https://github.com/facebook/mysql-5.6/pull/617#discussion_r120525391. 
+--disable_query_log +--let $snapshot_size= `SELECT COUNT(*) FROM t1` +--let $i= 0 +while ($snapshot_size != 1) +{ + if ($i == 1000) + { + --die Your testing host is too slow for reasonable TTL testing + } + + $i++; + ROLLBACK; + INSERT INTO t1 values (1); + BEGIN; + --let $snapshot_size= `SELECT COUNT(*) FROM t1` +} +--enable_query_log + +# Nothing filtered out here +--sorted_result +SELECT * FROM t1; + +--sleep 5 + +--sorted_result +SELECT * FROM t1; # <= shouldn't be filtered out here + +--echo # Switching to connection 2 +connection con2; +# compaction doesn't do anythign since con1 snapshot is still open +set global rocksdb_force_flush_memtable_now=1; +set global rocksdb_compact_cf='default'; +# read filtered out, because on a different connection, on +# this connection the records have 'expired' already so they are filtered out +# even though they have not yet been removed by compaction +--sorted_result +SELECT * FROM t1; + +--echo # Switching to connection 1 +connection con1; +--sorted_result +SELECT * FROM t1; # <= shouldn't be filtered out here + +UPDATE t1 set a = a + 1; +--sorted_result +SELECT * FROM t1; # <= shouldn't be filtered out here + +COMMIT; + +--sorted_result # <= filtered out here because time has passed. +SELECT * FROM t1; + +DROP TABLE t1; +disconnect con1; +disconnect con2; + +#transaction 1, create a snapshot and select * => returns nothing. +#transaction 2, insert into table, flush +#transaction 1, select * => returns nothing, but the snapshot should prevent the compaction code from removing the rows, no matter what the ttl duration is. +#transaction 2, select * -> sees nothing, disable filter, select * -> sees everything, enable filter, select * -> sees nothing. 
+connect (con1,localhost,root,,); +connect (con2,localhost,root,,); +set global rocksdb_force_flush_memtable_now=1; +set global rocksdb_compact_cf='default'; + +CREATE TABLE t1 ( + a int PRIMARY KEY +) ENGINE=rocksdb +COMMENT='ttl_duration=1;'; + +--echo # On Connection 1 +connection con1; +--echo # Creating Snapshot (start transaction) +BEGIN; +--sorted_result +SELECT * FROM t1; +# Sleep 5 secs after creating snapshot, this ensures any records created after +# this can't be removed by compaction until this snapshot is released. +--sleep 5 + +--echo # On Connection 2 +connection con2; +set global rocksdb_debug_ttl_rec_ts = -2; +INSERT INTO t1 values (1); +INSERT INTO t1 values (3); +INSERT INTO t1 values (5); +INSERT INTO t1 values (7); +set global rocksdb_debug_ttl_rec_ts = 0; +set global rocksdb_force_flush_memtable_now=1; +set global rocksdb_compact_cf='default'; + +--echo # On Connection 1 +connection con1; +--sorted_result +SELECT * FROM t1; + +--echo # On Connection 2 +connection con2; +--sorted_result +SELECT * FROM t1; +set global rocksdb_enable_ttl_read_filtering=0; +--sorted_result +SELECT * FROM t1; +set global rocksdb_enable_ttl_read_filtering=1; + +disconnect con2; +disconnect con1; +connection default; + +DROP TABLE t1; +# Wait till we reached the initial number of concurrent sessions +--source include/wait_until_count_sessions.inc diff --git a/storage/rocksdb/mysql-test/rocksdb/t/ttl_primary_with_partitions-master.opt b/storage/rocksdb/mysql-test/rocksdb/t/ttl_primary_with_partitions-master.opt new file mode 100644 index 0000000000000..b991f718a33fe --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/ttl_primary_with_partitions-master.opt @@ -0,0 +1,2 @@ +--rocksdb_enable_ttl_read_filtering=0 +--rocksdb_default_cf_options=disable_auto_compactions=true diff --git a/storage/rocksdb/mysql-test/rocksdb/t/ttl_primary_with_partitions.test b/storage/rocksdb/mysql-test/rocksdb/t/ttl_primary_with_partitions.test new file mode 100644 index 
0000000000000..dd1a97b32df02 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/ttl_primary_with_partitions.test @@ -0,0 +1,253 @@ +--source include/have_debug.inc +--source include/have_rocksdb.inc + +# +# Create a table with multiple partitions, but in the comment don't specify +# that per-partition based column families (CF) should be created. Expect that +# default CF will be used and new one won't be created. +# +# In addition, specify TTL on one of the partitions. Insert a few things +# inside all the partitions, verify after compaction that the rows inside the +# partition with TTL has disappeared. +# +CREATE TABLE t1 ( + c1 INT, + PRIMARY KEY (`c1`) +) ENGINE=ROCKSDB +COMMENT="custom_p0_ttl_duration=1;" +PARTITION BY LIST(c1) ( + PARTITION custom_p0 VALUES IN (1, 4, 7), + PARTITION custom_p1 VALUES IN (2, 5, 8), + PARTITION custom_p2 VALUES IN (3, 6, 9) +); + +set global rocksdb_debug_ttl_rec_ts = -3600; +INSERT INTO t1 values (1); +INSERT INTO t1 values (2); +INSERT INTO t1 values (3); +INSERT INTO t1 values (4); +INSERT INTO t1 values (5); +INSERT INTO t1 values (6); +INSERT INTO t1 values (7); +INSERT INTO t1 values (8); +INSERT INTO t1 values (9); +set global rocksdb_debug_ttl_rec_ts = 0; + +--sorted_result +SELECT * FROM t1; +set global rocksdb_force_flush_memtable_now=1; +set global rocksdb_compact_cf='default'; + +# 1,4, and 7 should be gone +--sorted_result +SELECT * FROM t1; +DROP TABLE t1; + +# +# Create a table with multiple partitions and request for separate CF to be +# created per every partition. As a result we expect three different CF-s to be +# created. +# +# In addition, specify TTL on some of the partitions. Insert a few things +# inside all the partitions, verify after compaction that the rows inside the +# partition with TTL has disappeared. 
+# +CREATE TABLE t1 ( + c1 INT, + c2 INT, + name VARCHAR(25) NOT NULL, + PRIMARY KEY (`c1`, `c2`) COMMENT 'custom_p0_cfname=foo;custom_p1_cfname=my_custom_cf;custom_p2_cfname=baz' +) ENGINE=ROCKSDB +COMMENT="custom_p0_ttl_duration=1;custom_p1_ttl_duration=7;" +PARTITION BY LIST(c1) ( + PARTITION custom_p0 VALUES IN (1, 4, 7), + PARTITION custom_p1 VALUES IN (2, 5, 8), + PARTITION custom_p2 VALUES IN (3, 6, 9) +); + +set global rocksdb_debug_ttl_rec_ts = -1200; +INSERT INTO t1 values (1,1,'a'); +INSERT INTO t1 values (4,4,'aaaa'); +INSERT INTO t1 values (7,7,'aaaaaaa'); + +set global rocksdb_debug_ttl_rec_ts = 1200; +INSERT INTO t1 values (2,2,'aa'); +INSERT INTO t1 values (3,3,'aaa'); +INSERT INTO t1 values (5,5,'aaaaa'); +INSERT INTO t1 values (6,6,'aaaaaa'); +INSERT INTO t1 values (8,8,'aaaaaaaa'); +INSERT INTO t1 values (9,9,'aaaaaaaaa'); +set global rocksdb_debug_ttl_rec_ts = 0; + +--sorted_result +SELECT * FROM t1; + +set global rocksdb_force_flush_memtable_now=1; +set @@global.rocksdb_compact_cf = 'foo'; +set @@global.rocksdb_compact_cf = 'my_custom_cf'; +--sorted_result +SELECT * FROM t1; + +set global rocksdb_debug_ttl_snapshot_ts = 3600; +set @@global.rocksdb_compact_cf = 'foo'; +--sorted_result +SELECT * FROM t1; + +# Now 2,5,8 should be removed (this verifies that TTL is only operating on the +# particular CF. +set @@global.rocksdb_compact_cf = 'my_custom_cf'; +set global rocksdb_debug_ttl_snapshot_ts = 0; +--sorted_result +SELECT * FROM t1; + +DROP TABLE t1; + +# +# Create a table with CF-s/TTL per partition and verify that ALTER TABLE + DROP +# PRIMARY, ADD PRIMARY work for that scenario and data is persisted/filtered as +# expected. 
+# +CREATE TABLE t1 ( + c1 INT, + c2 INT, + name VARCHAR(25) NOT NULL, + event DATE, + PRIMARY KEY (`c1`, `c2`) COMMENT 'custom_p0_cfname=foo;custom_p1_cfname=bar;custom_p2_cfname=baz;' +) ENGINE=ROCKSDB +COMMENT="custom_p0_ttl_duration=9999;custom_p2_ttl_duration=5;" +PARTITION BY LIST(c1) ( + PARTITION custom_p0 VALUES IN (1, 2, 3), + PARTITION custom_p1 VALUES IN (4, 5, 6), + PARTITION custom_p2 VALUES IN (7, 8, 9) +); + +INSERT INTO t1 VALUES (1, 1, "one", null); +INSERT INTO t1 VALUES (2, 2, "two", null); +INSERT INTO t1 VALUES (3, 3, "three", null); + +INSERT INTO t1 VALUES (4, 4, "four", null); +INSERT INTO t1 VALUES (5, 5, "five", null); +INSERT INTO t1 VALUES (6, 6, "six", null); + +INSERT INTO t1 VALUES (7, 7, "seven", null); +INSERT INTO t1 VALUES (8, 8, "eight", null); +INSERT INTO t1 VALUES (9, 9, "nine", null); + +--sorted_result +SELECT * FROM t1; + +# TTL should be reset after alter table +set global rocksdb_debug_ttl_rec_ts = 600; +ALTER TABLE t1 DROP PRIMARY KEY, ADD PRIMARY KEY(`c2`,`c1`) COMMENT 'custom_p0_cfname=foo;custom_p1_cfname=bar;custom_p2_cfname=baz;'; +set global rocksdb_debug_ttl_rec_ts = 0; +SHOW CREATE TABLE t1; + +# ...so nothing should be gone here +set global rocksdb_debug_ttl_snapshot_ts = 100; +set global rocksdb_force_flush_memtable_now=1; +set @@global.rocksdb_compact_cf = 'baz'; +set global rocksdb_debug_ttl_snapshot_ts = 0; +--sorted_result +SELECT * FROM t1; + +set global rocksdb_debug_ttl_snapshot_ts = 1200; +set @@global.rocksdb_compact_cf = 'foo'; +set @@global.rocksdb_compact_cf = 'baz'; +set global rocksdb_debug_ttl_snapshot_ts = 0; +--sorted_result +SELECT * FROM t1; + +DROP TABLE t1; + +# +# Create a table with non-partitioned TTL duration, with partitioned TTL +# columns +# +# In this case the same TTL duration will be applied across different TTL +# columns in different partitions, except for in p2 where we override the ttl +# duration. 
+# +CREATE TABLE t1 ( + c1 BIGINT, + c2 BIGINT UNSIGNED NOT NULL, + name VARCHAR(25) NOT NULL, + event DATE, + PRIMARY KEY (`c1`) COMMENT 'custom_p0_cfname=foo;custom_p1_cfname=bar;custom_p2_cfname=baz;' +) ENGINE=ROCKSDB +COMMENT="ttl_duration=1;custom_p1_ttl_duration=100;custom_p1_ttl_col=c2;custom_p2_ttl_duration=5000;" +PARTITION BY LIST(c1) ( + PARTITION custom_p0 VALUES IN (1, 2, 3), + PARTITION custom_p1 VALUES IN (4, 5, 6), + PARTITION custom_p2 VALUES IN (7, 8, 9) +); + +set global rocksdb_debug_ttl_rec_ts = -300; +INSERT INTO t1 VALUES (1, UNIX_TIMESTAMP(), "one", null); +INSERT INTO t1 VALUES (2, UNIX_TIMESTAMP(), "two", null); +INSERT INTO t1 VALUES (3, UNIX_TIMESTAMP(), "three", null); +set global rocksdb_debug_ttl_rec_ts = 0; + +INSERT INTO t1 VALUES (4, UNIX_TIMESTAMP(), "four", null); +INSERT INTO t1 VALUES (5, UNIX_TIMESTAMP(), "five", null); +INSERT INTO t1 VALUES (6, UNIX_TIMESTAMP(), "six", null); + +INSERT INTO t1 VALUES (7, UNIX_TIMESTAMP(), "seven", null); +INSERT INTO t1 VALUES (8, UNIX_TIMESTAMP(), "eight", null); +INSERT INTO t1 VALUES (9, UNIX_TIMESTAMP(), "nine", null); + +set global rocksdb_force_flush_memtable_now=1; +set @@global.rocksdb_compact_cf = 'foo'; +set @@global.rocksdb_compact_cf = 'baz'; +set @@global.rocksdb_compact_cf = 'bar'; + +# here we expect only 1,2,3 to be gone, ttl implicit. +--sorted_result +SELECT c1 FROM t1; + +# here we expect only 4,5,6 to be gone, ttl based on column c2. +set global rocksdb_debug_ttl_snapshot_ts = 600; +set @@global.rocksdb_compact_cf = 'bar'; +set global rocksdb_debug_ttl_snapshot_ts = 0; +--sorted_result +SELECT c1 FROM t1; + +# at this point only 7,8,9 should be left.. +DROP TABLE t1; + +# +# Make sure non-partitioned TTL duration/col still works on table with +# partitions. 
+# +# Simultaneously tests when TTL col is part of the key in partitioned table +# +CREATE TABLE t1 ( + c1 BIGINT, + c2 BIGINT UNSIGNED NOT NULL, + PRIMARY KEY (`c1`, `c2`) +) ENGINE=ROCKSDB +COMMENT="ttl_duration=100;ttl_col=c2;" +PARTITION BY LIST(c1) ( + PARTITION custom_p0 VALUES IN (1), + PARTITION custom_p1 VALUES IN (2), + PARTITION custom_p2 VALUES IN (3) +); + +INSERT INTO t1 values (1, UNIX_TIMESTAMP()); +INSERT INTO t1 values (2, UNIX_TIMESTAMP()); +INSERT INTO t1 values (3, UNIX_TIMESTAMP()); + +set global rocksdb_force_flush_memtable_now=1; +set global rocksdb_compact_cf='default'; + +# everything should still be here +--sorted_result +SELECT c1 FROM t1; + +set global rocksdb_debug_ttl_snapshot_ts = 300; +set global rocksdb_compact_cf='default'; +set global rocksdb_debug_ttl_snapshot_ts = 0; +# everything should now be gone +--sorted_result +SELECT c1 FROM t1; + +DROP TABLE t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/type_set_indexes.test b/storage/rocksdb/mysql-test/rocksdb/t/type_set_indexes.test index 79232255d2559..983c70256b081 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/type_set_indexes.test +++ b/storage/rocksdb/mysql-test/rocksdb/t/type_set_indexes.test @@ -70,27 +70,25 @@ SELECT a FROM t1 WHERE FIND_IN_SET('Europe',a) > 0; DROP TABLE t1; ---disable_parsing -#--error ER_GET_ERRMSG CREATE TABLE t1 ( a SET('N.America','S.America','Africa','Antarctica','Australia','Europe','Asia'), b SET('test1','test2','test3','test4','test5'), c SET('01','22','23','33','34','39','40','44','50','63','64'), pk SET('1','2','3','4','5','6','7','8','9') PRIMARY KEY, UNIQUE INDEX b_a (b,a) -) ENGINE=innodb; +) ENGINE=rocksdb; SHOW INDEX IN t1; -INSERT INTO t1 (a,b,c) VALUES -('','test2,test3','01,34,44,23'), -('',5,2), -('N.America,Asia','test4,test2',''), -('Africa,Europe,Asia','test2,test3','01'), -('Antarctica','test3','34,44'), -('Asia','test5','50'), -('Europe,S.America','test1,','39'); +INSERT INTO t1 (a,b,c,pk) VALUES 
+('','test2,test3','01,34,44,23',1), +('',5,2,2), +('N.America,Asia','test4,test2','',3), +('Africa,Europe,Asia','test2,test3','01',4), +('Antarctica','test3','34,44',5), +('Asia','test5','50',6), +('Europe,S.America','test1,','39',7); --replace_column 9 # EXPLAIN SELECT DISTINCT a, b FROM t1 ORDER BY b DESC, a; @@ -99,6 +97,4 @@ SELECT DISTINCT a, b FROM t1 ORDER BY b DESC, a; DROP TABLE t1; ---enable_parsing - SET GLOBAL ROCKSDB_PAUSE_BACKGROUND_WORK = @ORIG_PAUSE_BACKGROUND_WORK; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/type_varchar-master.opt b/storage/rocksdb/mysql-test/rocksdb/t/type_varchar-master.opt new file mode 100644 index 0000000000000..33e72265db2c0 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/type_varchar-master.opt @@ -0,0 +1 @@ +--rocksdb_debug_optimizer_n_rows=10 diff --git a/storage/rocksdb/mysql-test/rocksdb/t/type_varchar_endspace.inc b/storage/rocksdb/mysql-test/rocksdb/t/type_varchar_endspace.inc index f813b8871b155..bcca0c3a499f0 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/type_varchar_endspace.inc +++ b/storage/rocksdb/mysql-test/rocksdb/t/type_varchar_endspace.inc @@ -43,6 +43,7 @@ insert into t1 values (1, 'a ', 'a-space'); insert into t1 values (2, 'a', 'a'); insert into t1 values (3, 'a \t', 'a-tab'); +analyze table t1; --echo # Must show 'using index' for latin1_bin and utf8_bin: --replace_column 9 # explain diff --git a/storage/rocksdb/mysql-test/rocksdb/t/unsupported_tx_isolations.test b/storage/rocksdb/mysql-test/rocksdb/t/unsupported_tx_isolations.test index 0c7df020ef5e3..a15e5d2a7be19 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/unsupported_tx_isolations.test +++ b/storage/rocksdb/mysql-test/rocksdb/t/unsupported_tx_isolations.test @@ -8,17 +8,17 @@ insert into t1 values (1,1); SET session transaction isolation level read uncommitted; begin; ---error ER_UNKNOWN_ERROR +--error ER_ISOLATION_MODE_NOT_SUPPORTED insert into t1 values (2,1); ---error ER_UNKNOWN_ERROR +--error 
ER_ISOLATION_MODE_NOT_SUPPORTED select * from t1 where id=1; rollback; SET session transaction isolation level serializable; begin; ---error ER_UNKNOWN_ERROR +--error ER_ISOLATION_MODE_NOT_SUPPORTED insert into t1 values (2,1); ---error ER_UNKNOWN_ERROR +--error ER_ISOLATION_MODE_NOT_SUPPORTED select * from t1 where id=1; rollback; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/update.test b/storage/rocksdb/mysql-test/rocksdb/t/update.test index e0a49ee8ca50d..51e2c5be0a6da 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/update.test +++ b/storage/rocksdb/mysql-test/rocksdb/t/update.test @@ -61,10 +61,10 @@ BEGIN; UPDATE t1 SET b = 'update2' WHERE a <= 100; SAVEPOINT spt1; UPDATE t1 SET b = ''; ---error ER_UNKNOWN_ERROR +--error ER_ROLLBACK_TO_SAVEPOINT ROLLBACK TO SAVEPOINT spt1; UPDATE t1 SET b = 'upd' WHERE a = 10050; ---error ER_UNKNOWN_ERROR +--error ER_ROLLBACK_ONLY COMMIT; SELECT * FROM t1 ORDER BY pk; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/use_direct_reads_writes.test b/storage/rocksdb/mysql-test/rocksdb/t/use_direct_reads_writes.test index 349748e91a838..f4492b2ab1738 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/use_direct_reads_writes.test +++ b/storage/rocksdb/mysql-test/rocksdb/t/use_direct_reads_writes.test @@ -36,7 +36,7 @@ shutdown_server 10; --exec echo "" >$MYSQLTEST_VARDIR/log/mysqld.1.err ---exec echo "restart:--rocksdb_use_direct_writes=1 --rocksdb_allow_mmap_writes=1" >$_expect_file_name +--exec echo "restart:--rocksdb_use_direct_io_for_flush_and_compaction=1 --rocksdb_allow_mmap_writes=1" >$_expect_file_name --sleep 0.1 --exec echo "restart:" >$_expect_file_name @@ -44,4 +44,4 @@ shutdown_server 10; --source include/wait_until_connected_again.inc --disable_reconnect ---exec grep "enable both use_direct_writes" $MYSQLTEST_VARDIR/log/mysqld.1.err | cut -d] -f2 +--exec grep "enable both use_direct_io_for_flush_and_compaction" $MYSQLTEST_VARDIR/log/mysqld.1.err | cut -d] -f2 diff --git 
a/storage/rocksdb/mysql-test/rocksdb/t/varbinary_format.test b/storage/rocksdb/mysql-test/rocksdb/t/varbinary_format.test new file mode 100644 index 0000000000000..d00a8b7afbe1a --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/varbinary_format.test @@ -0,0 +1,131 @@ +--source include/have_debug.inc +--source include/have_rocksdb.inc + +# Create a table with a varbinary key with the current format and validate +# that it sorts correctly +CREATE TABLE t1( + vb VARBINARY(64) primary key +) ENGINE=rocksdb; + +INSERT INTO t1 values(0x00); +INSERT INTO t1 values(0x0000); +INSERT INTO t1 values(0x0000000000000000); +INSERT INTO t1 values(0x000000); +INSERT INTO t1 values(0x000000000000000000000000000000000000000000000000000000000000000000); +INSERT INTO t1 values(0x00000000000000000000000000000000000000000000000000000000000000); +INSERT INTO t1 values(0x0000000000000000000000000000000000000000000000000000000000000000); +INSERT INTO t1 values(0x00000000); +INSERT INTO t1 values(0x0000000000); +INSERT INTO t1 values(0x00000000000000000000); +INSERT INTO t1 values(0x000000000000); +INSERT INTO t1 values(0x00000000000000); +INSERT INTO t1 values(0x000000000000000000); + +SELECT hex(vb) FROM t1; + +# Use the fact that the rocksdb_locks shows the keys as they are encoded to +# validate that the keys were encoded as expected +BEGIN; +SELECT hex(vb) FROM t1 FOR UPDATE; +SELECT SUBSTRING(a.key,9) FROM information_schema.rocksdb_locks AS a ORDER BY a.key; +ROLLBACK; + +DROP TABLE t1; + +# Now create the same table in the old format to show that they can be read +# and handled correctly +set session debug= '+d,MYROCKS_LEGACY_VARBINARY_FORMAT'; +CREATE TABLE t1( + vb VARBINARY(64) primary key +) ENGINE=rocksdb; +set session debug= '-d,MYROCKS_LEGACY_VARBINARY_FORMAT'; + +INSERT INTO t1 values(0x00); +INSERT INTO t1 values(0x0000); +INSERT INTO t1 values(0x0000000000000000); +INSERT INTO t1 values(0x000000); +INSERT INTO t1 
values(0x000000000000000000000000000000000000000000000000000000000000000000); +INSERT INTO t1 values(0x00000000000000000000000000000000000000000000000000000000000000); +INSERT INTO t1 values(0x0000000000000000000000000000000000000000000000000000000000000000); +INSERT INTO t1 values(0x00000000); +INSERT INTO t1 values(0x0000000000); +INSERT INTO t1 values(0x00000000000000000000); +INSERT INTO t1 values(0x000000000000); +INSERT INTO t1 values(0x00000000000000); +INSERT INTO t1 values(0x000000000000000000); + +SELECT hex(vb) FROM t1; + +# Use the fact that the rocksdb_locks shows the keys as they are encoded to +# validate that the keys were encoded as expected +BEGIN; +SELECT hex(vb) FROM t1 FOR UPDATE; +SELECT SUBSTRING(a.key,9) FROM information_schema.rocksdb_locks AS a ORDER BY a.key; +ROLLBACK; + +DROP TABLE t1; + +# Now create a table with a varchar key using a binary collation with the +# current format and validate that it sorts correctly +CREATE TABLE t1( + vc VARCHAR(64) collate 'binary' primary key +) ENGINE=rocksdb; + +INSERT INTO t1 values('a'); +INSERT INTO t1 values('aa'); +INSERT INTO t1 values('aaaaaaaa'); +INSERT INTO t1 values('aaa'); +INSERT INTO t1 values('aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa'); +INSERT INTO t1 values('aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa'); +INSERT INTO t1 values('aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa'); +INSERT INTO t1 values('aaaa'); +INSERT INTO t1 values('aaaaa'); +INSERT INTO t1 values('aaaaaaaaaa'); +INSERT INTO t1 values('aaaaaa'); +INSERT INTO t1 values('aaaaaaa'); +INSERT INTO t1 values('aaaaaaaaa'); + +SELECT * FROM t1; + +# Use the fact that the rocksdb_locks shows the keys as they are encoded to +# validate that the keys were encoded as expected +BEGIN; +SELECT * FROM t1 FOR UPDATE; +SELECT SUBSTRING(a.key,9) FROM information_schema.rocksdb_locks AS a ORDER BY a.key; +ROLLBACK; + +DROP TABLE t1; + +# Now create the same table in the old format to show that they can be read +# and handled correctly +set session debug= 
'+d,MYROCKS_LEGACY_VARBINARY_FORMAT'; +CREATE TABLE t1( + vc VARCHAR(64) collate 'binary' primary key +) ENGINE=rocksdb; +set session debug= '-d,MYROCKS_LEGACY_VARBINARY_FORMAT'; + +INSERT INTO t1 values('a'); +INSERT INTO t1 values('aa'); +INSERT INTO t1 values('aaaaaaaa'); +INSERT INTO t1 values('aaa'); +INSERT INTO t1 values('aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa'); +INSERT INTO t1 values('aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa'); +INSERT INTO t1 values('aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa'); +INSERT INTO t1 values('aaaa'); +INSERT INTO t1 values('aaaaa'); +INSERT INTO t1 values('aaaaaaaaaa'); +INSERT INTO t1 values('aaaaaa'); +INSERT INTO t1 values('aaaaaaa'); +INSERT INTO t1 values('aaaaaaaaa'); + +SELECT * FROM t1; + +# Use the fact that the rocksdb_locks shows the keys as they are encoded to +# validate that the keys were encoded as expected +BEGIN; +SELECT * FROM t1 FOR UPDATE; +SELECT SUBSTRING(a.key,9) FROM information_schema.rocksdb_locks AS a ORDER BY a.key; +ROLLBACK; + +DROP TABLE t1; + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/write_sync.test b/storage/rocksdb/mysql-test/rocksdb/t/write_sync.test index 672687b044eff..c21c65a7b40f0 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/write_sync.test +++ b/storage/rocksdb/mysql-test/rocksdb/t/write_sync.test @@ -5,7 +5,8 @@ SET GLOBAL rocksdb_write_ignore_missing_column_families=true; create table aaa (id int primary key, i int) engine rocksdb; -SET LOCAL rocksdb_flush_log_at_trx_commit=0; +set @save_rocksdb_flush_log_at_trx_commit=@@global.rocksdb_flush_log_at_trx_commit; +SET GLOBAL rocksdb_flush_log_at_trx_commit=0; --exec sleep 30 select variable_value into @a from information_schema.global_status where variable_name='rocksdb_wal_synced'; insert aaa(id, i) values(1,1); @@ -15,7 +16,7 @@ select variable_value-@a from information_schema.global_status where variable_na insert aaa(id, i) values(3,1); select variable_value-@a from information_schema.global_status where variable_name='rocksdb_wal_synced'; -SET 
LOCAL rocksdb_flush_log_at_trx_commit=1; +SET GLOBAL rocksdb_flush_log_at_trx_commit=1; insert aaa(id, i) values(4,1); select variable_value-@a from information_schema.global_status where variable_name='rocksdb_wal_synced'; insert aaa(id, i) values(5,1); @@ -23,8 +24,7 @@ select variable_value-@a from information_schema.global_status where variable_na insert aaa(id, i) values(6,1); select variable_value-@a from information_schema.global_status where variable_name='rocksdb_wal_synced'; -SET GLOBAL rocksdb_background_sync=on; -SET LOCAL rocksdb_flush_log_at_trx_commit=0; +SET GLOBAL rocksdb_flush_log_at_trx_commit=2; insert aaa(id, i) values(7,1); let $status_var=rocksdb_wal_synced; @@ -35,8 +35,6 @@ truncate table aaa; # Cleanup drop table aaa; -SET GLOBAL rocksdb_flush_log_at_trx_commit=1; +set @@global.rocksdb_flush_log_at_trx_commit=@save_rocksdb_flush_log_at_trx_commit; SET GLOBAL rocksdb_write_disable_wal=false; SET GLOBAL rocksdb_write_ignore_missing_column_families=false; -SET GLOBAL rocksdb_background_sync=off; - diff --git a/storage/rocksdb/mysql-test/rocksdb_hotbackup/include/create_table.sh b/storage/rocksdb/mysql-test/rocksdb_hotbackup/include/create_table.sh new file mode 100755 index 0000000000000..a4d60dc864c04 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_hotbackup/include/create_table.sh @@ -0,0 +1,16 @@ +set -e + +COPY_LOG=$1 +SIGNAL_FILE=$2 +# Creating a table after myrocks_hotbackup reaches waiting loop + +done=0 +while : ; do + wait=`tail -1 $COPY_LOG | grep 'Waiting until' | wc -l` + if [ "$wait" -eq "1" ]; then + break + fi + sleep 1 +done +$MYSQL --defaults-group-suffix=.1 db1 -e "create table r10 (id int primary key ) engine=rocksdb" +touch $SIGNAL_FILE diff --git a/storage/rocksdb/mysql-test/rocksdb_hotbackup/include/stream_run.sh b/storage/rocksdb/mysql-test/rocksdb_hotbackup/include/stream_run.sh index ef505e4b888b7..ecf8a85126754 100755 --- a/storage/rocksdb/mysql-test/rocksdb_hotbackup/include/stream_run.sh +++ 
b/storage/rocksdb/mysql-test/rocksdb_hotbackup/include/stream_run.sh @@ -1,3 +1,5 @@ +#!/bin/bash + if [ "$STREAM_TYPE" == 'wdt' ]; then which wdt >/dev/null 2>&1 if [ $? -ne 0 ]; then @@ -7,6 +9,7 @@ if [ "$STREAM_TYPE" == 'wdt' ]; then fi set -e +set -o pipefail # Takes a full backup from server_1 to server_2 # using myrocks_hotbackup streaming @@ -29,25 +32,37 @@ rm -rf $dest_data_dir/ mkdir $dest_data_dir COPY_LOG="${MYSQL_TMP_DIR}/myrocks_hotbackup_copy_log" +SIGNAL_CONDITION="" +SIGNAL_FILE=${MYSQL_TMP_DIR}/myrocks_hotbackup_signal +rm -f $COPY_LOG +rm -f $SIGNAL_FILE + +if [ "$FRM" == '1' ]; then + suite/rocksdb_hotbackup/include/create_table.sh $COPY_LOG $SIGNAL_FILE 2>&1 & +fi + +if [ "$DEBUG_SIGNAL" == '1' ]; then + SIGNAL_CONDITION="--debug_signal_file=$SIGNAL_FILE" +fi if [ "$STREAM_TYPE" == 'tar' ]; then BACKUP_CMD="$MYSQL_MYROCKS_HOTBACKUP --user='root' --port=${MASTER_MYPORT} \ - --stream=tar --checkpoint_dir=$checkpoint_dir 2> \ + --stream=tar --checkpoint_dir=$checkpoint_dir $SIGNAL_CONDITION 2> \ $COPY_LOG | tar -xi -C $backup_dir" elif [ "$STREAM_TYPE" == 'xbstream' ]; then BACKUP_CMD="$MYSQL_MYROCKS_HOTBACKUP --user='root' --port=${MASTER_MYPORT} \ - --stream=xbstream --checkpoint_dir=$checkpoint_dir 2> \ + --stream=xbstream --checkpoint_dir=$checkpoint_dir $SIGNAL_CONDITION 2> \ $COPY_LOG | xbstream -x \ --directory=$backup_dir" elif [ "$STREAM_TYPE" == "xbstream_socket" ]; then BACKUP_CMD="$MYSQL_MYROCKS_HOTBACKUP --user='root' --socket=${MASTER_MYSOCK} \ - --stream=xbstream --checkpoint_dir=$checkpoint_dir 2> \ + --stream=xbstream --checkpoint_dir=$checkpoint_dir $SIGNAL_CONDITION 2> \ $COPY_LOG | xbstream -x \ --directory=$backup_dir" else BACKUP_CMD="$MYSQL_MYROCKS_HOTBACKUP --user='root' --stream=wdt \ --port=${MASTER_MYPORT} --destination=localhost --backup_dir=$backup_dir \ - --avg_mbytes_per_sec=10 --interval=5 \ + --avg_mbytes_per_sec=10 --interval=5 $SIGNAL_CONDITION \ --extra_wdt_sender_options='--block_size_mbytes=1' \ 
--checkpoint_dir=$checkpoint_dir 2> \ $COPY_LOG" @@ -55,10 +70,6 @@ fi echo "myrocks_hotbackup copy phase" eval "$BACKUP_CMD" -if [ $? -ne 0 ]; then - tail $COPY_LOG - exit 1 -fi mkdir ${backup_dir}/test # TODO: Fix skipping empty directories @@ -70,7 +81,3 @@ $MYSQL_MYROCKS_HOTBACKUP --move_back --datadir=$dest_data_dir \ --rocksdb_waldir=$dest_data_dir/.rocksdb \ --backup_dir=$backup_dir > $MOVEBACK_LOG 2>&1 -if [ $? -ne 0 ]; then - tail $MOVEBACK_LOG - exit 1 -fi diff --git a/storage/rocksdb/mysql-test/rocksdb_hotbackup/r/xbstream.result b/storage/rocksdb/mysql-test/rocksdb_hotbackup/r/xbstream.result index d3f2ebc4e6f40..31ed267744470 100644 --- a/storage/rocksdb/mysql-test/rocksdb_hotbackup/r/xbstream.result +++ b/storage/rocksdb/mysql-test/rocksdb_hotbackup/r/xbstream.result @@ -10,6 +10,7 @@ key (`k`) ) engine=rocksdb; include/rpl_stop_server.inc [server_number=2] myrocks_hotbackup copy phase +myrocks_hotbackup copy phase myrocks_hotbackup move-back phase include/rpl_start_server.inc [server_number=2] select count(*) from db1.t1; diff --git a/storage/rocksdb/mysql-test/rocksdb_hotbackup/t/xbstream.test b/storage/rocksdb/mysql-test/rocksdb_hotbackup/t/xbstream.test index 9bfab4252c42e..52456a6814073 100644 --- a/storage/rocksdb/mysql-test/rocksdb_hotbackup/t/xbstream.test +++ b/storage/rocksdb/mysql-test/rocksdb_hotbackup/t/xbstream.test @@ -5,6 +5,9 @@ source suite/rocksdb_hotbackup/include/setup.inc; --let $rpl_server_number= 2 --source include/rpl_stop_server.inc +--error 1 +--exec STREAM_TYPE=xbstream FRM=1 DEBUG_SIGNAL=1 suite/rocksdb_hotbackup/include/stream_run.sh 2>&1 + --exec STREAM_TYPE=xbstream suite/rocksdb_hotbackup/include/stream_run.sh 2>&1 --let $rpl_server_number= 2 diff --git a/storage/rocksdb/mysql-test/rocksdb_rpl/r/rpl_skip_trx_api_binlog_format.result b/storage/rocksdb/mysql-test/rocksdb_rpl/r/rpl_skip_trx_api_binlog_format.result index e0dbc92cdf571..5559bf6168c05 100644 --- 
a/storage/rocksdb/mysql-test/rocksdb_rpl/r/rpl_skip_trx_api_binlog_format.result +++ b/storage/rocksdb/mysql-test/rocksdb_rpl/r/rpl_skip_trx_api_binlog_format.result @@ -10,7 +10,7 @@ create table t1(a int); set session binlog_format=STATEMENT; insert into t1 values(1); include/wait_for_slave_sql_error.inc [errno=1756] -Last_SQL_Error = 'Master's binlog format is not ROW but rpl_skip_tx_api is enabled on the slave. rpl_skip_tx_api recovery should only be used when master's binlog format is ROW.' +Last_SQL_Error = 'Master's binlog format is not ROW but rpl_skip_tx_api is enabled on the slave, this should only be used when master's binlog format is ROW.' "Table after error" select * from t1; a diff --git a/storage/rocksdb/mysql-test/rocksdb_rpl/r/singledelete_idempotent_recovery.result b/storage/rocksdb/mysql-test/rocksdb_rpl/r/singledelete_idempotent_recovery.result new file mode 100644 index 0000000000000..3d734c9498d50 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_rpl/r/singledelete_idempotent_recovery.result @@ -0,0 +1,24 @@ +include/master-slave.inc +Warnings: +Note #### Sending passwords in plain text without SSL/TLS is extremely insecure. +Note #### Storing MySQL user name or password information in the master info repository is not secure and is therefore not recommended. Please consider using the USER and PASSWORD connection options for START SLAVE; see the 'START SLAVE Syntax' in the MySQL Manual for more information. 
+[connection master] +call mtr.add_suppression("Recovery from master pos"); +drop table if exists r1; +create table r1 (id1 int, id2 int, primary key (id1, id2), index i (id2)) engine=rocksdb; +insert into r1 values (1, 1000); +set global rocksdb_force_flush_memtable_now=1; +include/rpl_start_server.inc [server_number=2] +include/start_slave.inc +delete r1 from r1 force index (i) where id2=1000; +select id1,id2 from r1 force index (primary) where id1=1 and id2=1000; +id1 id2 +select id2 from r1 force index (i) where id1=1 and id2=1000; +id2 +set global rocksdb_compact_cf='default'; +select id1,id2 from r1 force index (primary) where id1=1 and id2=1000; +id1 id2 +select id2 from r1 force index (i) where id1=1 and id2=1000; +id2 +drop table r1; +include/rpl_end.inc diff --git a/storage/rocksdb/mysql-test/rocksdb_rpl/r/singledelete_idempotent_table.result b/storage/rocksdb/mysql-test/rocksdb_rpl/r/singledelete_idempotent_table.result new file mode 100644 index 0000000000000..609d4a8821a3d --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_rpl/r/singledelete_idempotent_table.result @@ -0,0 +1,25 @@ +include/master-slave.inc +Warnings: +Note #### Sending passwords in plain text without SSL/TLS is extremely insecure. +Note #### Storing MySQL user name or password information in the master info repository is not secure and is therefore not recommended. Please consider using the USER and PASSWORD connection options for START SLAVE; see the 'START SLAVE Syntax' in the MySQL Manual for more information. 
+[connection master] +drop table if exists r1; +create table r1 (id1 int, id2 int, primary key (id1, id2), index i (id2)) engine=rocksdb; +insert into r1 values (1, 1000); +set sql_log_bin=0; +delete from r1 where id1=1 and id2=1000; +set sql_log_bin=1; +set global rocksdb_force_flush_memtable_now=1; +insert into r1 values (1, 1000); +delete r1 from r1 force index (i) where id2=1000; +select id1,id2 from r1 force index (primary); +id1 id2 +select id2 from r1 force index (i); +id2 +set global rocksdb_compact_cf='default'; +select id1,id2 from r1 force index (primary); +id1 id2 +select id2 from r1 force index (i); +id2 +drop table r1; +include/rpl_end.inc diff --git a/storage/rocksdb/mysql-test/rocksdb_rpl/t/multiclient_2pc-mater.opt b/storage/rocksdb/mysql-test/rocksdb_rpl/t/multiclient_2pc-master.opt similarity index 100% rename from storage/rocksdb/mysql-test/rocksdb_rpl/t/multiclient_2pc-mater.opt rename to storage/rocksdb/mysql-test/rocksdb_rpl/t/multiclient_2pc-master.opt diff --git a/storage/rocksdb/mysql-test/rocksdb_rpl/t/multiclient_2pc.test b/storage/rocksdb/mysql-test/rocksdb_rpl/t/multiclient_2pc.test index f47f83b0bd240..0f68de0471208 100644 --- a/storage/rocksdb/mysql-test/rocksdb_rpl/t/multiclient_2pc.test +++ b/storage/rocksdb/mysql-test/rocksdb_rpl/t/multiclient_2pc.test @@ -35,7 +35,7 @@ insert into t1 values (2, 1, "i_am_just_here_to_trigger_a_flush"); # Disable 2PC and syncing for faster inserting of dummy rows # These rows only purpose is to rotate the binlog -SET GLOBAL ROCKSDB_ENABLE_2PC = ON; +SET GLOBAL ROCKSDB_ENABLE_2PC = OFF; SET GLOBAL ROCKSDB_WRITE_SYNC = OFF; SET GLOBAL SYNC_BINLOG = 0; diff --git a/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_gtid_crash_safe.test b/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_gtid_crash_safe.test index 949fbad666df3..f1b1b16704fa9 100644 --- a/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_gtid_crash_safe.test +++ b/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_gtid_crash_safe.test @@ -1,3 +1,4 @@ +-- 
source include/have_rocksdb.inc -- source include/have_gtid.inc -- source include/master-slave.inc -- source include/have_debug.inc diff --git a/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_gtid_crash_safe_wal_corrupt.inc b/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_gtid_crash_safe_wal_corrupt.inc index 43ee7ec526c23..a52bfc9186d8c 100644 --- a/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_gtid_crash_safe_wal_corrupt.inc +++ b/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_gtid_crash_safe_wal_corrupt.inc @@ -1,3 +1,4 @@ +source include/have_rocksdb.inc; source include/master-slave.inc; -- let $uuid = `select @@server_uuid;` diff --git a/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_gtid_rocksdb_sys_header.test b/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_gtid_rocksdb_sys_header.test index 56c0eac25174a..d1793c4af1e5f 100644 --- a/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_gtid_rocksdb_sys_header.test +++ b/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_gtid_rocksdb_sys_header.test @@ -1,4 +1,5 @@ # based on rpl/rpl_gtid_innondb_sys_header.test +source include/have_rocksdb.inc; source include/master-slave.inc; source include/have_gtid.inc; source include/have_debug.inc; diff --git a/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_no_unique_check_on_lag.test b/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_no_unique_check_on_lag.test index 8c79d2afa03ad..cecacda44e8ce 100644 --- a/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_no_unique_check_on_lag.test +++ b/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_no_unique_check_on_lag.test @@ -2,5 +2,6 @@ --echo # Ensure skip_unique_check is set when lag exceeds lag_threshold --echo # +--source include/have_rocksdb.inc --source ../include/rpl_no_unique_check_on_lag.inc diff --git a/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_no_unique_check_on_lag_mts.test b/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_no_unique_check_on_lag_mts.test index c5cf1a8ae92d9..7e77ec87c3bb3 100644 --- 
a/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_no_unique_check_on_lag_mts.test +++ b/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_no_unique_check_on_lag_mts.test @@ -1,2 +1,3 @@ +--source include/have_rocksdb.inc --source ../include/rpl_no_unique_check_on_lag.inc diff --git a/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_rocksdb_snapshot.test b/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_rocksdb_snapshot.test index 37f80c8ace5e7..200f1cb314e03 100644 --- a/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_rocksdb_snapshot.test +++ b/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_rocksdb_snapshot.test @@ -1,3 +1,4 @@ +--source include/have_rocksdb.inc --source include/master-slave.inc --source include/have_binlog_format_row.inc @@ -38,9 +39,9 @@ CREATE TABLE t1 (a INT PRIMARY KEY) ENGINE=rocksdb; INSERT INTO t1 VALUES(1); SET TRANSACTION ISOLATION LEVEL READ COMMITTED; ---error ER_UNKNOWN_ERROR +--error ER_ISOLATION_LEVEL_WITH_CONSISTENT_SNAPSHOT START TRANSACTION WITH CONSISTENT SNAPSHOT; ---error ER_UNKNOWN_ERROR +--error ER_ISOLATION_LEVEL_WITH_CONSISTENT_SNAPSHOT START TRANSACTION WITH CONSISTENT ROCKSDB SNAPSHOT; ROLLBACK; SET TRANSACTION ISOLATION LEVEL REPEATABLE READ; diff --git a/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_rocksdb_snapshot_without_gtid.test b/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_rocksdb_snapshot_without_gtid.test index 2b590f8465389..79d71f20e8a81 100644 --- a/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_rocksdb_snapshot_without_gtid.test +++ b/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_rocksdb_snapshot_without_gtid.test @@ -1,3 +1,4 @@ +--source include/have_rocksdb.inc --source include/master-slave.inc --source include/have_binlog_format_row.inc diff --git a/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_rocksdb_stress_crash-slave.opt b/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_rocksdb_stress_crash-slave.opt index 67f0fcf77f07f..b3d52445ad8fb 100644 --- a/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_rocksdb_stress_crash-slave.opt +++ 
b/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_rocksdb_stress_crash-slave.opt @@ -1,2 +1,3 @@ --gtid_mode=ON --enforce_gtid_consistency --log_slave_updates --max_binlog_size=50000 --slave_parallel_workers=30 --relay_log_recovery=1 --rocksdb_unsafe_for_binlog=TRUE +--rocksdb_wal_recovery_mode=2 diff --git a/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_skip_trx_api_binlog_format.test b/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_skip_trx_api_binlog_format.test index 22151d1454751..1ea9add80190b 100644 --- a/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_skip_trx_api_binlog_format.test +++ b/storage/rocksdb/mysql-test/rocksdb_rpl/t/rpl_skip_trx_api_binlog_format.test @@ -1,5 +1,6 @@ # Checks if the slave stops executing transactions when master's binlog format # is STATEMENT but rpl_skip_tx_api is enabled +-- source include/have_rocksdb.inc -- source include/master-slave.inc call mtr.add_suppression("Master's binlog format is not ROW but rpl_skip_tx_api is enabled on the slave"); diff --git a/storage/rocksdb/mysql-test/rocksdb_rpl/t/singledelete_idempotent_recovery.cnf b/storage/rocksdb/mysql-test/rocksdb_rpl/t/singledelete_idempotent_recovery.cnf new file mode 100644 index 0000000000000..71e124adc8106 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_rpl/t/singledelete_idempotent_recovery.cnf @@ -0,0 +1,15 @@ +!include suite/rpl/my.cnf + +[mysqld.1] +log_slave_updates +gtid_mode=ON +enforce_gtid_consistency=ON + +[mysqld.2] +relay_log_recovery=1 +relay_log_info_repository=FILE +log_slave_updates +gtid_mode=ON +enforce_gtid_consistency=ON +slave_use_idempotent_for_recovery=Yes + diff --git a/storage/rocksdb/mysql-test/rocksdb_rpl/t/singledelete_idempotent_recovery.test b/storage/rocksdb/mysql-test/rocksdb_rpl/t/singledelete_idempotent_recovery.test new file mode 100644 index 0000000000000..9180afa881fb2 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_rpl/t/singledelete_idempotent_recovery.test @@ -0,0 +1,72 @@ + +--source include/have_binlog_format_row.inc 
+--source include/have_rocksdb.inc +--source include/master-slave.inc +--source include/have_gtid.inc +--source include/not_valgrind.inc + +# This is a test case for issue#655 -- SingleDelete on Primary Key may +# cause extra rows than Secondary Keys + +call mtr.add_suppression("Recovery from master pos"); + +connection master; +--disable_warnings +drop table if exists r1; +--enable_warnings +create table r1 (id1 int, id2 int, primary key (id1, id2), index i (id2)) engine=rocksdb; +insert into r1 values (1, 1000); + +sync_slave_with_master; +connection slave; +set global rocksdb_force_flush_memtable_now=1; +--let slave_data_dir= query_get_value(SELECT @@DATADIR, @@DATADIR, 1) +--let slave_binlog_file= query_get_value(SHOW MASTER STATUS, File, 1) +--let slave_pid_file= query_get_value(SELECT @@pid_file, @@pid_file, 1) +--exec echo "wait" > $MYSQLTEST_VARDIR/tmp/mysqld.2.expect + +--write_file $MYSQL_TMP_DIR/truncate_tail_binlog.sh +#!/bin/bash + +F=$slave_data_dir/$slave_binlog_file +SIZE=`stat -c %s $F` +NEW_SIZE=`expr $SIZE - 100` +truncate -s $NEW_SIZE $F +rc=$? +if [[ $rc != 0 ]]; then + exit 1 +fi + +kill -9 `head -1 $slave_pid_file` + +exit 0 +EOF +--chmod 0755 $MYSQL_TMP_DIR/truncate_tail_binlog.sh +--exec $MYSQL_TMP_DIR/truncate_tail_binlog.sh + +--exec echo "wait" > $MYSQLTEST_VARDIR/tmp/mysqld.2.expect + +# Crash recovery (losing some binlogs) with slave_use_idempotent_for_recovery may +# replay same transactions with slave_exec_mode=idempotent implicitly enabled. +# On slave, the last insert is converted to update with the same key. +# It should be treated as SD and Put (same as singledelete_idempotent_table.test). 
+ +--source include/rpl_start_server.inc +--source include/start_slave.inc +connection master; +sync_slave_with_master; +connection slave; +delete r1 from r1 force index (i) where id2=1000; +select id1,id2 from r1 force index (primary) where id1=1 and id2=1000; +select id2 from r1 force index (i) where id1=1 and id2=1000; +set global rocksdb_compact_cf='default'; +select id1,id2 from r1 force index (primary) where id1=1 and id2=1000; +select id2 from r1 force index (i) where id1=1 and id2=1000; + +connection master; +drop table r1; + +--remove_file $MYSQL_TMP_DIR/truncate_tail_binlog.sh +--source include/rpl_end.inc + + diff --git a/storage/rocksdb/mysql-test/rocksdb_rpl/t/singledelete_idempotent_table.cnf b/storage/rocksdb/mysql-test/rocksdb_rpl/t/singledelete_idempotent_table.cnf new file mode 100644 index 0000000000000..ad4894f5b3839 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_rpl/t/singledelete_idempotent_table.cnf @@ -0,0 +1,15 @@ +!include suite/rpl/my.cnf + +[mysqld.1] +log_slave_updates +gtid_mode=ON +enforce_gtid_consistency=ON + +[mysqld.2] +relay_log_recovery=1 +relay_log_info_repository=FILE +log_slave_updates +gtid_mode=ON +enforce_gtid_consistency=ON +rbr_idempotent_tables='r1' + diff --git a/storage/rocksdb/mysql-test/rocksdb_rpl/t/singledelete_idempotent_table.test b/storage/rocksdb/mysql-test/rocksdb_rpl/t/singledelete_idempotent_table.test new file mode 100644 index 0000000000000..23d335d6b5741 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_rpl/t/singledelete_idempotent_table.test @@ -0,0 +1,44 @@ + +--source include/have_binlog_format_row.inc +--source include/have_rocksdb.inc +--source include/master-slave.inc +--source include/have_gtid.inc +--source include/not_valgrind.inc + +# This is a test case for issue#655 -- SingleDelete on Primary Key may +# cause extra rows than Secondary Keys + +connection master; +--disable_warnings +drop table if exists r1; +--enable_warnings +create table r1 (id1 int, id2 int, primary key (id1, 
id2), index i (id2)) engine=rocksdb; +insert into r1 values (1, 1000); +set sql_log_bin=0; +delete from r1 where id1=1 and id2=1000; +set sql_log_bin=1; + +sync_slave_with_master; +connection slave; +set global rocksdb_force_flush_memtable_now=1; + +connection master; +# same key insert on slave. Since slave sets rbr_idempotent_tables, the insert +# is converted to update with the same key. MyRocks should call SD and Put for the key +insert into r1 values (1, 1000); +sync_slave_with_master; + +connection slave; +delete r1 from r1 force index (i) where id2=1000; +select id1,id2 from r1 force index (primary); +select id2 from r1 force index (i); +set global rocksdb_compact_cf='default'; +select id1,id2 from r1 force index (primary); +select id2 from r1 force index (i); + +connection master; +drop table r1; + +--source include/rpl_end.inc + + diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/include/correctboolvalue.inc b/storage/rocksdb/mysql-test/rocksdb_sys_vars/include/correctboolvalue.inc new file mode 100644 index 0000000000000..f675aec19f98e --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/include/correctboolvalue.inc @@ -0,0 +1,25 @@ +## +# $input the value of a boolean type +# $output the value of int type +## +--let $int_value=$value +if ($value==on) +{ + --let $int_value=1 +} + +if ($value==off) +{ + --let $int_value=0 +} + +# MySQL allows 'true' and 'false' on bool values +if ($value==true) +{ + --let $int_value=1 +} + +if ($value==false) +{ + --let $int_value=0 +} diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/include/rocksdb_sys_var.inc b/storage/rocksdb/mysql-test/rocksdb_sys_vars/include/rocksdb_sys_var.inc new file mode 100644 index 0000000000000..c3ac5e1229eaf --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/include/rocksdb_sys_var.inc @@ -0,0 +1,119 @@ +## +# $sys_var name of the variable +# $read_only - true if read-only +# $session - true if this is session, false if global-only +# valid_values table 
should contain valid values +# invalid_values +## + +--eval SET @start_global_value = @@global.$sys_var +SELECT @start_global_value; +if ($session) +{ + --eval SET @start_session_value = @@session.$sys_var + SELECT @start_session_value; +} + +if (!$read_only) +{ + --echo '# Setting to valid values in global scope#' + + --let $i=1 + --let $value=query_get_value(select value from valid_values, value, $i) + while ($value != 'No such row') + { + --echo "Trying to set variable @@global.$sys_var to $value" + --eval SET @@global.$sys_var = $value + --eval SELECT @@global.$sys_var + --let $v=`SELECT @@global.$sys_var` + --source ./correctboolvalue.inc + if (!$sticky) + { + if ($v != $int_value) + { + --echo Set @@global.$sys_var to $value but it remained set to $v + --die Wrong variable value + } + } + + --echo "Setting the global scope variable back to default" + --eval SET @@global.$sys_var = DEFAULT + --eval SELECT @@global.$sys_var + + --inc $i + --let $value=query_get_value(select value from valid_values, value, $i) + } + + if ($session) + { + --echo '# Setting to valid values in session scope#' + + --let $i=1 + --let $value=query_get_value(select value from valid_values, value, $i) + while ($value != 'No such row') + { + --echo "Trying to set variable @@session.$sys_var to $value" + --eval SET @@session.$sys_var = $value + --eval SELECT @@session.$sys_var + --let $v=`SELECT @@session.$sys_var` + --source ./correctboolvalue.inc + if (!$sticky) + { + if ($v != $int_value) + { + --echo Set @@session.$sys_var to $value but it remained set to $v + --die Wrong variable value + } + } + --echo "Setting the session scope variable back to default" + --eval SET @@session.$sys_var = DEFAULT + --eval SELECT @@session.$sys_var + + --inc $i + --let $value=query_get_value(select value from valid_values, value, $i) + } + } + if (!$session) + { + --echo "Trying to set variable @@session.$sys_var to 444. It should fail because it is not session." 
+ --Error ER_GLOBAL_VARIABLE + --eval SET @@session.$sys_var = 444 + } + + --echo '# Testing with invalid values in global scope #' + #################################################################### + # Change the value of query_prealloc_size to an invalid value # + #################################################################### + --let $i=1 + --let $value=query_get_value(select value from invalid_values, value, $i) + while ($value != 'No such row') + { + --echo "Trying to set variable @@global.$sys_var to $value" + --Error ER_WRONG_VALUE_FOR_VAR, ER_WRONG_TYPE_FOR_VAR + --eval SET @@global.$sys_var = $value + --eval SELECT @@global.$sys_var + --inc $i + --let $value=query_get_value(select value from invalid_values, value, $i) + } +} + +if ($read_only) +{ + --echo "Trying to set variable @@global.$sys_var to 444. It should fail because it is readonly." + --Error ER_INCORRECT_GLOBAL_LOCAL_VAR + --eval SET @@global.$sys_var = 444 +} + +#################################### +# Restore initial value # +#################################### +if (!$read_only) +{ + --eval SET @@global.$sys_var = @start_global_value + --eval SELECT @@global.$sys_var + if ($session) + { + --eval SET @@session.$sys_var = @start_session_value + --eval SELECT @@session.$sys_var + } +} diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/all_vars.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/all_vars.result index 9f21825d262f0..159d6a983c8c1 100644 --- a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/all_vars.result +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/all_vars.result @@ -9,7 +9,5 @@ There should be *no* long test name listed below: select variable_name as `There should be *no* variables listed below:` from t2 left join t1 on variable_name=test_name where test_name is null ORDER BY variable_name; There should be *no* variables listed below: -ROCKSDB_ENABLE_2PC -ROCKSDB_ENABLE_2PC drop table t1; drop table t2; diff --git 
a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_allow_concurrent_memtable_write_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_allow_concurrent_memtable_write_basic.result index 93ec1aec40762..3b174fbbc6355 100644 --- a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_allow_concurrent_memtable_write_basic.result +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_allow_concurrent_memtable_write_basic.result @@ -1,64 +1,7 @@ -CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam; -INSERT INTO valid_values VALUES(1); -INSERT INTO valid_values VALUES(0); -INSERT INTO valid_values VALUES('on'); -CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; -INSERT INTO invalid_values VALUES('\'aaa\''); -INSERT INTO invalid_values VALUES('\'bbb\''); SET @start_global_value = @@global.ROCKSDB_ALLOW_CONCURRENT_MEMTABLE_WRITE; SELECT @start_global_value; @start_global_value 0 -'# Setting to valid values in global scope#' -"Trying to set variable @@global.ROCKSDB_ALLOW_CONCURRENT_MEMTABLE_WRITE to 1" -SET @@global.ROCKSDB_ALLOW_CONCURRENT_MEMTABLE_WRITE = 1; -SELECT @@global.ROCKSDB_ALLOW_CONCURRENT_MEMTABLE_WRITE; -@@global.ROCKSDB_ALLOW_CONCURRENT_MEMTABLE_WRITE -1 -"Setting the global scope variable back to default" -SET @@global.ROCKSDB_ALLOW_CONCURRENT_MEMTABLE_WRITE = DEFAULT; -SELECT @@global.ROCKSDB_ALLOW_CONCURRENT_MEMTABLE_WRITE; -@@global.ROCKSDB_ALLOW_CONCURRENT_MEMTABLE_WRITE -0 -"Trying to set variable @@global.ROCKSDB_ALLOW_CONCURRENT_MEMTABLE_WRITE to 0" -SET @@global.ROCKSDB_ALLOW_CONCURRENT_MEMTABLE_WRITE = 0; -SELECT @@global.ROCKSDB_ALLOW_CONCURRENT_MEMTABLE_WRITE; -@@global.ROCKSDB_ALLOW_CONCURRENT_MEMTABLE_WRITE -0 -"Setting the global scope variable back to default" -SET @@global.ROCKSDB_ALLOW_CONCURRENT_MEMTABLE_WRITE = DEFAULT; -SELECT @@global.ROCKSDB_ALLOW_CONCURRENT_MEMTABLE_WRITE; -@@global.ROCKSDB_ALLOW_CONCURRENT_MEMTABLE_WRITE -0 -"Trying to set variable 
@@global.ROCKSDB_ALLOW_CONCURRENT_MEMTABLE_WRITE to on" -SET @@global.ROCKSDB_ALLOW_CONCURRENT_MEMTABLE_WRITE = on; -SELECT @@global.ROCKSDB_ALLOW_CONCURRENT_MEMTABLE_WRITE; -@@global.ROCKSDB_ALLOW_CONCURRENT_MEMTABLE_WRITE -1 -"Setting the global scope variable back to default" -SET @@global.ROCKSDB_ALLOW_CONCURRENT_MEMTABLE_WRITE = DEFAULT; -SELECT @@global.ROCKSDB_ALLOW_CONCURRENT_MEMTABLE_WRITE; -@@global.ROCKSDB_ALLOW_CONCURRENT_MEMTABLE_WRITE -0 -"Trying to set variable @@session.ROCKSDB_ALLOW_CONCURRENT_MEMTABLE_WRITE to 444. It should fail because it is not session." -SET @@session.ROCKSDB_ALLOW_CONCURRENT_MEMTABLE_WRITE = 444; -ERROR HY000: Variable 'rocksdb_allow_concurrent_memtable_write' is a GLOBAL variable and should be set with SET GLOBAL -'# Testing with invalid values in global scope #' -"Trying to set variable @@global.ROCKSDB_ALLOW_CONCURRENT_MEMTABLE_WRITE to 'aaa'" -SET @@global.ROCKSDB_ALLOW_CONCURRENT_MEMTABLE_WRITE = 'aaa'; -Got one of the listed errors -SELECT @@global.ROCKSDB_ALLOW_CONCURRENT_MEMTABLE_WRITE; -@@global.ROCKSDB_ALLOW_CONCURRENT_MEMTABLE_WRITE -0 -"Trying to set variable @@global.ROCKSDB_ALLOW_CONCURRENT_MEMTABLE_WRITE to 'bbb'" -SET @@global.ROCKSDB_ALLOW_CONCURRENT_MEMTABLE_WRITE = 'bbb'; -Got one of the listed errors -SELECT @@global.ROCKSDB_ALLOW_CONCURRENT_MEMTABLE_WRITE; -@@global.ROCKSDB_ALLOW_CONCURRENT_MEMTABLE_WRITE -0 -SET @@global.ROCKSDB_ALLOW_CONCURRENT_MEMTABLE_WRITE = @start_global_value; -SELECT @@global.ROCKSDB_ALLOW_CONCURRENT_MEMTABLE_WRITE; -@@global.ROCKSDB_ALLOW_CONCURRENT_MEMTABLE_WRITE -0 -DROP TABLE valid_values; -DROP TABLE invalid_values; +"Trying to set variable @@global.ROCKSDB_ALLOW_CONCURRENT_MEMTABLE_WRITE to 444. It should fail because it is readonly." 
+SET @@global.ROCKSDB_ALLOW_CONCURRENT_MEMTABLE_WRITE = 444; +ERROR HY000: Variable 'rocksdb_allow_concurrent_memtable_write' is a read only variable diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_background_sync_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_background_sync_basic.result deleted file mode 100644 index 8998bfee64d66..0000000000000 --- a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_background_sync_basic.result +++ /dev/null @@ -1,68 +0,0 @@ -CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam; -INSERT INTO valid_values VALUES(1); -INSERT INTO valid_values VALUES(0); -INSERT INTO valid_values VALUES('on'); -INSERT INTO valid_values VALUES('off'); -CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; -INSERT INTO invalid_values VALUES('\'aaa\''); -SET @start_global_value = @@global.ROCKSDB_BACKGROUND_SYNC; -SELECT @start_global_value; -@start_global_value -0 -'# Setting to valid values in global scope#' -"Trying to set variable @@global.ROCKSDB_BACKGROUND_SYNC to 1" -SET @@global.ROCKSDB_BACKGROUND_SYNC = 1; -SELECT @@global.ROCKSDB_BACKGROUND_SYNC; -@@global.ROCKSDB_BACKGROUND_SYNC -1 -"Setting the global scope variable back to default" -SET @@global.ROCKSDB_BACKGROUND_SYNC = DEFAULT; -SELECT @@global.ROCKSDB_BACKGROUND_SYNC; -@@global.ROCKSDB_BACKGROUND_SYNC -0 -"Trying to set variable @@global.ROCKSDB_BACKGROUND_SYNC to 0" -SET @@global.ROCKSDB_BACKGROUND_SYNC = 0; -SELECT @@global.ROCKSDB_BACKGROUND_SYNC; -@@global.ROCKSDB_BACKGROUND_SYNC -0 -"Setting the global scope variable back to default" -SET @@global.ROCKSDB_BACKGROUND_SYNC = DEFAULT; -SELECT @@global.ROCKSDB_BACKGROUND_SYNC; -@@global.ROCKSDB_BACKGROUND_SYNC -0 -"Trying to set variable @@global.ROCKSDB_BACKGROUND_SYNC to on" -SET @@global.ROCKSDB_BACKGROUND_SYNC = on; -SELECT @@global.ROCKSDB_BACKGROUND_SYNC; -@@global.ROCKSDB_BACKGROUND_SYNC -1 -"Setting the global scope variable back to default" -SET 
@@global.ROCKSDB_BACKGROUND_SYNC = DEFAULT; -SELECT @@global.ROCKSDB_BACKGROUND_SYNC; -@@global.ROCKSDB_BACKGROUND_SYNC -0 -"Trying to set variable @@global.ROCKSDB_BACKGROUND_SYNC to off" -SET @@global.ROCKSDB_BACKGROUND_SYNC = off; -SELECT @@global.ROCKSDB_BACKGROUND_SYNC; -@@global.ROCKSDB_BACKGROUND_SYNC -0 -"Setting the global scope variable back to default" -SET @@global.ROCKSDB_BACKGROUND_SYNC = DEFAULT; -SELECT @@global.ROCKSDB_BACKGROUND_SYNC; -@@global.ROCKSDB_BACKGROUND_SYNC -0 -"Trying to set variable @@session.ROCKSDB_BACKGROUND_SYNC to 444. It should fail because it is not session." -SET @@session.ROCKSDB_BACKGROUND_SYNC = 444; -ERROR HY000: Variable 'rocksdb_background_sync' is a GLOBAL variable and should be set with SET GLOBAL -'# Testing with invalid values in global scope #' -"Trying to set variable @@global.ROCKSDB_BACKGROUND_SYNC to 'aaa'" -SET @@global.ROCKSDB_BACKGROUND_SYNC = 'aaa'; -Got one of the listed errors -SELECT @@global.ROCKSDB_BACKGROUND_SYNC; -@@global.ROCKSDB_BACKGROUND_SYNC -0 -SET @@global.ROCKSDB_BACKGROUND_SYNC = @start_global_value; -SELECT @@global.ROCKSDB_BACKGROUND_SYNC; -@@global.ROCKSDB_BACKGROUND_SYNC -0 -DROP TABLE valid_values; -DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_base_background_compactions_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_base_background_compactions_basic.result deleted file mode 100644 index 09acaada0c631..0000000000000 --- a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_base_background_compactions_basic.result +++ /dev/null @@ -1,7 +0,0 @@ -SET @start_global_value = @@global.ROCKSDB_BASE_BACKGROUND_COMPACTIONS; -SELECT @start_global_value; -@start_global_value -1 -"Trying to set variable @@global.ROCKSDB_BASE_BACKGROUND_COMPACTIONS to 444. It should fail because it is readonly." 
-SET @@global.ROCKSDB_BASE_BACKGROUND_COMPACTIONS = 444; -ERROR HY000: Variable 'rocksdb_base_background_compactions' is a read only variable diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_create_checkpoint_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_create_checkpoint_basic.result index 35e4d252e11ff..ba280a32ab288 100644 --- a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_create_checkpoint_basic.result +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_create_checkpoint_basic.result @@ -12,4 +12,4 @@ SET @@global.ROCKSDB_CREATE_CHECKPOINT = DEFAULT; SET @@session.ROCKSDB_CREATE_CHECKPOINT = 444; ERROR HY000: Variable 'rocksdb_create_checkpoint' is a GLOBAL variable and should be set with SET GLOBAL SET @@global.ROCKSDB_CREATE_CHECKPOINT = @start_value; -ERROR HY000: RocksDB: Failed to create checkpoint directory. status 5 IO error: .tmp: No such file or directory +ERROR HY000: Status error 5 received from RocksDB: IO error: While renaming a file to : .tmp: No such file or directory diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_debug_ttl_read_filter_ts_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_debug_ttl_read_filter_ts_basic.result new file mode 100644 index 0000000000000..bbc46001817e4 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_debug_ttl_read_filter_ts_basic.result @@ -0,0 +1,46 @@ +CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO valid_values VALUES(2400); +INSERT INTO valid_values VALUES(-2400); +CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO invalid_values VALUES('\'aaa\''); +SET @start_global_value = @@global.ROCKSDB_DEBUG_TTL_READ_FILTER_TS; +SELECT @start_global_value; +@start_global_value +0 +'# Setting to valid values in global scope#' +"Trying to set variable @@global.ROCKSDB_DEBUG_TTL_READ_FILTER_TS to 2400" +SET @@global.ROCKSDB_DEBUG_TTL_READ_FILTER_TS = 2400; 
+SELECT @@global.ROCKSDB_DEBUG_TTL_READ_FILTER_TS; +@@global.ROCKSDB_DEBUG_TTL_READ_FILTER_TS +2400 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_DEBUG_TTL_READ_FILTER_TS = DEFAULT; +SELECT @@global.ROCKSDB_DEBUG_TTL_READ_FILTER_TS; +@@global.ROCKSDB_DEBUG_TTL_READ_FILTER_TS +0 +"Trying to set variable @@global.ROCKSDB_DEBUG_TTL_READ_FILTER_TS to -2400" +SET @@global.ROCKSDB_DEBUG_TTL_READ_FILTER_TS = -2400; +SELECT @@global.ROCKSDB_DEBUG_TTL_READ_FILTER_TS; +@@global.ROCKSDB_DEBUG_TTL_READ_FILTER_TS +-2400 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_DEBUG_TTL_READ_FILTER_TS = DEFAULT; +SELECT @@global.ROCKSDB_DEBUG_TTL_READ_FILTER_TS; +@@global.ROCKSDB_DEBUG_TTL_READ_FILTER_TS +0 +"Trying to set variable @@session.ROCKSDB_DEBUG_TTL_READ_FILTER_TS to 444. It should fail because it is not session." +SET @@session.ROCKSDB_DEBUG_TTL_READ_FILTER_TS = 444; +ERROR HY000: Variable 'rocksdb_debug_ttl_read_filter_ts' is a GLOBAL variable and should be set with SET GLOBAL +'# Testing with invalid values in global scope #' +"Trying to set variable @@global.ROCKSDB_DEBUG_TTL_READ_FILTER_TS to 'aaa'" +SET @@global.ROCKSDB_DEBUG_TTL_READ_FILTER_TS = 'aaa'; +Got one of the listed errors +SELECT @@global.ROCKSDB_DEBUG_TTL_READ_FILTER_TS; +@@global.ROCKSDB_DEBUG_TTL_READ_FILTER_TS +0 +SET @@global.ROCKSDB_DEBUG_TTL_READ_FILTER_TS = @start_global_value; +SELECT @@global.ROCKSDB_DEBUG_TTL_READ_FILTER_TS; +@@global.ROCKSDB_DEBUG_TTL_READ_FILTER_TS +0 +DROP TABLE valid_values; +DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_debug_ttl_rec_ts_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_debug_ttl_rec_ts_basic.result new file mode 100644 index 0000000000000..347ba9a0b3dc4 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_debug_ttl_rec_ts_basic.result @@ -0,0 +1,46 @@ +CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam; +INSERT 
INTO valid_values VALUES(2400); +INSERT INTO valid_values VALUES(-2400); +CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO invalid_values VALUES('\'aaa\''); +SET @start_global_value = @@global.ROCKSDB_DEBUG_TTL_REC_TS; +SELECT @start_global_value; +@start_global_value +0 +'# Setting to valid values in global scope#' +"Trying to set variable @@global.ROCKSDB_DEBUG_TTL_REC_TS to 2400" +SET @@global.ROCKSDB_DEBUG_TTL_REC_TS = 2400; +SELECT @@global.ROCKSDB_DEBUG_TTL_REC_TS; +@@global.ROCKSDB_DEBUG_TTL_REC_TS +2400 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_DEBUG_TTL_REC_TS = DEFAULT; +SELECT @@global.ROCKSDB_DEBUG_TTL_REC_TS; +@@global.ROCKSDB_DEBUG_TTL_REC_TS +0 +"Trying to set variable @@global.ROCKSDB_DEBUG_TTL_REC_TS to -2400" +SET @@global.ROCKSDB_DEBUG_TTL_REC_TS = -2400; +SELECT @@global.ROCKSDB_DEBUG_TTL_REC_TS; +@@global.ROCKSDB_DEBUG_TTL_REC_TS +-2400 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_DEBUG_TTL_REC_TS = DEFAULT; +SELECT @@global.ROCKSDB_DEBUG_TTL_REC_TS; +@@global.ROCKSDB_DEBUG_TTL_REC_TS +0 +"Trying to set variable @@session.ROCKSDB_DEBUG_TTL_REC_TS to 444. It should fail because it is not session." 
+SET @@session.ROCKSDB_DEBUG_TTL_REC_TS = 444; +ERROR HY000: Variable 'rocksdb_debug_ttl_rec_ts' is a GLOBAL variable and should be set with SET GLOBAL +'# Testing with invalid values in global scope #' +"Trying to set variable @@global.ROCKSDB_DEBUG_TTL_REC_TS to 'aaa'" +SET @@global.ROCKSDB_DEBUG_TTL_REC_TS = 'aaa'; +Got one of the listed errors +SELECT @@global.ROCKSDB_DEBUG_TTL_REC_TS; +@@global.ROCKSDB_DEBUG_TTL_REC_TS +0 +SET @@global.ROCKSDB_DEBUG_TTL_REC_TS = @start_global_value; +SELECT @@global.ROCKSDB_DEBUG_TTL_REC_TS; +@@global.ROCKSDB_DEBUG_TTL_REC_TS +0 +DROP TABLE valid_values; +DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_debug_ttl_snapshot_ts_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_debug_ttl_snapshot_ts_basic.result new file mode 100644 index 0000000000000..03a937ef218ea --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_debug_ttl_snapshot_ts_basic.result @@ -0,0 +1,46 @@ +CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO valid_values VALUES(2400); +INSERT INTO valid_values VALUES(-2400); +CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO invalid_values VALUES('\'aaa\''); +SET @start_global_value = @@global.ROCKSDB_DEBUG_TTL_SNAPSHOT_TS; +SELECT @start_global_value; +@start_global_value +0 +'# Setting to valid values in global scope#' +"Trying to set variable @@global.ROCKSDB_DEBUG_TTL_SNAPSHOT_TS to 2400" +SET @@global.ROCKSDB_DEBUG_TTL_SNAPSHOT_TS = 2400; +SELECT @@global.ROCKSDB_DEBUG_TTL_SNAPSHOT_TS; +@@global.ROCKSDB_DEBUG_TTL_SNAPSHOT_TS +2400 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_DEBUG_TTL_SNAPSHOT_TS = DEFAULT; +SELECT @@global.ROCKSDB_DEBUG_TTL_SNAPSHOT_TS; +@@global.ROCKSDB_DEBUG_TTL_SNAPSHOT_TS +0 +"Trying to set variable @@global.ROCKSDB_DEBUG_TTL_SNAPSHOT_TS to -2400" +SET @@global.ROCKSDB_DEBUG_TTL_SNAPSHOT_TS = -2400; +SELECT 
@@global.ROCKSDB_DEBUG_TTL_SNAPSHOT_TS; +@@global.ROCKSDB_DEBUG_TTL_SNAPSHOT_TS +-2400 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_DEBUG_TTL_SNAPSHOT_TS = DEFAULT; +SELECT @@global.ROCKSDB_DEBUG_TTL_SNAPSHOT_TS; +@@global.ROCKSDB_DEBUG_TTL_SNAPSHOT_TS +0 +"Trying to set variable @@session.ROCKSDB_DEBUG_TTL_SNAPSHOT_TS to 444. It should fail because it is not session." +SET @@session.ROCKSDB_DEBUG_TTL_SNAPSHOT_TS = 444; +ERROR HY000: Variable 'rocksdb_debug_ttl_snapshot_ts' is a GLOBAL variable and should be set with SET GLOBAL +'# Testing with invalid values in global scope #' +"Trying to set variable @@global.ROCKSDB_DEBUG_TTL_SNAPSHOT_TS to 'aaa'" +SET @@global.ROCKSDB_DEBUG_TTL_SNAPSHOT_TS = 'aaa'; +Got one of the listed errors +SELECT @@global.ROCKSDB_DEBUG_TTL_SNAPSHOT_TS; +@@global.ROCKSDB_DEBUG_TTL_SNAPSHOT_TS +0 +SET @@global.ROCKSDB_DEBUG_TTL_SNAPSHOT_TS = @start_global_value; +SELECT @@global.ROCKSDB_DEBUG_TTL_SNAPSHOT_TS; +@@global.ROCKSDB_DEBUG_TTL_SNAPSHOT_TS +0 +DROP TABLE valid_values; +DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_delayed_write_rate_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_delayed_write_rate_basic.result index 3eefd822e69a8..13749e1c220bc 100644 --- a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_delayed_write_rate_basic.result +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_delayed_write_rate_basic.result @@ -11,7 +11,7 @@ INSERT INTO invalid_values VALUES('\'484436\''); SET @start_global_value = @@global.ROCKSDB_DELAYED_WRITE_RATE; SELECT @start_global_value; @start_global_value -16777216 +0 '# Setting to valid values in global scope#' "Trying to set variable @@global.ROCKSDB_DELAYED_WRITE_RATE to 100" SET @@global.ROCKSDB_DELAYED_WRITE_RATE = 100; @@ -22,7 +22,7 @@ SELECT @@global.ROCKSDB_DELAYED_WRITE_RATE; SET @@global.ROCKSDB_DELAYED_WRITE_RATE = DEFAULT; SELECT @@global.ROCKSDB_DELAYED_WRITE_RATE; 
@@global.ROCKSDB_DELAYED_WRITE_RATE -16777216 +0 "Trying to set variable @@global.ROCKSDB_DELAYED_WRITE_RATE to 1" SET @@global.ROCKSDB_DELAYED_WRITE_RATE = 1; SELECT @@global.ROCKSDB_DELAYED_WRITE_RATE; @@ -32,7 +32,7 @@ SELECT @@global.ROCKSDB_DELAYED_WRITE_RATE; SET @@global.ROCKSDB_DELAYED_WRITE_RATE = DEFAULT; SELECT @@global.ROCKSDB_DELAYED_WRITE_RATE; @@global.ROCKSDB_DELAYED_WRITE_RATE -16777216 +0 "Trying to set variable @@global.ROCKSDB_DELAYED_WRITE_RATE to 0" SET @@global.ROCKSDB_DELAYED_WRITE_RATE = 0; SELECT @@global.ROCKSDB_DELAYED_WRITE_RATE; @@ -42,7 +42,7 @@ SELECT @@global.ROCKSDB_DELAYED_WRITE_RATE; SET @@global.ROCKSDB_DELAYED_WRITE_RATE = DEFAULT; SELECT @@global.ROCKSDB_DELAYED_WRITE_RATE; @@global.ROCKSDB_DELAYED_WRITE_RATE -16777216 +0 "Trying to set variable @@session.ROCKSDB_DELAYED_WRITE_RATE to 444. It should fail because it is not session." SET @@session.ROCKSDB_DELAYED_WRITE_RATE = 444; ERROR HY000: Variable 'rocksdb_delayed_write_rate' is a GLOBAL variable and should be set with SET GLOBAL @@ -52,34 +52,34 @@ SET @@global.ROCKSDB_DELAYED_WRITE_RATE = 'aaa'; Got one of the listed errors SELECT @@global.ROCKSDB_DELAYED_WRITE_RATE; @@global.ROCKSDB_DELAYED_WRITE_RATE -16777216 +0 "Trying to set variable @@global.ROCKSDB_DELAYED_WRITE_RATE to 'bbb'" SET @@global.ROCKSDB_DELAYED_WRITE_RATE = 'bbb'; Got one of the listed errors SELECT @@global.ROCKSDB_DELAYED_WRITE_RATE; @@global.ROCKSDB_DELAYED_WRITE_RATE -16777216 +0 "Trying to set variable @@global.ROCKSDB_DELAYED_WRITE_RATE to '-1'" SET @@global.ROCKSDB_DELAYED_WRITE_RATE = '-1'; Got one of the listed errors SELECT @@global.ROCKSDB_DELAYED_WRITE_RATE; @@global.ROCKSDB_DELAYED_WRITE_RATE -16777216 +0 "Trying to set variable @@global.ROCKSDB_DELAYED_WRITE_RATE to '101'" SET @@global.ROCKSDB_DELAYED_WRITE_RATE = '101'; Got one of the listed errors SELECT @@global.ROCKSDB_DELAYED_WRITE_RATE; @@global.ROCKSDB_DELAYED_WRITE_RATE -16777216 +0 "Trying to set variable 
@@global.ROCKSDB_DELAYED_WRITE_RATE to '484436'" SET @@global.ROCKSDB_DELAYED_WRITE_RATE = '484436'; Got one of the listed errors SELECT @@global.ROCKSDB_DELAYED_WRITE_RATE; @@global.ROCKSDB_DELAYED_WRITE_RATE -16777216 +0 SET @@global.ROCKSDB_DELAYED_WRITE_RATE = @start_global_value; SELECT @@global.ROCKSDB_DELAYED_WRITE_RATE; @@global.ROCKSDB_DELAYED_WRITE_RATE -16777216 +0 DROP TABLE valid_values; DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_disable_2pc_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_enable_2pc_basic.result similarity index 100% rename from storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_disable_2pc_basic.result rename to storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_enable_2pc_basic.result diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_enable_thread_tracking_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_enable_thread_tracking_basic.result index f12e39fff93e1..a63383a4d5936 100644 --- a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_enable_thread_tracking_basic.result +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_enable_thread_tracking_basic.result @@ -1,7 +1,7 @@ SET @start_global_value = @@global.ROCKSDB_ENABLE_THREAD_TRACKING; SELECT @start_global_value; @start_global_value -0 +1 "Trying to set variable @@global.ROCKSDB_ENABLE_THREAD_TRACKING to 444. It should fail because it is readonly." 
SET @@global.ROCKSDB_ENABLE_THREAD_TRACKING = 444; ERROR HY000: Variable 'rocksdb_enable_thread_tracking' is a read only variable diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_enable_ttl_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_enable_ttl_basic.result new file mode 100644 index 0000000000000..1f569235b6321 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_enable_ttl_basic.result @@ -0,0 +1,64 @@ +CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO valid_values VALUES(1); +INSERT INTO valid_values VALUES(0); +INSERT INTO valid_values VALUES('on'); +CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO invalid_values VALUES('\'aaa\''); +INSERT INTO invalid_values VALUES('\'bbb\''); +SET @start_global_value = @@global.ROCKSDB_ENABLE_TTL; +SELECT @start_global_value; +@start_global_value +1 +'# Setting to valid values in global scope#' +"Trying to set variable @@global.ROCKSDB_ENABLE_TTL to 1" +SET @@global.ROCKSDB_ENABLE_TTL = 1; +SELECT @@global.ROCKSDB_ENABLE_TTL; +@@global.ROCKSDB_ENABLE_TTL +1 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_ENABLE_TTL = DEFAULT; +SELECT @@global.ROCKSDB_ENABLE_TTL; +@@global.ROCKSDB_ENABLE_TTL +1 +"Trying to set variable @@global.ROCKSDB_ENABLE_TTL to 0" +SET @@global.ROCKSDB_ENABLE_TTL = 0; +SELECT @@global.ROCKSDB_ENABLE_TTL; +@@global.ROCKSDB_ENABLE_TTL +0 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_ENABLE_TTL = DEFAULT; +SELECT @@global.ROCKSDB_ENABLE_TTL; +@@global.ROCKSDB_ENABLE_TTL +1 +"Trying to set variable @@global.ROCKSDB_ENABLE_TTL to on" +SET @@global.ROCKSDB_ENABLE_TTL = on; +SELECT @@global.ROCKSDB_ENABLE_TTL; +@@global.ROCKSDB_ENABLE_TTL +1 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_ENABLE_TTL = DEFAULT; +SELECT @@global.ROCKSDB_ENABLE_TTL; +@@global.ROCKSDB_ENABLE_TTL +1 +"Trying to set variable 
@@session.ROCKSDB_ENABLE_TTL to 444. It should fail because it is not session." +SET @@session.ROCKSDB_ENABLE_TTL = 444; +ERROR HY000: Variable 'rocksdb_enable_ttl' is a GLOBAL variable and should be set with SET GLOBAL +'# Testing with invalid values in global scope #' +"Trying to set variable @@global.ROCKSDB_ENABLE_TTL to 'aaa'" +SET @@global.ROCKSDB_ENABLE_TTL = 'aaa'; +Got one of the listed errors +SELECT @@global.ROCKSDB_ENABLE_TTL; +@@global.ROCKSDB_ENABLE_TTL +1 +"Trying to set variable @@global.ROCKSDB_ENABLE_TTL to 'bbb'" +SET @@global.ROCKSDB_ENABLE_TTL = 'bbb'; +Got one of the listed errors +SELECT @@global.ROCKSDB_ENABLE_TTL; +@@global.ROCKSDB_ENABLE_TTL +1 +SET @@global.ROCKSDB_ENABLE_TTL = @start_global_value; +SELECT @@global.ROCKSDB_ENABLE_TTL; +@@global.ROCKSDB_ENABLE_TTL +1 +DROP TABLE valid_values; +DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_enable_ttl_read_filtering_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_enable_ttl_read_filtering_basic.result new file mode 100644 index 0000000000000..005c15e168bb5 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_enable_ttl_read_filtering_basic.result @@ -0,0 +1,64 @@ +CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO valid_values VALUES(1); +INSERT INTO valid_values VALUES(0); +INSERT INTO valid_values VALUES('on'); +CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO invalid_values VALUES('\'aaa\''); +INSERT INTO invalid_values VALUES('\'bbb\''); +SET @start_global_value = @@global.ROCKSDB_ENABLE_TTL_READ_FILTERING; +SELECT @start_global_value; +@start_global_value +1 +'# Setting to valid values in global scope#' +"Trying to set variable @@global.ROCKSDB_ENABLE_TTL_READ_FILTERING to 1" +SET @@global.ROCKSDB_ENABLE_TTL_READ_FILTERING = 1; +SELECT @@global.ROCKSDB_ENABLE_TTL_READ_FILTERING; +@@global.ROCKSDB_ENABLE_TTL_READ_FILTERING +1 +"Setting the global 
scope variable back to default" +SET @@global.ROCKSDB_ENABLE_TTL_READ_FILTERING = DEFAULT; +SELECT @@global.ROCKSDB_ENABLE_TTL_READ_FILTERING; +@@global.ROCKSDB_ENABLE_TTL_READ_FILTERING +1 +"Trying to set variable @@global.ROCKSDB_ENABLE_TTL_READ_FILTERING to 0" +SET @@global.ROCKSDB_ENABLE_TTL_READ_FILTERING = 0; +SELECT @@global.ROCKSDB_ENABLE_TTL_READ_FILTERING; +@@global.ROCKSDB_ENABLE_TTL_READ_FILTERING +0 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_ENABLE_TTL_READ_FILTERING = DEFAULT; +SELECT @@global.ROCKSDB_ENABLE_TTL_READ_FILTERING; +@@global.ROCKSDB_ENABLE_TTL_READ_FILTERING +1 +"Trying to set variable @@global.ROCKSDB_ENABLE_TTL_READ_FILTERING to on" +SET @@global.ROCKSDB_ENABLE_TTL_READ_FILTERING = on; +SELECT @@global.ROCKSDB_ENABLE_TTL_READ_FILTERING; +@@global.ROCKSDB_ENABLE_TTL_READ_FILTERING +1 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_ENABLE_TTL_READ_FILTERING = DEFAULT; +SELECT @@global.ROCKSDB_ENABLE_TTL_READ_FILTERING; +@@global.ROCKSDB_ENABLE_TTL_READ_FILTERING +1 +"Trying to set variable @@session.ROCKSDB_ENABLE_TTL_READ_FILTERING to 444. It should fail because it is not session." 
+SET @@session.ROCKSDB_ENABLE_TTL_READ_FILTERING = 444; +ERROR HY000: Variable 'rocksdb_enable_ttl_read_filtering' is a GLOBAL variable and should be set with SET GLOBAL +'# Testing with invalid values in global scope #' +"Trying to set variable @@global.ROCKSDB_ENABLE_TTL_READ_FILTERING to 'aaa'" +SET @@global.ROCKSDB_ENABLE_TTL_READ_FILTERING = 'aaa'; +Got one of the listed errors +SELECT @@global.ROCKSDB_ENABLE_TTL_READ_FILTERING; +@@global.ROCKSDB_ENABLE_TTL_READ_FILTERING +1 +"Trying to set variable @@global.ROCKSDB_ENABLE_TTL_READ_FILTERING to 'bbb'" +SET @@global.ROCKSDB_ENABLE_TTL_READ_FILTERING = 'bbb'; +Got one of the listed errors +SELECT @@global.ROCKSDB_ENABLE_TTL_READ_FILTERING; +@@global.ROCKSDB_ENABLE_TTL_READ_FILTERING +1 +SET @@global.ROCKSDB_ENABLE_TTL_READ_FILTERING = @start_global_value; +SELECT @@global.ROCKSDB_ENABLE_TTL_READ_FILTERING; +@@global.ROCKSDB_ENABLE_TTL_READ_FILTERING +1 +DROP TABLE valid_values; +DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_enable_write_thread_adaptive_yield_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_enable_write_thread_adaptive_yield_basic.result index c93152c475672..37107be469ff4 100644 --- a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_enable_write_thread_adaptive_yield_basic.result +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_enable_write_thread_adaptive_yield_basic.result @@ -1,64 +1,7 @@ -CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam; -INSERT INTO valid_values VALUES(1); -INSERT INTO valid_values VALUES(0); -INSERT INTO valid_values VALUES('on'); -CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; -INSERT INTO invalid_values VALUES('\'aaa\''); -INSERT INTO invalid_values VALUES('\'bbb\''); SET @start_global_value = @@global.ROCKSDB_ENABLE_WRITE_THREAD_ADAPTIVE_YIELD; SELECT @start_global_value; @start_global_value 0 -'# Setting to valid values in global scope#' -"Trying to set 
variable @@global.ROCKSDB_ENABLE_WRITE_THREAD_ADAPTIVE_YIELD to 1" -SET @@global.ROCKSDB_ENABLE_WRITE_THREAD_ADAPTIVE_YIELD = 1; -SELECT @@global.ROCKSDB_ENABLE_WRITE_THREAD_ADAPTIVE_YIELD; -@@global.ROCKSDB_ENABLE_WRITE_THREAD_ADAPTIVE_YIELD -1 -"Setting the global scope variable back to default" -SET @@global.ROCKSDB_ENABLE_WRITE_THREAD_ADAPTIVE_YIELD = DEFAULT; -SELECT @@global.ROCKSDB_ENABLE_WRITE_THREAD_ADAPTIVE_YIELD; -@@global.ROCKSDB_ENABLE_WRITE_THREAD_ADAPTIVE_YIELD -0 -"Trying to set variable @@global.ROCKSDB_ENABLE_WRITE_THREAD_ADAPTIVE_YIELD to 0" -SET @@global.ROCKSDB_ENABLE_WRITE_THREAD_ADAPTIVE_YIELD = 0; -SELECT @@global.ROCKSDB_ENABLE_WRITE_THREAD_ADAPTIVE_YIELD; -@@global.ROCKSDB_ENABLE_WRITE_THREAD_ADAPTIVE_YIELD -0 -"Setting the global scope variable back to default" -SET @@global.ROCKSDB_ENABLE_WRITE_THREAD_ADAPTIVE_YIELD = DEFAULT; -SELECT @@global.ROCKSDB_ENABLE_WRITE_THREAD_ADAPTIVE_YIELD; -@@global.ROCKSDB_ENABLE_WRITE_THREAD_ADAPTIVE_YIELD -0 -"Trying to set variable @@global.ROCKSDB_ENABLE_WRITE_THREAD_ADAPTIVE_YIELD to on" -SET @@global.ROCKSDB_ENABLE_WRITE_THREAD_ADAPTIVE_YIELD = on; -SELECT @@global.ROCKSDB_ENABLE_WRITE_THREAD_ADAPTIVE_YIELD; -@@global.ROCKSDB_ENABLE_WRITE_THREAD_ADAPTIVE_YIELD -1 -"Setting the global scope variable back to default" -SET @@global.ROCKSDB_ENABLE_WRITE_THREAD_ADAPTIVE_YIELD = DEFAULT; -SELECT @@global.ROCKSDB_ENABLE_WRITE_THREAD_ADAPTIVE_YIELD; -@@global.ROCKSDB_ENABLE_WRITE_THREAD_ADAPTIVE_YIELD -0 -"Trying to set variable @@session.ROCKSDB_ENABLE_WRITE_THREAD_ADAPTIVE_YIELD to 444. It should fail because it is not session." 
-SET @@session.ROCKSDB_ENABLE_WRITE_THREAD_ADAPTIVE_YIELD = 444; -ERROR HY000: Variable 'rocksdb_enable_write_thread_adaptive_yield' is a GLOBAL variable and should be set with SET GLOBAL -'# Testing with invalid values in global scope #' -"Trying to set variable @@global.ROCKSDB_ENABLE_WRITE_THREAD_ADAPTIVE_YIELD to 'aaa'" -SET @@global.ROCKSDB_ENABLE_WRITE_THREAD_ADAPTIVE_YIELD = 'aaa'; -Got one of the listed errors -SELECT @@global.ROCKSDB_ENABLE_WRITE_THREAD_ADAPTIVE_YIELD; -@@global.ROCKSDB_ENABLE_WRITE_THREAD_ADAPTIVE_YIELD -0 -"Trying to set variable @@global.ROCKSDB_ENABLE_WRITE_THREAD_ADAPTIVE_YIELD to 'bbb'" -SET @@global.ROCKSDB_ENABLE_WRITE_THREAD_ADAPTIVE_YIELD = 'bbb'; -Got one of the listed errors -SELECT @@global.ROCKSDB_ENABLE_WRITE_THREAD_ADAPTIVE_YIELD; -@@global.ROCKSDB_ENABLE_WRITE_THREAD_ADAPTIVE_YIELD -0 -SET @@global.ROCKSDB_ENABLE_WRITE_THREAD_ADAPTIVE_YIELD = @start_global_value; -SELECT @@global.ROCKSDB_ENABLE_WRITE_THREAD_ADAPTIVE_YIELD; -@@global.ROCKSDB_ENABLE_WRITE_THREAD_ADAPTIVE_YIELD -0 -DROP TABLE valid_values; -DROP TABLE invalid_values; +"Trying to set variable @@global.ROCKSDB_ENABLE_WRITE_THREAD_ADAPTIVE_YIELD to 444. It should fail because it is readonly." 
+SET @@global.ROCKSDB_ENABLE_WRITE_THREAD_ADAPTIVE_YIELD = 444; +ERROR HY000: Variable 'rocksdb_enable_write_thread_adaptive_yield' is a read only variable diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_flush_log_at_trx_commit_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_flush_log_at_trx_commit_basic.result index 19be4e3ad5def..b8fe837d2e628 100644 --- a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_flush_log_at_trx_commit_basic.result +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_flush_log_at_trx_commit_basic.result @@ -8,10 +8,6 @@ SET @start_global_value = @@global.ROCKSDB_FLUSH_LOG_AT_TRX_COMMIT; SELECT @start_global_value; @start_global_value 1 -SET @start_session_value = @@session.ROCKSDB_FLUSH_LOG_AT_TRX_COMMIT; -SELECT @start_session_value; -@start_session_value -1 '# Setting to valid values in global scope#' "Trying to set variable @@global.ROCKSDB_FLUSH_LOG_AT_TRX_COMMIT to 2" SET @@global.ROCKSDB_FLUSH_LOG_AT_TRX_COMMIT = 2; @@ -43,37 +39,9 @@ SET @@global.ROCKSDB_FLUSH_LOG_AT_TRX_COMMIT = DEFAULT; SELECT @@global.ROCKSDB_FLUSH_LOG_AT_TRX_COMMIT; @@global.ROCKSDB_FLUSH_LOG_AT_TRX_COMMIT 1 -'# Setting to valid values in session scope#' -"Trying to set variable @@session.ROCKSDB_FLUSH_LOG_AT_TRX_COMMIT to 2" -SET @@session.ROCKSDB_FLUSH_LOG_AT_TRX_COMMIT = 2; -SELECT @@session.ROCKSDB_FLUSH_LOG_AT_TRX_COMMIT; -@@session.ROCKSDB_FLUSH_LOG_AT_TRX_COMMIT -2 -"Setting the session scope variable back to default" -SET @@session.ROCKSDB_FLUSH_LOG_AT_TRX_COMMIT = DEFAULT; -SELECT @@session.ROCKSDB_FLUSH_LOG_AT_TRX_COMMIT; -@@session.ROCKSDB_FLUSH_LOG_AT_TRX_COMMIT -1 -"Trying to set variable @@session.ROCKSDB_FLUSH_LOG_AT_TRX_COMMIT to 1" -SET @@session.ROCKSDB_FLUSH_LOG_AT_TRX_COMMIT = 1; -SELECT @@session.ROCKSDB_FLUSH_LOG_AT_TRX_COMMIT; -@@session.ROCKSDB_FLUSH_LOG_AT_TRX_COMMIT -1 -"Setting the session scope variable back to default" -SET @@session.ROCKSDB_FLUSH_LOG_AT_TRX_COMMIT = DEFAULT; 
-SELECT @@session.ROCKSDB_FLUSH_LOG_AT_TRX_COMMIT; -@@session.ROCKSDB_FLUSH_LOG_AT_TRX_COMMIT -1 -"Trying to set variable @@session.ROCKSDB_FLUSH_LOG_AT_TRX_COMMIT to 0" -SET @@session.ROCKSDB_FLUSH_LOG_AT_TRX_COMMIT = 0; -SELECT @@session.ROCKSDB_FLUSH_LOG_AT_TRX_COMMIT; -@@session.ROCKSDB_FLUSH_LOG_AT_TRX_COMMIT -0 -"Setting the session scope variable back to default" -SET @@session.ROCKSDB_FLUSH_LOG_AT_TRX_COMMIT = DEFAULT; -SELECT @@session.ROCKSDB_FLUSH_LOG_AT_TRX_COMMIT; -@@session.ROCKSDB_FLUSH_LOG_AT_TRX_COMMIT -1 +"Trying to set variable @@session.ROCKSDB_FLUSH_LOG_AT_TRX_COMMIT to 444. It should fail because it is not session." +SET @@session.ROCKSDB_FLUSH_LOG_AT_TRX_COMMIT = 444; +ERROR HY000: Variable 'rocksdb_flush_log_at_trx_commit' is a GLOBAL variable and should be set with SET GLOBAL '# Testing with invalid values in global scope #' "Trying to set variable @@global.ROCKSDB_FLUSH_LOG_AT_TRX_COMMIT to 'aaa'" SET @@global.ROCKSDB_FLUSH_LOG_AT_TRX_COMMIT = 'aaa'; @@ -85,9 +53,5 @@ SET @@global.ROCKSDB_FLUSH_LOG_AT_TRX_COMMIT = @start_global_value; SELECT @@global.ROCKSDB_FLUSH_LOG_AT_TRX_COMMIT; @@global.ROCKSDB_FLUSH_LOG_AT_TRX_COMMIT 1 -SET @@session.ROCKSDB_FLUSH_LOG_AT_TRX_COMMIT = @start_session_value; -SELECT @@session.ROCKSDB_FLUSH_LOG_AT_TRX_COMMIT; -@@session.ROCKSDB_FLUSH_LOG_AT_TRX_COMMIT -1 DROP TABLE valid_values; DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_force_flush_memtable_and_lzero_now_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_force_flush_memtable_and_lzero_now_basic.result new file mode 100644 index 0000000000000..68cfeb07fc711 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_force_flush_memtable_and_lzero_now_basic.result @@ -0,0 +1,50 @@ +CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO valid_values VALUES(1); +INSERT INTO valid_values VALUES(0); +INSERT INTO valid_values VALUES('on'); +CREATE TABLE 
invalid_values (value varchar(255)) ENGINE=myisam; +SET @start_global_value = @@global.ROCKSDB_FORCE_FLUSH_MEMTABLE_AND_LZERO_NOW; +SELECT @start_global_value; +@start_global_value +0 +'# Setting to valid values in global scope#' +"Trying to set variable @@global.ROCKSDB_FORCE_FLUSH_MEMTABLE_AND_LZERO_NOW to 1" +SET @@global.ROCKSDB_FORCE_FLUSH_MEMTABLE_AND_LZERO_NOW = 1; +SELECT @@global.ROCKSDB_FORCE_FLUSH_MEMTABLE_AND_LZERO_NOW; +@@global.ROCKSDB_FORCE_FLUSH_MEMTABLE_AND_LZERO_NOW +0 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_FORCE_FLUSH_MEMTABLE_AND_LZERO_NOW = DEFAULT; +SELECT @@global.ROCKSDB_FORCE_FLUSH_MEMTABLE_AND_LZERO_NOW; +@@global.ROCKSDB_FORCE_FLUSH_MEMTABLE_AND_LZERO_NOW +0 +"Trying to set variable @@global.ROCKSDB_FORCE_FLUSH_MEMTABLE_AND_LZERO_NOW to 0" +SET @@global.ROCKSDB_FORCE_FLUSH_MEMTABLE_AND_LZERO_NOW = 0; +SELECT @@global.ROCKSDB_FORCE_FLUSH_MEMTABLE_AND_LZERO_NOW; +@@global.ROCKSDB_FORCE_FLUSH_MEMTABLE_AND_LZERO_NOW +0 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_FORCE_FLUSH_MEMTABLE_AND_LZERO_NOW = DEFAULT; +SELECT @@global.ROCKSDB_FORCE_FLUSH_MEMTABLE_AND_LZERO_NOW; +@@global.ROCKSDB_FORCE_FLUSH_MEMTABLE_AND_LZERO_NOW +0 +"Trying to set variable @@global.ROCKSDB_FORCE_FLUSH_MEMTABLE_AND_LZERO_NOW to on" +SET @@global.ROCKSDB_FORCE_FLUSH_MEMTABLE_AND_LZERO_NOW = on; +SELECT @@global.ROCKSDB_FORCE_FLUSH_MEMTABLE_AND_LZERO_NOW; +@@global.ROCKSDB_FORCE_FLUSH_MEMTABLE_AND_LZERO_NOW +0 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_FORCE_FLUSH_MEMTABLE_AND_LZERO_NOW = DEFAULT; +SELECT @@global.ROCKSDB_FORCE_FLUSH_MEMTABLE_AND_LZERO_NOW; +@@global.ROCKSDB_FORCE_FLUSH_MEMTABLE_AND_LZERO_NOW +0 +"Trying to set variable @@session.ROCKSDB_FORCE_FLUSH_MEMTABLE_AND_LZERO_NOW to 444. It should fail because it is not session." 
+SET @@session.ROCKSDB_FORCE_FLUSH_MEMTABLE_AND_LZERO_NOW = 444; +ERROR HY000: Variable 'rocksdb_force_flush_memtable_and_lzero_now' is a GLOBAL variable and should be set with SET GLOBAL +'# Testing with invalid values in global scope #' +SET @@global.ROCKSDB_FORCE_FLUSH_MEMTABLE_AND_LZERO_NOW = @start_global_value; +SELECT @@global.ROCKSDB_FORCE_FLUSH_MEMTABLE_AND_LZERO_NOW; +@@global.ROCKSDB_FORCE_FLUSH_MEMTABLE_AND_LZERO_NOW +0 +DROP TABLE valid_values; +DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_io_write_timeout_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_io_write_timeout_basic.result new file mode 100644 index 0000000000000..0917a3970f484 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_io_write_timeout_basic.result @@ -0,0 +1,86 @@ +CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO valid_values VALUES(10); +INSERT INTO valid_values VALUES(100); +INSERT INTO valid_values VALUES(0); +INSERT INTO valid_values VALUES(42); +INSERT INTO valid_values VALUES(142); +CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO invalid_values VALUES('\'aaa\''); +INSERT INTO invalid_values VALUES('\'bbb\''); +SET @start_global_value = @@global.ROCKSDB_IO_WRITE_TIMEOUT; +SELECT @start_global_value; +@start_global_value +0 +'# Setting to valid values in global scope#' +"Trying to set variable @@global.ROCKSDB_IO_WRITE_TIMEOUT to 10" +SET @@global.ROCKSDB_IO_WRITE_TIMEOUT = 10; +SELECT @@global.ROCKSDB_IO_WRITE_TIMEOUT; +@@global.ROCKSDB_IO_WRITE_TIMEOUT +10 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_IO_WRITE_TIMEOUT = DEFAULT; +SELECT @@global.ROCKSDB_IO_WRITE_TIMEOUT; +@@global.ROCKSDB_IO_WRITE_TIMEOUT +0 +"Trying to set variable @@global.ROCKSDB_IO_WRITE_TIMEOUT to 100" +SET @@global.ROCKSDB_IO_WRITE_TIMEOUT = 100; +SELECT @@global.ROCKSDB_IO_WRITE_TIMEOUT; +@@global.ROCKSDB_IO_WRITE_TIMEOUT +100 
+"Setting the global scope variable back to default" +SET @@global.ROCKSDB_IO_WRITE_TIMEOUT = DEFAULT; +SELECT @@global.ROCKSDB_IO_WRITE_TIMEOUT; +@@global.ROCKSDB_IO_WRITE_TIMEOUT +0 +"Trying to set variable @@global.ROCKSDB_IO_WRITE_TIMEOUT to 0" +SET @@global.ROCKSDB_IO_WRITE_TIMEOUT = 0; +SELECT @@global.ROCKSDB_IO_WRITE_TIMEOUT; +@@global.ROCKSDB_IO_WRITE_TIMEOUT +0 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_IO_WRITE_TIMEOUT = DEFAULT; +SELECT @@global.ROCKSDB_IO_WRITE_TIMEOUT; +@@global.ROCKSDB_IO_WRITE_TIMEOUT +0 +"Trying to set variable @@global.ROCKSDB_IO_WRITE_TIMEOUT to 42" +SET @@global.ROCKSDB_IO_WRITE_TIMEOUT = 42; +SELECT @@global.ROCKSDB_IO_WRITE_TIMEOUT; +@@global.ROCKSDB_IO_WRITE_TIMEOUT +42 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_IO_WRITE_TIMEOUT = DEFAULT; +SELECT @@global.ROCKSDB_IO_WRITE_TIMEOUT; +@@global.ROCKSDB_IO_WRITE_TIMEOUT +0 +"Trying to set variable @@global.ROCKSDB_IO_WRITE_TIMEOUT to 142" +SET @@global.ROCKSDB_IO_WRITE_TIMEOUT = 142; +SELECT @@global.ROCKSDB_IO_WRITE_TIMEOUT; +@@global.ROCKSDB_IO_WRITE_TIMEOUT +142 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_IO_WRITE_TIMEOUT = DEFAULT; +SELECT @@global.ROCKSDB_IO_WRITE_TIMEOUT; +@@global.ROCKSDB_IO_WRITE_TIMEOUT +0 +"Trying to set variable @@session.ROCKSDB_IO_WRITE_TIMEOUT to 444. It should fail because it is not session." 
+SET @@session.ROCKSDB_IO_WRITE_TIMEOUT = 444; +ERROR HY000: Variable 'rocksdb_io_write_timeout' is a GLOBAL variable and should be set with SET GLOBAL +'# Testing with invalid values in global scope #' +"Trying to set variable @@global.ROCKSDB_IO_WRITE_TIMEOUT to 'aaa'" +SET @@global.ROCKSDB_IO_WRITE_TIMEOUT = 'aaa'; +Got one of the listed errors +SELECT @@global.ROCKSDB_IO_WRITE_TIMEOUT; +@@global.ROCKSDB_IO_WRITE_TIMEOUT +0 +"Trying to set variable @@global.ROCKSDB_IO_WRITE_TIMEOUT to 'bbb'" +SET @@global.ROCKSDB_IO_WRITE_TIMEOUT = 'bbb'; +Got one of the listed errors +SELECT @@global.ROCKSDB_IO_WRITE_TIMEOUT; +@@global.ROCKSDB_IO_WRITE_TIMEOUT +0 +SET @@global.ROCKSDB_IO_WRITE_TIMEOUT = @start_global_value; +SELECT @@global.ROCKSDB_IO_WRITE_TIMEOUT; +@@global.ROCKSDB_IO_WRITE_TIMEOUT +0 +DROP TABLE valid_values; +DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_max_background_compactions_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_max_background_compactions_basic.result deleted file mode 100644 index 714f21011272e..0000000000000 --- a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_max_background_compactions_basic.result +++ /dev/null @@ -1,46 +0,0 @@ -CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam; -INSERT INTO valid_values VALUES(1); -INSERT INTO valid_values VALUES(64); -CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; -INSERT INTO invalid_values VALUES('\'abc\''); -SET @start_global_value = @@global.ROCKSDB_MAX_BACKGROUND_COMPACTIONS; -SELECT @start_global_value; -@start_global_value -1 -'# Setting to valid values in global scope#' -"Trying to set variable @@global.ROCKSDB_MAX_BACKGROUND_COMPACTIONS to 1" -SET @@global.ROCKSDB_MAX_BACKGROUND_COMPACTIONS = 1; -SELECT @@global.ROCKSDB_MAX_BACKGROUND_COMPACTIONS; -@@global.ROCKSDB_MAX_BACKGROUND_COMPACTIONS -1 -"Setting the global scope variable back to default" -SET 
@@global.ROCKSDB_MAX_BACKGROUND_COMPACTIONS = DEFAULT; -SELECT @@global.ROCKSDB_MAX_BACKGROUND_COMPACTIONS; -@@global.ROCKSDB_MAX_BACKGROUND_COMPACTIONS -1 -"Trying to set variable @@global.ROCKSDB_MAX_BACKGROUND_COMPACTIONS to 64" -SET @@global.ROCKSDB_MAX_BACKGROUND_COMPACTIONS = 64; -SELECT @@global.ROCKSDB_MAX_BACKGROUND_COMPACTIONS; -@@global.ROCKSDB_MAX_BACKGROUND_COMPACTIONS -64 -"Setting the global scope variable back to default" -SET @@global.ROCKSDB_MAX_BACKGROUND_COMPACTIONS = DEFAULT; -SELECT @@global.ROCKSDB_MAX_BACKGROUND_COMPACTIONS; -@@global.ROCKSDB_MAX_BACKGROUND_COMPACTIONS -1 -"Trying to set variable @@session.ROCKSDB_MAX_BACKGROUND_COMPACTIONS to 444. It should fail because it is not session." -SET @@session.ROCKSDB_MAX_BACKGROUND_COMPACTIONS = 444; -ERROR HY000: Variable 'rocksdb_max_background_compactions' is a GLOBAL variable and should be set with SET GLOBAL -'# Testing with invalid values in global scope #' -"Trying to set variable @@global.ROCKSDB_MAX_BACKGROUND_COMPACTIONS to 'abc'" -SET @@global.ROCKSDB_MAX_BACKGROUND_COMPACTIONS = 'abc'; -Got one of the listed errors -SELECT @@global.ROCKSDB_MAX_BACKGROUND_COMPACTIONS; -@@global.ROCKSDB_MAX_BACKGROUND_COMPACTIONS -1 -SET @@global.ROCKSDB_MAX_BACKGROUND_COMPACTIONS = @start_global_value; -SELECT @@global.ROCKSDB_MAX_BACKGROUND_COMPACTIONS; -@@global.ROCKSDB_MAX_BACKGROUND_COMPACTIONS -1 -DROP TABLE valid_values; -DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_max_background_flushes_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_max_background_flushes_basic.result deleted file mode 100644 index ff8f2b5997bcf..0000000000000 --- a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_max_background_flushes_basic.result +++ /dev/null @@ -1,7 +0,0 @@ -SET @start_global_value = @@global.ROCKSDB_MAX_BACKGROUND_FLUSHES; -SELECT @start_global_value; -@start_global_value -1 -"Trying to set variable 
@@global.ROCKSDB_MAX_BACKGROUND_FLUSHES to 444. It should fail because it is readonly." -SET @@global.ROCKSDB_MAX_BACKGROUND_FLUSHES = 444; -ERROR HY000: Variable 'rocksdb_max_background_flushes' is a read only variable diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_max_background_jobs_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_max_background_jobs_basic.result new file mode 100644 index 0000000000000..88e6d21c3ec8a --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_max_background_jobs_basic.result @@ -0,0 +1,46 @@ +CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO valid_values VALUES(1); +INSERT INTO valid_values VALUES(64); +CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO invalid_values VALUES('\'abc\''); +SET @start_global_value = @@global.ROCKSDB_MAX_BACKGROUND_JOBS; +SELECT @start_global_value; +@start_global_value +2 +'# Setting to valid values in global scope#' +"Trying to set variable @@global.ROCKSDB_MAX_BACKGROUND_JOBS to 1" +SET @@global.ROCKSDB_MAX_BACKGROUND_JOBS = 1; +SELECT @@global.ROCKSDB_MAX_BACKGROUND_JOBS; +@@global.ROCKSDB_MAX_BACKGROUND_JOBS +1 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_MAX_BACKGROUND_JOBS = DEFAULT; +SELECT @@global.ROCKSDB_MAX_BACKGROUND_JOBS; +@@global.ROCKSDB_MAX_BACKGROUND_JOBS +2 +"Trying to set variable @@global.ROCKSDB_MAX_BACKGROUND_JOBS to 64" +SET @@global.ROCKSDB_MAX_BACKGROUND_JOBS = 64; +SELECT @@global.ROCKSDB_MAX_BACKGROUND_JOBS; +@@global.ROCKSDB_MAX_BACKGROUND_JOBS +64 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_MAX_BACKGROUND_JOBS = DEFAULT; +SELECT @@global.ROCKSDB_MAX_BACKGROUND_JOBS; +@@global.ROCKSDB_MAX_BACKGROUND_JOBS +2 +"Trying to set variable @@session.ROCKSDB_MAX_BACKGROUND_JOBS to 444. It should fail because it is not session." 
+SET @@session.ROCKSDB_MAX_BACKGROUND_JOBS = 444; +ERROR HY000: Variable 'rocksdb_max_background_jobs' is a GLOBAL variable and should be set with SET GLOBAL +'# Testing with invalid values in global scope #' +"Trying to set variable @@global.ROCKSDB_MAX_BACKGROUND_JOBS to 'abc'" +SET @@global.ROCKSDB_MAX_BACKGROUND_JOBS = 'abc'; +Got one of the listed errors +SELECT @@global.ROCKSDB_MAX_BACKGROUND_JOBS; +@@global.ROCKSDB_MAX_BACKGROUND_JOBS +2 +SET @@global.ROCKSDB_MAX_BACKGROUND_JOBS = @start_global_value; +SELECT @@global.ROCKSDB_MAX_BACKGROUND_JOBS; +@@global.ROCKSDB_MAX_BACKGROUND_JOBS +2 +DROP TABLE valid_values; +DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_reset_stats_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_reset_stats_basic.result new file mode 100644 index 0000000000000..d585e73489c27 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_reset_stats_basic.result @@ -0,0 +1,97 @@ +CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO valid_values VALUES(1); +INSERT INTO valid_values VALUES(0); +INSERT INTO valid_values VALUES('on'); +INSERT INTO valid_values VALUES('off'); +INSERT INTO valid_values VALUES('true'); +INSERT INTO valid_values VALUES('false'); +CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO invalid_values VALUES('\'aaa\''); +INSERT INTO invalid_values VALUES('\'bbb\''); +SET @start_global_value = @@global.ROCKSDB_RESET_STATS; +SELECT @start_global_value; +@start_global_value +0 +'# Setting to valid values in global scope#' +"Trying to set variable @@global.ROCKSDB_RESET_STATS to 1" +SET @@global.ROCKSDB_RESET_STATS = 1; +SELECT @@global.ROCKSDB_RESET_STATS; +@@global.ROCKSDB_RESET_STATS +1 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_RESET_STATS = DEFAULT; +SELECT @@global.ROCKSDB_RESET_STATS; +@@global.ROCKSDB_RESET_STATS +0 +"Trying to set variable 
@@global.ROCKSDB_RESET_STATS to 0" +SET @@global.ROCKSDB_RESET_STATS = 0; +SELECT @@global.ROCKSDB_RESET_STATS; +@@global.ROCKSDB_RESET_STATS +0 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_RESET_STATS = DEFAULT; +SELECT @@global.ROCKSDB_RESET_STATS; +@@global.ROCKSDB_RESET_STATS +0 +"Trying to set variable @@global.ROCKSDB_RESET_STATS to on" +SET @@global.ROCKSDB_RESET_STATS = on; +SELECT @@global.ROCKSDB_RESET_STATS; +@@global.ROCKSDB_RESET_STATS +1 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_RESET_STATS = DEFAULT; +SELECT @@global.ROCKSDB_RESET_STATS; +@@global.ROCKSDB_RESET_STATS +0 +"Trying to set variable @@global.ROCKSDB_RESET_STATS to off" +SET @@global.ROCKSDB_RESET_STATS = off; +SELECT @@global.ROCKSDB_RESET_STATS; +@@global.ROCKSDB_RESET_STATS +0 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_RESET_STATS = DEFAULT; +SELECT @@global.ROCKSDB_RESET_STATS; +@@global.ROCKSDB_RESET_STATS +0 +"Trying to set variable @@global.ROCKSDB_RESET_STATS to true" +SET @@global.ROCKSDB_RESET_STATS = true; +SELECT @@global.ROCKSDB_RESET_STATS; +@@global.ROCKSDB_RESET_STATS +1 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_RESET_STATS = DEFAULT; +SELECT @@global.ROCKSDB_RESET_STATS; +@@global.ROCKSDB_RESET_STATS +0 +"Trying to set variable @@global.ROCKSDB_RESET_STATS to false" +SET @@global.ROCKSDB_RESET_STATS = false; +SELECT @@global.ROCKSDB_RESET_STATS; +@@global.ROCKSDB_RESET_STATS +0 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_RESET_STATS = DEFAULT; +SELECT @@global.ROCKSDB_RESET_STATS; +@@global.ROCKSDB_RESET_STATS +0 +"Trying to set variable @@session.ROCKSDB_RESET_STATS to 444. It should fail because it is not session." 
+SET @@session.ROCKSDB_RESET_STATS = 444; +ERROR HY000: Variable 'rocksdb_reset_stats' is a GLOBAL variable and should be set with SET GLOBAL +'# Testing with invalid values in global scope #' +"Trying to set variable @@global.ROCKSDB_RESET_STATS to 'aaa'" +SET @@global.ROCKSDB_RESET_STATS = 'aaa'; +Got one of the listed errors +SELECT @@global.ROCKSDB_RESET_STATS; +@@global.ROCKSDB_RESET_STATS +0 +"Trying to set variable @@global.ROCKSDB_RESET_STATS to 'bbb'" +SET @@global.ROCKSDB_RESET_STATS = 'bbb'; +Got one of the listed errors +SELECT @@global.ROCKSDB_RESET_STATS; +@@global.ROCKSDB_RESET_STATS +0 +SET @@global.ROCKSDB_RESET_STATS = @start_global_value; +SELECT @@global.ROCKSDB_RESET_STATS; +@@global.ROCKSDB_RESET_STATS +0 +DROP TABLE valid_values; +DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_sst_mgr_rate_bytes_per_sec_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_sst_mgr_rate_bytes_per_sec_basic.result new file mode 100644 index 0000000000000..a714f1c2fdc7f --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_sst_mgr_rate_bytes_per_sec_basic.result @@ -0,0 +1,85 @@ +CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO valid_values VALUES(100); +INSERT INTO valid_values VALUES(1); +INSERT INTO valid_values VALUES(0); +CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO invalid_values VALUES('\'aaa\''); +INSERT INTO invalid_values VALUES('\'bbb\''); +INSERT INTO invalid_values VALUES('\'-1\''); +INSERT INTO invalid_values VALUES('\'101\''); +INSERT INTO invalid_values VALUES('\'484436\''); +SET @start_global_value = @@global.ROCKSDB_SST_MGR_RATE_BYTES_PER_SEC; +SELECT @start_global_value; +@start_global_value +0 +'# Setting to valid values in global scope#' +"Trying to set variable @@global.ROCKSDB_SST_MGR_RATE_BYTES_PER_SEC to 100" +SET @@global.ROCKSDB_SST_MGR_RATE_BYTES_PER_SEC = 100; +SELECT 
@@global.ROCKSDB_SST_MGR_RATE_BYTES_PER_SEC; +@@global.ROCKSDB_SST_MGR_RATE_BYTES_PER_SEC +100 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_SST_MGR_RATE_BYTES_PER_SEC = DEFAULT; +SELECT @@global.ROCKSDB_SST_MGR_RATE_BYTES_PER_SEC; +@@global.ROCKSDB_SST_MGR_RATE_BYTES_PER_SEC +0 +"Trying to set variable @@global.ROCKSDB_SST_MGR_RATE_BYTES_PER_SEC to 1" +SET @@global.ROCKSDB_SST_MGR_RATE_BYTES_PER_SEC = 1; +SELECT @@global.ROCKSDB_SST_MGR_RATE_BYTES_PER_SEC; +@@global.ROCKSDB_SST_MGR_RATE_BYTES_PER_SEC +1 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_SST_MGR_RATE_BYTES_PER_SEC = DEFAULT; +SELECT @@global.ROCKSDB_SST_MGR_RATE_BYTES_PER_SEC; +@@global.ROCKSDB_SST_MGR_RATE_BYTES_PER_SEC +0 +"Trying to set variable @@global.ROCKSDB_SST_MGR_RATE_BYTES_PER_SEC to 0" +SET @@global.ROCKSDB_SST_MGR_RATE_BYTES_PER_SEC = 0; +SELECT @@global.ROCKSDB_SST_MGR_RATE_BYTES_PER_SEC; +@@global.ROCKSDB_SST_MGR_RATE_BYTES_PER_SEC +0 +"Setting the global scope variable back to default" +SET @@global.ROCKSDB_SST_MGR_RATE_BYTES_PER_SEC = DEFAULT; +SELECT @@global.ROCKSDB_SST_MGR_RATE_BYTES_PER_SEC; +@@global.ROCKSDB_SST_MGR_RATE_BYTES_PER_SEC +0 +"Trying to set variable @@session.ROCKSDB_SST_MGR_RATE_BYTES_PER_SEC to 444. It should fail because it is not session." 
+SET @@session.ROCKSDB_SST_MGR_RATE_BYTES_PER_SEC = 444; +ERROR HY000: Variable 'rocksdb_sst_mgr_rate_bytes_per_sec' is a GLOBAL variable and should be set with SET GLOBAL +'# Testing with invalid values in global scope #' +"Trying to set variable @@global.ROCKSDB_SST_MGR_RATE_BYTES_PER_SEC to 'aaa'" +SET @@global.ROCKSDB_SST_MGR_RATE_BYTES_PER_SEC = 'aaa'; +Got one of the listed errors +SELECT @@global.ROCKSDB_SST_MGR_RATE_BYTES_PER_SEC; +@@global.ROCKSDB_SST_MGR_RATE_BYTES_PER_SEC +0 +"Trying to set variable @@global.ROCKSDB_SST_MGR_RATE_BYTES_PER_SEC to 'bbb'" +SET @@global.ROCKSDB_SST_MGR_RATE_BYTES_PER_SEC = 'bbb'; +Got one of the listed errors +SELECT @@global.ROCKSDB_SST_MGR_RATE_BYTES_PER_SEC; +@@global.ROCKSDB_SST_MGR_RATE_BYTES_PER_SEC +0 +"Trying to set variable @@global.ROCKSDB_SST_MGR_RATE_BYTES_PER_SEC to '-1'" +SET @@global.ROCKSDB_SST_MGR_RATE_BYTES_PER_SEC = '-1'; +Got one of the listed errors +SELECT @@global.ROCKSDB_SST_MGR_RATE_BYTES_PER_SEC; +@@global.ROCKSDB_SST_MGR_RATE_BYTES_PER_SEC +0 +"Trying to set variable @@global.ROCKSDB_SST_MGR_RATE_BYTES_PER_SEC to '101'" +SET @@global.ROCKSDB_SST_MGR_RATE_BYTES_PER_SEC = '101'; +Got one of the listed errors +SELECT @@global.ROCKSDB_SST_MGR_RATE_BYTES_PER_SEC; +@@global.ROCKSDB_SST_MGR_RATE_BYTES_PER_SEC +0 +"Trying to set variable @@global.ROCKSDB_SST_MGR_RATE_BYTES_PER_SEC to '484436'" +SET @@global.ROCKSDB_SST_MGR_RATE_BYTES_PER_SEC = '484436'; +Got one of the listed errors +SELECT @@global.ROCKSDB_SST_MGR_RATE_BYTES_PER_SEC; +@@global.ROCKSDB_SST_MGR_RATE_BYTES_PER_SEC +0 +SET @@global.ROCKSDB_SST_MGR_RATE_BYTES_PER_SEC = @start_global_value; +SELECT @@global.ROCKSDB_SST_MGR_RATE_BYTES_PER_SEC; +@@global.ROCKSDB_SST_MGR_RATE_BYTES_PER_SEC +0 +DROP TABLE valid_values; +DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_update_cf_options_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_update_cf_options_basic.result new file mode 100644 
index 0000000000000..5ad5394db29fa --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_update_cf_options_basic.result @@ -0,0 +1,108 @@ +call mtr.add_suppression("MyRocks: NULL is not a valid option for updates to column family settings."); +call mtr.add_suppression("Invalid cf options, '=' expected *"); +call mtr.add_suppression("MyRocks: failed to parse the updated column family options = *"); +call mtr.add_suppression("Invalid cf config for default in override options *"); +DROP TABLE IF EXISTS t1; +Warnings: +Note 1051 Unknown table 'test.t1' +CREATE TABLE `t1` ( +`col1` bigint(20) NOT NULL, +`col2` varbinary(64) NOT NULL, +`col3` varbinary(256) NOT NULL, +`col4` bigint(20) NOT NULL, +`col5` mediumblob NOT NULL, +PRIMARY KEY (`col1`,`col2`,`col3`) COMMENT 'custom_p0_cfname=cf1;custom_p1_cfname=cf2', +UNIQUE KEY (`col2`, `col4`) COMMENT 'custom_p5_cfname=cf3' +) ENGINE=ROCKSDB DEFAULT CHARSET=latin1 +PARTITION BY LIST COLUMNS (`col2`) ( +PARTITION custom_p0 VALUES IN (0x12345), +PARTITION custom_p1 VALUES IN (0x23456), +PARTITION custom_p2 VALUES IN (0x34567), +PARTITION custom_p3 VALUES IN (0x45678), +PARTITION custom_p4 VALUES IN (0x56789), +PARTITION custom_p5 VALUES IN (0x6789A), +PARTITION custom_p6 VALUES IN (0x789AB), +PARTITION custom_p7 VALUES IN (0x89ABC) +); +USE information_schema; +SELECT @@global.rocksdb_update_cf_options; +@@global.rocksdb_update_cf_options +NULL +SET @@global.rocksdb_update_cf_options = NULL; +SELECT @@global.rocksdb_update_cf_options; +@@global.rocksdb_update_cf_options +NULL +SET @@global.rocksdb_update_cf_options = 'aaaaa'; +SELECT @@global.rocksdb_update_cf_options; +@@global.rocksdb_update_cf_options +NULL +SELECT * FROM ROCKSDB_CF_OPTIONS WHERE CF_NAME='default' AND OPTION_TYPE='WRITE_BUFFER_SIZE'; +CF_NAME OPTION_TYPE VALUE +default WRITE_BUFFER_SIZE 67108864 +SELECT * FROM ROCKSDB_CF_OPTIONS WHERE CF_NAME='default' AND OPTION_TYPE='TARGET_FILE_SIZE_BASE'; +CF_NAME OPTION_TYPE VALUE +default 
TARGET_FILE_SIZE_BASE 67108864 +SET @@global.rocksdb_update_cf_options = 'default={write_buffer_size=8m;target_file_size_base=2m};'; +SELECT @@global.rocksdb_update_cf_options; +@@global.rocksdb_update_cf_options +default={write_buffer_size=8m;target_file_size_base=2m}; +SELECT * FROM ROCKSDB_CF_OPTIONS WHERE CF_NAME='default' AND OPTION_TYPE='WRITE_BUFFER_SIZE'; +CF_NAME OPTION_TYPE VALUE +default WRITE_BUFFER_SIZE 8388608 +SELECT * FROM ROCKSDB_CF_OPTIONS WHERE CF_NAME='default' AND OPTION_TYPE='TARGET_FILE_SIZE_BASE'; +CF_NAME OPTION_TYPE VALUE +default TARGET_FILE_SIZE_BASE 2097152 +SELECT * FROM ROCKSDB_CF_OPTIONS WHERE CF_NAME='cf1' AND OPTION_TYPE='WRITE_BUFFER_SIZE'; +CF_NAME OPTION_TYPE VALUE +cf1 WRITE_BUFFER_SIZE 67108864 +SELECT * FROM ROCKSDB_CF_OPTIONS WHERE CF_NAME='cf1' AND OPTION_TYPE='TARGET_FILE_SIZE_BASE'; +CF_NAME OPTION_TYPE VALUE +cf1 TARGET_FILE_SIZE_BASE 67108864 +SELECT * FROM ROCKSDB_CF_OPTIONS WHERE CF_NAME='cf2' AND OPTION_TYPE='WRITE_BUFFER_SIZE'; +CF_NAME OPTION_TYPE VALUE +cf2 WRITE_BUFFER_SIZE 67108864 +SELECT * FROM ROCKSDB_CF_OPTIONS WHERE CF_NAME='cf2' AND OPTION_TYPE='MAX_BYTES_FOR_LEVEL_MULTIPLIER'; +CF_NAME OPTION_TYPE VALUE +cf2 MAX_BYTES_FOR_LEVEL_MULTIPLIER 10.000000 +SELECT * FROM ROCKSDB_CF_OPTIONS WHERE CF_NAME='cf3' AND OPTION_TYPE='TARGET_FILE_SIZE_BASE'; +CF_NAME OPTION_TYPE VALUE +cf3 TARGET_FILE_SIZE_BASE 67108864 +SET @@global.rocksdb_update_cf_options = 'cf1={write_buffer_size=8m;target_file_size_base=2m};cf2={write_buffer_size=16m;max_bytes_for_level_multiplier=8};cf3={target_file_size_base=4m};'; +SELECT @@global.rocksdb_update_cf_options; +@@global.rocksdb_update_cf_options +cf1={write_buffer_size=8m;target_file_size_base=2m};cf2={write_buffer_size=16m;max_bytes_for_level_multiplier=8};cf3={target_file_size_base=4m}; +SELECT * FROM ROCKSDB_CF_OPTIONS WHERE CF_NAME='cf1' AND OPTION_TYPE='WRITE_BUFFER_SIZE'; +CF_NAME OPTION_TYPE VALUE +cf1 WRITE_BUFFER_SIZE 8388608 +SELECT * FROM ROCKSDB_CF_OPTIONS WHERE 
CF_NAME='cf1' AND OPTION_TYPE='TARGET_FILE_SIZE_BASE'; +CF_NAME OPTION_TYPE VALUE +cf1 TARGET_FILE_SIZE_BASE 2097152 +SELECT * FROM ROCKSDB_CF_OPTIONS WHERE CF_NAME='cf2' AND OPTION_TYPE='WRITE_BUFFER_SIZE'; +CF_NAME OPTION_TYPE VALUE +cf2 WRITE_BUFFER_SIZE 16777216 +SELECT * FROM ROCKSDB_CF_OPTIONS WHERE CF_NAME='cf2' AND OPTION_TYPE='MAX_BYTES_FOR_LEVEL_MULTIPLIER'; +CF_NAME OPTION_TYPE VALUE +cf2 MAX_BYTES_FOR_LEVEL_MULTIPLIER 8.000000 +SELECT * FROM ROCKSDB_CF_OPTIONS WHERE CF_NAME='cf3' AND OPTION_TYPE='TARGET_FILE_SIZE_BASE'; +CF_NAME OPTION_TYPE VALUE +cf3 TARGET_FILE_SIZE_BASE 4194304 +SET @@global.rocksdb_update_cf_options = 'cf3={target_file_size_base=24m};'; +SELECT @@global.rocksdb_update_cf_options; +@@global.rocksdb_update_cf_options +cf3={target_file_size_base=24m}; +SELECT * FROM ROCKSDB_CF_OPTIONS WHERE CF_NAME='cf3' AND OPTION_TYPE='TARGET_FILE_SIZE_BASE'; +CF_NAME OPTION_TYPE VALUE +cf3 TARGET_FILE_SIZE_BASE 25165824 +SET @@global.rocksdb_update_cf_options = 'cf1={target_file_size_base=24m};foo={max_bytes_for_level_multiplier=8};'; +SELECT @@global.rocksdb_update_cf_options; +@@global.rocksdb_update_cf_options +cf1={target_file_size_base=24m};foo={max_bytes_for_level_multiplier=8}; +SELECT * FROM ROCKSDB_CF_OPTIONS WHERE CF_NAME='cf1' AND OPTION_TYPE='TARGET_FILE_SIZE_BASE'; +CF_NAME OPTION_TYPE VALUE +cf1 TARGET_FILE_SIZE_BASE 25165824 +SET @@global.rocksdb_update_cf_options = 'default={foo=bar};'; +SELECT @@global.rocksdb_update_cf_options; +@@global.rocksdb_update_cf_options +NULL +USE test; +DROP TABLE t1; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_use_direct_io_for_flush_and_compaction_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_use_direct_io_for_flush_and_compaction_basic.result new file mode 100644 index 0000000000000..219cdb7319c2e --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_use_direct_io_for_flush_and_compaction_basic.result @@ -0,0 +1,7 @@ +SET 
@start_global_value = @@global.ROCKSDB_USE_DIRECT_IO_FOR_FLUSH_AND_COMPACTION; +SELECT @start_global_value; +@start_global_value +0 +"Trying to set variable @@global.ROCKSDB_USE_DIRECT_IO_FOR_FLUSH_AND_COMPACTION to 444. It should fail because it is readonly." +SET @@global.ROCKSDB_USE_DIRECT_IO_FOR_FLUSH_AND_COMPACTION = 444; +ERROR HY000: Variable 'rocksdb_use_direct_io_for_flush_and_compaction' is a read only variable diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_use_direct_writes_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_use_direct_writes_basic.result deleted file mode 100644 index 4cc787e4586c8..0000000000000 --- a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_use_direct_writes_basic.result +++ /dev/null @@ -1,7 +0,0 @@ -SET @start_global_value = @@global.ROCKSDB_USE_DIRECT_WRITES; -SELECT @start_global_value; -@start_global_value -0 -"Trying to set variable @@global.ROCKSDB_USE_DIRECT_WRITES to 444. It should fail because it is readonly." 
-SET @@global.ROCKSDB_USE_DIRECT_WRITES = 444; -ERROR HY000: Variable 'rocksdb_use_direct_writes' is a read only variable diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_write_batch_max_bytes_basic.result b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_write_batch_max_bytes_basic.result new file mode 100644 index 0000000000000..af4da8177d463 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/r/rocksdb_write_batch_max_bytes_basic.result @@ -0,0 +1,15 @@ +create table t (i int); +insert into t values (1), (2), (3), (4), (5); +set session rocksdb_write_batch_max_bytes = 1000; +insert into t values (1), (2), (3), (4), (5); +set session rocksdb_write_batch_max_bytes = 10; +insert into t values (1), (2), (3), (4), (5); +ERROR HY000: Status error 10 received from RocksDB: Operation aborted: Memory limit reached +set session rocksdb_write_batch_max_bytes = 0; +insert into t values (1), (2), (3), (4), (5); +set session rocksdb_write_batch_max_bytes = 10; +begin; +insert into t values (1), (2), (3), (4), (5); +ERROR HY000: Status error 10 received from RocksDB: Operation aborted: Memory limit reached +rollback; +drop table t; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/all_vars.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/all_vars.test index fefd9e39af2d8..fc70035715593 100644 --- a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/all_vars.test +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/all_vars.test @@ -1,3 +1,4 @@ +--source include/have_rocksdb.inc --source include/not_embedded.inc --source include/not_threadpool.inc diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_access_hint_on_compaction_start_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_access_hint_on_compaction_start_basic.test index a6b753ba87aca..5934b06e979ca 100644 --- a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_access_hint_on_compaction_start_basic.test +++ 
b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_access_hint_on_compaction_start_basic.test @@ -3,5 +3,5 @@ --let $sys_var=ROCKSDB_ACCESS_HINT_ON_COMPACTION_START --let $read_only=1 --let $session=0 ---source suite/sys_vars/inc/rocksdb_sys_var.inc +--source ../include/rocksdb_sys_var.inc diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_advise_random_on_open_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_advise_random_on_open_basic.test index b6ccea0f882de..284cc25633654 100644 --- a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_advise_random_on_open_basic.test +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_advise_random_on_open_basic.test @@ -3,4 +3,4 @@ --let $sys_var=ROCKSDB_ADVISE_RANDOM_ON_OPEN --let $read_only=1 --let $session=0 ---source suite/sys_vars/inc/rocksdb_sys_var.inc +--source ../include/rocksdb_sys_var.inc diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_allow_concurrent_memtable_write_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_allow_concurrent_memtable_write_basic.test index b250aa5eb7f30..aaa65921cc3b1 100644 --- a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_allow_concurrent_memtable_write_basic.test +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_allow_concurrent_memtable_write_basic.test @@ -1,18 +1,5 @@ --source include/have_rocksdb.inc - -CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam; -INSERT INTO valid_values VALUES(1); -INSERT INTO valid_values VALUES(0); -INSERT INTO valid_values VALUES('on'); - -CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; -INSERT INTO invalid_values VALUES('\'aaa\''); -INSERT INTO invalid_values VALUES('\'bbb\''); - --let $sys_var=ROCKSDB_ALLOW_CONCURRENT_MEMTABLE_WRITE ---let $read_only=0 +--let $read_only=1 --let $session=0 ---source suite/sys_vars/inc/rocksdb_sys_var.inc - -DROP TABLE valid_values; -DROP TABLE invalid_values; +--source 
../include/rocksdb_sys_var.inc diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_allow_mmap_reads_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_allow_mmap_reads_basic.test index 067f582004527..560badc49e726 100644 --- a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_allow_mmap_reads_basic.test +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_allow_mmap_reads_basic.test @@ -3,4 +3,4 @@ --let $sys_var=ROCKSDB_ALLOW_MMAP_READS --let $read_only=1 --let $session=0 ---source suite/sys_vars/inc/rocksdb_sys_var.inc +--source ../include/rocksdb_sys_var.inc diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_allow_mmap_writes_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_allow_mmap_writes_basic.test index 51fbf62d5a97a..79b9d08409de1 100644 --- a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_allow_mmap_writes_basic.test +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_allow_mmap_writes_basic.test @@ -3,4 +3,4 @@ --let $sys_var=ROCKSDB_ALLOW_MMAP_WRITES --let $read_only=1 --let $session=0 ---source suite/sys_vars/inc/rocksdb_sys_var.inc +--source ../include/rocksdb_sys_var.inc diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_base_background_compactions_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_base_background_compactions_basic.test deleted file mode 100644 index 8e49110513a1c..0000000000000 --- a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_base_background_compactions_basic.test +++ /dev/null @@ -1,7 +0,0 @@ ---source include/have_rocksdb.inc - ---let $sys_var=ROCKSDB_BASE_BACKGROUND_COMPACTIONS ---let $read_only=1 ---let $session=0 ---source suite/sys_vars/inc/rocksdb_sys_var.inc - diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_blind_delete_primary_key_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_blind_delete_primary_key_basic.test index 39265af4c9fcb..2767231db1ccd 100644 
--- a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_blind_delete_primary_key_basic.test +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_blind_delete_primary_key_basic.test @@ -12,7 +12,7 @@ INSERT INTO invalid_values VALUES('\'bbb\''); --let $sys_var=ROCKSDB_BLIND_DELETE_PRIMARY_KEY --let $read_only=0 --let $session=1 ---source suite/sys_vars/inc/rocksdb_sys_var.inc +--source ../include/rocksdb_sys_var.inc DROP TABLE valid_values; DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_block_cache_size_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_block_cache_size_basic.test index 68715796a04eb..febf51b0ebaf8 100644 --- a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_block_cache_size_basic.test +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_block_cache_size_basic.test @@ -3,5 +3,5 @@ --let $sys_var=ROCKSDB_BLOCK_CACHE_SIZE --let $read_only=1 --let $session=0 ---source suite/sys_vars/inc/rocksdb_sys_var.inc +--source ../include/rocksdb_sys_var.inc diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_block_restart_interval_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_block_restart_interval_basic.test index 2b14e1fb65498..11a9551207205 100644 --- a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_block_restart_interval_basic.test +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_block_restart_interval_basic.test @@ -3,4 +3,4 @@ --let $sys_var=ROCKSDB_BLOCK_RESTART_INTERVAL --let $read_only=1 --let $session=0 ---source suite/sys_vars/inc/rocksdb_sys_var.inc +--source ../include/rocksdb_sys_var.inc diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_block_size_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_block_size_basic.test index 11d18e3223fe1..1573fe058dbc8 100644 --- a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_block_size_basic.test +++ 
b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_block_size_basic.test @@ -3,5 +3,5 @@ --let $sys_var=ROCKSDB_BLOCK_SIZE --let $read_only=1 --let $session=0 ---source suite/sys_vars/inc/rocksdb_sys_var.inc +--source ../include/rocksdb_sys_var.inc diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_block_size_deviation_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_block_size_deviation_basic.test index a54700aae4d6b..ed4c3aa0c6ecd 100644 --- a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_block_size_deviation_basic.test +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_block_size_deviation_basic.test @@ -3,5 +3,5 @@ --let $sys_var=ROCKSDB_BLOCK_SIZE_DEVIATION --let $read_only=1 --let $session=0 ---source suite/sys_vars/inc/rocksdb_sys_var.inc +--source ../include/rocksdb_sys_var.inc diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_bulk_load_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_bulk_load_basic.test index 6cd9e0e156007..641a3871fd57c 100644 --- a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_bulk_load_basic.test +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_bulk_load_basic.test @@ -12,7 +12,7 @@ INSERT INTO invalid_values VALUES('\'bbb\''); --let $sys_var=ROCKSDB_BULK_LOAD --let $read_only=0 --let $session=1 ---source suite/sys_vars/inc/rocksdb_sys_var.inc +--source ../include/rocksdb_sys_var.inc DROP TABLE valid_values; DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_bulk_load_size_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_bulk_load_size_basic.test index 1b57255202bcc..01925037e6393 100644 --- a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_bulk_load_size_basic.test +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_bulk_load_size_basic.test @@ -10,7 +10,7 @@ INSERT INTO invalid_values VALUES('\'aaa\''); --let $sys_var=ROCKSDB_BULK_LOAD_SIZE --let 
$read_only=0 --let $session=1 ---source suite/sys_vars/inc/rocksdb_sys_var.inc +--source ../include/rocksdb_sys_var.inc DROP TABLE valid_values; DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_bytes_per_sync_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_bytes_per_sync_basic.test index 2958273695d72..fbe8039277e40 100644 --- a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_bytes_per_sync_basic.test +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_bytes_per_sync_basic.test @@ -3,5 +3,5 @@ --let $sys_var=ROCKSDB_BYTES_PER_SYNC --let $read_only=1 --let $session=0 ---source suite/sys_vars/inc/rocksdb_sys_var.inc +--source ../include/rocksdb_sys_var.inc diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_cache_index_and_filter_blocks_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_cache_index_and_filter_blocks_basic.test index db1f593681205..50a7218e5027e 100644 --- a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_cache_index_and_filter_blocks_basic.test +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_cache_index_and_filter_blocks_basic.test @@ -3,4 +3,4 @@ --let $sys_var=ROCKSDB_CACHE_INDEX_AND_FILTER_BLOCKS --let $read_only=1 --let $session=0 ---source suite/sys_vars/inc/rocksdb_sys_var.inc +--source ../include/rocksdb_sys_var.inc diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_checksums_pct_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_checksums_pct_basic.test index 44126e35f57fe..741d4d27493e8 100644 --- a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_checksums_pct_basic.test +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_checksums_pct_basic.test @@ -11,7 +11,7 @@ INSERT INTO invalid_values VALUES('\'aaa\''); --let $sys_var=ROCKSDB_CHECKSUMS_PCT --let $read_only=0 --let $session=1 ---source suite/sys_vars/inc/rocksdb_sys_var.inc +--source ../include/rocksdb_sys_var.inc 
DROP TABLE valid_values; DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_collect_sst_properties_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_collect_sst_properties_basic.test index c47c62e41b475..7b851db642b0d 100644 --- a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_collect_sst_properties_basic.test +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_collect_sst_properties_basic.test @@ -3,6 +3,6 @@ --let $sys_var=ROCKSDB_COLLECT_SST_PROPERTIES --let $read_only=1 --let $session=0 ---source suite/sys_vars/inc/rocksdb_sys_var.inc +--source ../include/rocksdb_sys_var.inc diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_commit_in_the_middle_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_commit_in_the_middle_basic.test index 62c8e680aab63..ff0585534c4fc 100644 --- a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_commit_in_the_middle_basic.test +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_commit_in_the_middle_basic.test @@ -12,7 +12,7 @@ INSERT INTO invalid_values VALUES('\'bbb\''); --let $sys_var=ROCKSDB_COMMIT_IN_THE_MIDDLE --let $read_only=0 --let $session=1 ---source suite/sys_vars/inc/rocksdb_sys_var.inc +--source ../include/rocksdb_sys_var.inc DROP TABLE valid_values; DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_compact_cf_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_compact_cf_basic.test index bbafd52605527..77a1a6bcab510 100644 --- a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_compact_cf_basic.test +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_compact_cf_basic.test @@ -14,7 +14,7 @@ CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; --let $session=0 --let $sticky=1 ---source suite/sys_vars/inc/rocksdb_sys_var.inc +--source ../include/rocksdb_sys_var.inc DROP TABLE valid_values; DROP TABLE invalid_values; diff 
--git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_compaction_readahead_size_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_compaction_readahead_size_basic.test index ba45defb7a191..f3215196d383d 100644 --- a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_compaction_readahead_size_basic.test +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_compaction_readahead_size_basic.test @@ -17,7 +17,7 @@ SELECT @@global.rocksdb_compaction_readahead_size; --let $sys_var=ROCKSDB_COMPACTION_READAHEAD_SIZE --let $read_only=0 --let $session=0 ---source suite/sys_vars/inc/rocksdb_sys_var.inc +--source ../include/rocksdb_sys_var.inc DROP TABLE valid_values; DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_compaction_sequential_deletes_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_compaction_sequential_deletes_basic.test index 5ec719baeb6ad..d2cd54e486793 100644 --- a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_compaction_sequential_deletes_basic.test +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_compaction_sequential_deletes_basic.test @@ -12,7 +12,7 @@ INSERT INTO invalid_values VALUES('\'2000001\''); --let $sys_var=ROCKSDB_COMPACTION_SEQUENTIAL_DELETES --let $read_only=0 --let $session=0 ---source suite/sys_vars/inc/rocksdb_sys_var.inc +--source ../include/rocksdb_sys_var.inc DROP TABLE valid_values; DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_compaction_sequential_deletes_count_sd_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_compaction_sequential_deletes_count_sd_basic.test index 6c35ed634f7cf..c5417b11a449a 100644 --- a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_compaction_sequential_deletes_count_sd_basic.test +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_compaction_sequential_deletes_count_sd_basic.test @@ -12,7 +12,7 @@ INSERT INTO 
invalid_values VALUES('\'bbb\''); --let $sys_var=ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_COUNT_SD --let $read_only=0 --let $session=0 ---source suite/sys_vars/inc/rocksdb_sys_var.inc +--source ../include/rocksdb_sys_var.inc DROP TABLE valid_values; DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_compaction_sequential_deletes_file_size_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_compaction_sequential_deletes_file_size_basic.test index ff132f7049cb7..0381cd6b3a6cc 100644 --- a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_compaction_sequential_deletes_file_size_basic.test +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_compaction_sequential_deletes_file_size_basic.test @@ -10,7 +10,7 @@ INSERT INTO invalid_values VALUES('\'aaa\''); --let $sys_var=ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_FILE_SIZE --let $read_only=0 --let $session=0 ---source suite/sys_vars/inc/rocksdb_sys_var.inc +--source ../include/rocksdb_sys_var.inc DROP TABLE valid_values; DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_compaction_sequential_deletes_window_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_compaction_sequential_deletes_window_basic.test index b38c79b5ef07f..7b6cebdadf5fa 100644 --- a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_compaction_sequential_deletes_window_basic.test +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_compaction_sequential_deletes_window_basic.test @@ -12,7 +12,7 @@ INSERT INTO invalid_values VALUES('\'2000001\''); --let $sys_var=ROCKSDB_COMPACTION_SEQUENTIAL_DELETES_WINDOW --let $read_only=0 --let $session=0 ---source suite/sys_vars/inc/rocksdb_sys_var.inc +--source ../include/rocksdb_sys_var.inc DROP TABLE valid_values; DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_create_checkpoint_basic.test 
b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_create_checkpoint_basic.test index 2850c7a1a3863..a53df21524f42 100644 --- a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_create_checkpoint_basic.test +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_create_checkpoint_basic.test @@ -21,7 +21,7 @@ # Set back to original value # validate that DEFAULT causes failure in creating checkpoint since # DEFAULT == '' ---error ER_UNKNOWN_ERROR +--error ER_RDB_STATUS_GENERAL --eval SET @@global.ROCKSDB_CREATE_CHECKPOINT = @start_value # clean up diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_create_if_missing_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_create_if_missing_basic.test index 77422aa164c58..80db24ca37543 100644 --- a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_create_if_missing_basic.test +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_create_if_missing_basic.test @@ -10,7 +10,7 @@ INSERT INTO invalid_values VALUES('\'aaa\''); --let $sys_var=ROCKSDB_CREATE_IF_MISSING --let $read_only=1 --let $session=0 ---source suite/sys_vars/inc/rocksdb_sys_var.inc +--source ../include/rocksdb_sys_var.inc DROP TABLE valid_values; DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_create_missing_column_families_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_create_missing_column_families_basic.test index b8aeb6c9b19f2..4020e15952dda 100644 --- a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_create_missing_column_families_basic.test +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_create_missing_column_families_basic.test @@ -10,7 +10,7 @@ INSERT INTO invalid_values VALUES('\'aaa\''); --let $sys_var=ROCKSDB_CREATE_MISSING_COLUMN_FAMILIES --let $read_only=1 --let $session=0 ---source suite/sys_vars/inc/rocksdb_sys_var.inc +--source ../include/rocksdb_sys_var.inc DROP TABLE valid_values; DROP TABLE invalid_values; 
diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_datadir_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_datadir_basic.test index 20f33d6bdfd0d..c84df3a99328e 100644 --- a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_datadir_basic.test +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_datadir_basic.test @@ -3,4 +3,4 @@ --let $sys_var=ROCKSDB_DATADIR --let $read_only=1 --let $session=0 ---source suite/sys_vars/inc/rocksdb_sys_var.inc +--source ../include/rocksdb_sys_var.inc diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_db_write_buffer_size_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_db_write_buffer_size_basic.test index 7ef5422dcd391..5eb66aa60ff84 100644 --- a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_db_write_buffer_size_basic.test +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_db_write_buffer_size_basic.test @@ -3,4 +3,4 @@ --let $sys_var=ROCKSDB_DB_WRITE_BUFFER_SIZE --let $read_only=1 --let $session=0 ---source suite/sys_vars/inc/rocksdb_sys_var.inc +--source ../include/rocksdb_sys_var.inc diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_deadlock_detect_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_deadlock_detect_basic.test index aa532fdc1bee4..d87a7c38a957a 100644 --- a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_deadlock_detect_basic.test +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_deadlock_detect_basic.test @@ -14,7 +14,7 @@ INSERT INTO invalid_values VALUES('\'bbb\''); --let $read_only=0 --let $session=1 --let $sticky=1 ---source suite/sys_vars/inc/rocksdb_sys_var.inc +--source ../include/rocksdb_sys_var.inc DROP TABLE valid_values; DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_debug_optimizer_no_zero_cardinality_basic.test 
b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_debug_optimizer_no_zero_cardinality_basic.test index 52e25ab358f04..001712382ee38 100644 --- a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_debug_optimizer_no_zero_cardinality_basic.test +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_debug_optimizer_no_zero_cardinality_basic.test @@ -12,7 +12,7 @@ INSERT INTO invalid_values VALUES('\'bbb\''); --let $sys_var=ROCKSDB_DEBUG_OPTIMIZER_NO_ZERO_CARDINALITY --let $read_only=0 --let $session=0 ---source suite/sys_vars/inc/rocksdb_sys_var.inc +--source ../include/rocksdb_sys_var.inc DROP TABLE valid_values; DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_debug_ttl_read_filter_ts_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_debug_ttl_read_filter_ts_basic.test new file mode 100644 index 0000000000000..c3837ff1454cc --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_debug_ttl_read_filter_ts_basic.test @@ -0,0 +1,16 @@ +--source include/have_rocksdb.inc + +CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO valid_values VALUES(2400); +INSERT INTO valid_values VALUES(-2400); + +CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO invalid_values VALUES('\'aaa\''); + +--let $sys_var=ROCKSDB_DEBUG_TTL_READ_FILTER_TS +--let $read_only=0 +--let $session=0 +--source ../include/rocksdb_sys_var.inc + +DROP TABLE valid_values; +DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_debug_ttl_rec_ts_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_debug_ttl_rec_ts_basic.test new file mode 100644 index 0000000000000..14c3e3d30aa69 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_debug_ttl_rec_ts_basic.test @@ -0,0 +1,16 @@ +--source include/have_rocksdb.inc + +CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO valid_values 
VALUES(2400); +INSERT INTO valid_values VALUES(-2400); + +CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO invalid_values VALUES('\'aaa\''); + +--let $sys_var=ROCKSDB_DEBUG_TTL_REC_TS +--let $read_only=0 +--let $session=0 +--source ../include/rocksdb_sys_var.inc + +DROP TABLE valid_values; +DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_debug_ttl_snapshot_ts_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_debug_ttl_snapshot_ts_basic.test new file mode 100644 index 0000000000000..af507fbe7db3a --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_debug_ttl_snapshot_ts_basic.test @@ -0,0 +1,16 @@ +--source include/have_rocksdb.inc + +CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO valid_values VALUES(2400); +INSERT INTO valid_values VALUES(-2400); + +CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO invalid_values VALUES('\'aaa\''); + +--let $sys_var=ROCKSDB_DEBUG_TTL_SNAPSHOT_TS +--let $read_only=0 +--let $session=0 +--source ../include/rocksdb_sys_var.inc + +DROP TABLE valid_values; +DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_default_cf_options_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_default_cf_options_basic.test index f756d1eb2f515..ed2210264525c 100644 --- a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_default_cf_options_basic.test +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_default_cf_options_basic.test @@ -3,4 +3,4 @@ --let $sys_var=ROCKSDB_DEFAULT_CF_OPTIONS --let $read_only=1 --let $session=0 ---source suite/sys_vars/inc/rocksdb_sys_var.inc +--source ../include/rocksdb_sys_var.inc diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_delayed_write_rate_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_delayed_write_rate_basic.test index 
c8824a634cb7f..b54e41a0e153d 100644 --- a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_delayed_write_rate_basic.test +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_delayed_write_rate_basic.test @@ -15,7 +15,7 @@ INSERT INTO invalid_values VALUES('\'484436\''); --let $sys_var=ROCKSDB_DELAYED_WRITE_RATE --let $read_only=0 --let $session=0 ---source suite/sys_vars/inc/rocksdb_sys_var.inc +--source ../include/rocksdb_sys_var.inc DROP TABLE valid_values; DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_delete_obsolete_files_period_micros_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_delete_obsolete_files_period_micros_basic.test index 744bd946d9a89..78a9012a63f36 100644 --- a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_delete_obsolete_files_period_micros_basic.test +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_delete_obsolete_files_period_micros_basic.test @@ -3,4 +3,4 @@ --let $sys_var=ROCKSDB_DELETE_OBSOLETE_FILES_PERIOD_MICROS --let $read_only=1 --let $session=0 ---source suite/sys_vars/inc/rocksdb_sys_var.inc +--source ../include/rocksdb_sys_var.inc diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_disable_2pc_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_enable_2pc_basic.test similarity index 91% rename from storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_disable_2pc_basic.test rename to storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_enable_2pc_basic.test index 1badcef0347a0..31774adc9b336 100644 --- a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_disable_2pc_basic.test +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_enable_2pc_basic.test @@ -14,7 +14,7 @@ INSERT INTO invalid_values VALUES('\'bbb\''); --let $read_only=0 --let $session=0 --let $sticky=1 ---source suite/sys_vars/inc/rocksdb_sys_var.inc +--source ../include/rocksdb_sys_var.inc DROP TABLE valid_values; DROP TABLE 
invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_enable_bulk_load_api_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_enable_bulk_load_api_basic.test index 407093acbea50..3a696aff74dc2 100644 --- a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_enable_bulk_load_api_basic.test +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_enable_bulk_load_api_basic.test @@ -10,7 +10,7 @@ INSERT INTO invalid_values VALUES('\'aaa\''); --let $sys_var=ROCKSDB_ENABLE_BULK_LOAD_API --let $read_only=1 --let $session=0 ---source suite/sys_vars/inc/rocksdb_sys_var.inc +--source ../include/rocksdb_sys_var.inc DROP TABLE valid_values; DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_enable_thread_tracking_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_enable_thread_tracking_basic.test index 251d7d5803d06..266537cb3d36c 100644 --- a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_enable_thread_tracking_basic.test +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_enable_thread_tracking_basic.test @@ -3,4 +3,4 @@ --let $sys_var=ROCKSDB_ENABLE_THREAD_TRACKING --let $read_only=1 --let $session=0 ---source suite/sys_vars/inc/rocksdb_sys_var.inc +--source ../include/rocksdb_sys_var.inc diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_background_sync_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_enable_ttl_basic.test similarity index 76% rename from storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_background_sync_basic.test rename to storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_enable_ttl_basic.test index e0c2bd366cc0c..8afb2f64e28ef 100644 --- a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_background_sync_basic.test +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_enable_ttl_basic.test @@ -4,15 +4,15 @@ CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam; INSERT INTO 
valid_values VALUES(1); INSERT INTO valid_values VALUES(0); INSERT INTO valid_values VALUES('on'); -INSERT INTO valid_values VALUES('off'); CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; INSERT INTO invalid_values VALUES('\'aaa\''); +INSERT INTO invalid_values VALUES('\'bbb\''); ---let $sys_var=ROCKSDB_BACKGROUND_SYNC +--let $sys_var=ROCKSDB_ENABLE_TTL --let $read_only=0 --let $session=0 ---source suite/sys_vars/inc/rocksdb_sys_var.inc +--source ../include/rocksdb_sys_var.inc DROP TABLE valid_values; DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_enable_ttl_read_filtering_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_enable_ttl_read_filtering_basic.test new file mode 100644 index 0000000000000..cc034ed47d6db --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_enable_ttl_read_filtering_basic.test @@ -0,0 +1,18 @@ +--source include/have_rocksdb.inc + +CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO valid_values VALUES(1); +INSERT INTO valid_values VALUES(0); +INSERT INTO valid_values VALUES('on'); + +CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO invalid_values VALUES('\'aaa\''); +INSERT INTO invalid_values VALUES('\'bbb\''); + +--let $sys_var=ROCKSDB_ENABLE_TTL_READ_FILTERING +--let $read_only=0 +--let $session=0 +--source ../include/rocksdb_sys_var.inc + +DROP TABLE valid_values; +DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_enable_write_thread_adaptive_yield_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_enable_write_thread_adaptive_yield_basic.test index 9d6502598b0dd..4b0c123c70a43 100644 --- a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_enable_write_thread_adaptive_yield_basic.test +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_enable_write_thread_adaptive_yield_basic.test @@ -1,18 +1,5 @@ --source 
include/have_rocksdb.inc - -CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam; -INSERT INTO valid_values VALUES(1); -INSERT INTO valid_values VALUES(0); -INSERT INTO valid_values VALUES('on'); - -CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; -INSERT INTO invalid_values VALUES('\'aaa\''); -INSERT INTO invalid_values VALUES('\'bbb\''); - --let $sys_var=ROCKSDB_ENABLE_WRITE_THREAD_ADAPTIVE_YIELD ---let $read_only=0 +--let $read_only=1 --let $session=0 ---source suite/sys_vars/inc/rocksdb_sys_var.inc - -DROP TABLE valid_values; -DROP TABLE invalid_values; +--source ../include/rocksdb_sys_var.inc diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_error_if_exists_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_error_if_exists_basic.test index 495770e8efb8e..19d5c49026057 100644 --- a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_error_if_exists_basic.test +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_error_if_exists_basic.test @@ -10,7 +10,7 @@ INSERT INTO invalid_values VALUES('\'aaa\''); --let $sys_var=ROCKSDB_ERROR_IF_EXISTS --let $read_only=1 --let $session=0 ---source suite/sys_vars/inc/rocksdb_sys_var.inc +--source ../include/rocksdb_sys_var.inc DROP TABLE valid_values; DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_flush_log_at_trx_commit_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_flush_log_at_trx_commit_basic.test index 6bd471d83ab1d..4dcab5877c52f 100644 --- a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_flush_log_at_trx_commit_basic.test +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_flush_log_at_trx_commit_basic.test @@ -10,9 +10,8 @@ INSERT INTO invalid_values VALUES('\'aaa\''); --let $sys_var=ROCKSDB_FLUSH_LOG_AT_TRX_COMMIT --let $read_only=0 ---let $session=1 ---source suite/sys_vars/inc/rocksdb_sys_var.inc +--let $session=0 +--source ../include/rocksdb_sys_var.inc DROP TABLE 
valid_values; DROP TABLE invalid_values; - diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_force_flush_memtable_and_lzero_now_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_force_flush_memtable_and_lzero_now_basic.test new file mode 100644 index 0000000000000..5eeac50104072 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_force_flush_memtable_and_lzero_now_basic.test @@ -0,0 +1,17 @@ +--source include/have_rocksdb.inc + +CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO valid_values VALUES(1); +INSERT INTO valid_values VALUES(0); +INSERT INTO valid_values VALUES('on'); + +CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; + +--let $sys_var=ROCKSDB_FORCE_FLUSH_MEMTABLE_AND_LZERO_NOW +--let $read_only=0 +--let $session=0 +--let $sticky=1 +--source ../include/rocksdb_sys_var.inc + +DROP TABLE valid_values; +DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_force_flush_memtable_now_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_force_flush_memtable_now_basic.test index 9529fae7516d4..25e4a9e328f8f 100644 --- a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_force_flush_memtable_now_basic.test +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_force_flush_memtable_now_basic.test @@ -11,7 +11,7 @@ CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; --let $read_only=0 --let $session=0 --let $sticky=1 ---source suite/sys_vars/inc/rocksdb_sys_var.inc +--source ../include/rocksdb_sys_var.inc DROP TABLE valid_values; DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_force_index_records_in_range_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_force_index_records_in_range_basic.test index 08e8d0c16deec..4eb52e8092c65 100644 --- a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_force_index_records_in_range_basic.test +++ 
b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_force_index_records_in_range_basic.test @@ -17,7 +17,7 @@ SELECT @@session.rocksdb_force_index_records_in_range; --let $sys_var=ROCKSDB_FORCE_INDEX_RECORDS_IN_RANGE --let $read_only=0 --let $session=1 ---source suite/sys_vars/inc/rocksdb_sys_var.inc +--source ../include/rocksdb_sys_var.inc DROP TABLE valid_values; DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_hash_index_allow_collision_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_hash_index_allow_collision_basic.test index 5899f7b67d005..ea8aefcf1b514 100644 --- a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_hash_index_allow_collision_basic.test +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_hash_index_allow_collision_basic.test @@ -3,5 +3,5 @@ --let $sys_var=ROCKSDB_HASH_INDEX_ALLOW_COLLISION --let $read_only=1 --let $session=0 ---source suite/sys_vars/inc/rocksdb_sys_var.inc +--source ../include/rocksdb_sys_var.inc diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_index_type_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_index_type_basic.test index 711703c2148ee..74d20110cf2fa 100644 --- a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_index_type_basic.test +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_index_type_basic.test @@ -3,5 +3,5 @@ --let $sys_var=ROCKSDB_INDEX_TYPE --let $read_only=1 --let $session=0 ---source suite/sys_vars/inc/rocksdb_sys_var.inc +--source ../include/rocksdb_sys_var.inc diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_info_log_level_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_info_log_level_basic.test index 990a9a6214891..a244dad12167c 100644 --- a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_info_log_level_basic.test +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_info_log_level_basic.test @@ -15,7 +15,7 @@ INSERT INTO 
invalid_values VALUES('foo'); --let $sys_var=ROCKSDB_INFO_LOG_LEVEL --let $read_only=0 --let $session=0 ---source suite/sys_vars/inc/rocksdb_sys_var.inc +--source ../include/rocksdb_sys_var.inc DROP TABLE valid_values; DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_io_write_timeout_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_io_write_timeout_basic.test new file mode 100644 index 0000000000000..4433eb2632dbf --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_io_write_timeout_basic.test @@ -0,0 +1,20 @@ +--source include/have_rocksdb.inc + +CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO valid_values VALUES(10); +INSERT INTO valid_values VALUES(100); +INSERT INTO valid_values VALUES(0); +INSERT INTO valid_values VALUES(42); +INSERT INTO valid_values VALUES(142); + +CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO invalid_values VALUES('\'aaa\''); +INSERT INTO invalid_values VALUES('\'bbb\''); + +--let $sys_var=ROCKSDB_IO_WRITE_TIMEOUT +--let $read_only=0 +--let $session=0 +--source ../include/rocksdb_sys_var.inc + +DROP TABLE valid_values; +DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_is_fd_close_on_exec_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_is_fd_close_on_exec_basic.test index 741e20fac9f7d..1ab8a025921f3 100644 --- a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_is_fd_close_on_exec_basic.test +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_is_fd_close_on_exec_basic.test @@ -3,4 +3,4 @@ --let $sys_var=ROCKSDB_IS_FD_CLOSE_ON_EXEC --let $read_only=1 --let $session=0 ---source suite/sys_vars/inc/rocksdb_sys_var.inc +--source ../include/rocksdb_sys_var.inc diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_keep_log_file_num_basic.test 
b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_keep_log_file_num_basic.test index 511f9f8a06d58..8577295f2ab27 100644 --- a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_keep_log_file_num_basic.test +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_keep_log_file_num_basic.test @@ -3,5 +3,5 @@ --let $sys_var=ROCKSDB_KEEP_LOG_FILE_NUM --let $read_only=1 --let $session=0 ---source suite/sys_vars/inc/rocksdb_sys_var.inc +--source ../include/rocksdb_sys_var.inc diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_lock_scanned_rows_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_lock_scanned_rows_basic.test index 52f7f502d965e..9e80c9b24b57c 100644 --- a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_lock_scanned_rows_basic.test +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_lock_scanned_rows_basic.test @@ -16,7 +16,7 @@ INSERT INTO invalid_values VALUES(1000); --let $sys_var=ROCKSDB_LOCK_SCANNED_ROWS --let $read_only=0 --let $session=1 ---source suite/sys_vars/inc/rocksdb_sys_var.inc +--source ../include/rocksdb_sys_var.inc DROP TABLE valid_values; DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_lock_wait_timeout_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_lock_wait_timeout_basic.test index 0c524db9cbde0..861892d9b3dee 100644 --- a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_lock_wait_timeout_basic.test +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_lock_wait_timeout_basic.test @@ -10,7 +10,7 @@ INSERT INTO invalid_values VALUES('\'aaa\''); --let $sys_var=ROCKSDB_LOCK_WAIT_TIMEOUT --let $read_only=0 --let $session=1 ---source suite/sys_vars/inc/rocksdb_sys_var.inc +--source ../include/rocksdb_sys_var.inc DROP TABLE valid_values; DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_log_file_time_to_roll_basic.test 
b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_log_file_time_to_roll_basic.test index 76aee161efc97..55f5222766e99 100644 --- a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_log_file_time_to_roll_basic.test +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_log_file_time_to_roll_basic.test @@ -3,4 +3,4 @@ --let $sys_var=ROCKSDB_LOG_FILE_TIME_TO_ROLL --let $read_only=1 --let $session=0 ---source suite/sys_vars/inc/rocksdb_sys_var.inc +--source ../include/rocksdb_sys_var.inc diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_manifest_preallocation_size_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_manifest_preallocation_size_basic.test index 48d14fbf9f6ff..ca1051125d073 100644 --- a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_manifest_preallocation_size_basic.test +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_manifest_preallocation_size_basic.test @@ -3,4 +3,4 @@ --let $sys_var=ROCKSDB_MANIFEST_PREALLOCATION_SIZE --let $read_only=1 --let $session=0 ---source suite/sys_vars/inc/rocksdb_sys_var.inc +--source ../include/rocksdb_sys_var.inc diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_master_skip_tx_api_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_master_skip_tx_api_basic.test index ac4a6f7bca0b4..c174c67512d44 100644 --- a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_master_skip_tx_api_basic.test +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_master_skip_tx_api_basic.test @@ -12,7 +12,7 @@ INSERT INTO invalid_values VALUES('\'bbb\''); --let $sys_var=ROCKSDB_MASTER_SKIP_TX_API --let $read_only=0 --let $session=1 ---source suite/sys_vars/inc/rocksdb_sys_var.inc +--source ../include/rocksdb_sys_var.inc DROP TABLE valid_values; DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_max_background_flushes_basic.test 
b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_max_background_flushes_basic.test deleted file mode 100644 index de3ab148ec688..0000000000000 --- a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_max_background_flushes_basic.test +++ /dev/null @@ -1,6 +0,0 @@ ---source include/have_rocksdb.inc - ---let $sys_var=ROCKSDB_MAX_BACKGROUND_FLUSHES ---let $read_only=1 ---let $session=0 ---source suite/sys_vars/inc/rocksdb_sys_var.inc diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_max_background_compactions_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_max_background_jobs_basic.test similarity index 79% rename from storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_max_background_compactions_basic.test rename to storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_max_background_jobs_basic.test index 5fcc4e6ef25b0..3597512abdfba 100644 --- a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_max_background_compactions_basic.test +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_max_background_jobs_basic.test @@ -7,10 +7,10 @@ INSERT INTO valid_values VALUES(64); CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; INSERT INTO invalid_values VALUES('\'abc\''); ---let $sys_var=ROCKSDB_MAX_BACKGROUND_COMPACTIONS +--let $sys_var=ROCKSDB_MAX_BACKGROUND_JOBS --let $read_only=0 --let $session=0 ---source suite/sys_vars/inc/rocksdb_sys_var.inc +--source ../include/rocksdb_sys_var.inc DROP TABLE valid_values; DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_max_log_file_size_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_max_log_file_size_basic.test index b0dca55e18b45..3f01e94d6abbe 100644 --- a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_max_log_file_size_basic.test +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_max_log_file_size_basic.test @@ -3,4 +3,4 @@ --let $sys_var=ROCKSDB_MAX_LOG_FILE_SIZE --let 
$read_only=1 --let $session=0 ---source suite/sys_vars/inc/rocksdb_sys_var.inc +--source ../include/rocksdb_sys_var.inc diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_max_manifest_file_size_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_max_manifest_file_size_basic.test index 9464f0aa1ad2d..da6851229f8df 100644 --- a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_max_manifest_file_size_basic.test +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_max_manifest_file_size_basic.test @@ -3,5 +3,5 @@ --let $sys_var=ROCKSDB_MAX_MANIFEST_FILE_SIZE --let $read_only=1 --let $session=0 ---source suite/sys_vars/inc/rocksdb_sys_var.inc +--source ../include/rocksdb_sys_var.inc diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_max_open_files_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_max_open_files_basic.test index c82af39f7b5d3..47155b15137a2 100644 --- a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_max_open_files_basic.test +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_max_open_files_basic.test @@ -3,4 +3,4 @@ --let $sys_var=ROCKSDB_MAX_OPEN_FILES --let $read_only=1 --let $session=0 ---source suite/sys_vars/inc/rocksdb_sys_var.inc +--source ../include/rocksdb_sys_var.inc diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_max_row_locks_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_max_row_locks_basic.test index a9e440d4b980c..b4d1b5f047420 100644 --- a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_max_row_locks_basic.test +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_max_row_locks_basic.test @@ -10,7 +10,7 @@ INSERT INTO invalid_values VALUES('\'aaa\''); --let $sys_var=ROCKSDB_MAX_ROW_LOCKS --let $read_only=0 --let $session=1 ---source suite/sys_vars/inc/rocksdb_sys_var.inc +--source ../include/rocksdb_sys_var.inc DROP TABLE valid_values; DROP TABLE invalid_values; diff --git 
a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_max_subcompactions_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_max_subcompactions_basic.test index 0ebc9c204fb91..a27ef2169d455 100644 --- a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_max_subcompactions_basic.test +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_max_subcompactions_basic.test @@ -3,5 +3,5 @@ --let $sys_var=ROCKSDB_MAX_SUBCOMPACTIONS --let $read_only=1 --let $session=0 ---source suite/sys_vars/inc/rocksdb_sys_var.inc +--source ../include/rocksdb_sys_var.inc diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_max_total_wal_size_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_max_total_wal_size_basic.test index 0f881868ae27e..d758ca604dd4b 100644 --- a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_max_total_wal_size_basic.test +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_max_total_wal_size_basic.test @@ -3,4 +3,4 @@ --let $sys_var=ROCKSDB_MAX_TOTAL_WAL_SIZE --let $read_only=1 --let $session=0 ---source suite/sys_vars/inc/rocksdb_sys_var.inc +--source ../include/rocksdb_sys_var.inc diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_new_table_reader_for_compaction_inputs_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_new_table_reader_for_compaction_inputs_basic.test index cc84a2c60be45..004d25a261900 100644 --- a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_new_table_reader_for_compaction_inputs_basic.test +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_new_table_reader_for_compaction_inputs_basic.test @@ -3,5 +3,5 @@ --let $sys_var=ROCKSDB_NEW_TABLE_READER_FOR_COMPACTION_INPUTS --let $read_only=1 --let $session=0 ---source suite/sys_vars/inc/rocksdb_sys_var.inc +--source ../include/rocksdb_sys_var.inc diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_no_block_cache_basic.test 
b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_no_block_cache_basic.test index 39c84fb2c2df9..21c34ee5627b0 100644 --- a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_no_block_cache_basic.test +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_no_block_cache_basic.test @@ -3,4 +3,4 @@ --let $sys_var=ROCKSDB_NO_BLOCK_CACHE --let $read_only=1 --let $session=0 ---source suite/sys_vars/inc/rocksdb_sys_var.inc +--source ../include/rocksdb_sys_var.inc diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_override_cf_options_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_override_cf_options_basic.test index bc680c0772a64..dc1ab74175807 100644 --- a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_override_cf_options_basic.test +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_override_cf_options_basic.test @@ -3,4 +3,4 @@ --let $sys_var=ROCKSDB_OVERRIDE_CF_OPTIONS --let $read_only=1 --let $session=0 ---source suite/sys_vars/inc/rocksdb_sys_var.inc +--source ../include/rocksdb_sys_var.inc diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_paranoid_checks_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_paranoid_checks_basic.test index 5b0e4798678e6..4b006daf6104c 100644 --- a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_paranoid_checks_basic.test +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_paranoid_checks_basic.test @@ -3,5 +3,5 @@ --let $sys_var=ROCKSDB_PARANOID_CHECKS --let $read_only=1 --let $session=0 ---source suite/sys_vars/inc/rocksdb_sys_var.inc +--source ../include/rocksdb_sys_var.inc diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_pause_background_work_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_pause_background_work_basic.test index fd2f30988409c..282b44b4a4eb0 100644 --- a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_pause_background_work_basic.test +++ 
b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_pause_background_work_basic.test @@ -14,7 +14,7 @@ INSERT INTO invalid_values VALUES('\'bbb\''); --let $read_only=0 --let $session=0 --let $sticky=1 ---source suite/sys_vars/inc/rocksdb_sys_var.inc +--source ../include/rocksdb_sys_var.inc DROP TABLE valid_values; DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_perf_context_level_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_perf_context_level_basic.test index 1fd61a8095510..8a42a30b78a54 100644 --- a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_perf_context_level_basic.test +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_perf_context_level_basic.test @@ -12,7 +12,7 @@ INSERT INTO invalid_values VALUES('\'aaa\''); --let $sys_var=ROCKSDB_PERF_CONTEXT_LEVEL --let $read_only=0 --let $session=1 ---source suite/sys_vars/inc/rocksdb_sys_var.inc +--source ../include/rocksdb_sys_var.inc DROP TABLE valid_values; DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_persistent_cache_path_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_persistent_cache_path_basic.test index c084027425354..fe44b22ad47ec 100644 --- a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_persistent_cache_path_basic.test +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_persistent_cache_path_basic.test @@ -10,7 +10,7 @@ CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; --let $read_only=1 --let $session=0 --let $sticky=1 ---source suite/sys_vars/inc/rocksdb_sys_var.inc +--source ../include/rocksdb_sys_var.inc DROP TABLE valid_values; DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_persistent_cache_size_mb_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_persistent_cache_size_mb_basic.test index 45bfaaf9ef2aa..9248668a0a7b5 100644 --- 
a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_persistent_cache_size_mb_basic.test +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_persistent_cache_size_mb_basic.test @@ -10,7 +10,7 @@ INSERT INTO invalid_values VALUES('\'aaa\''); --let $sys_var=ROCKSDB_PERSISTENT_CACHE_SIZE_MB --let $read_only=1 --let $session=0 ---source suite/sys_vars/inc/rocksdb_sys_var.inc +--source ../include/rocksdb_sys_var.inc DROP TABLE valid_values; DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_pin_l0_filter_and_index_blocks_in_cache_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_pin_l0_filter_and_index_blocks_in_cache_basic.test index af0950979094f..08559cb0792a5 100644 --- a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_pin_l0_filter_and_index_blocks_in_cache_basic.test +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_pin_l0_filter_and_index_blocks_in_cache_basic.test @@ -3,4 +3,4 @@ --let $sys_var=ROCKSDB_PIN_L0_FILTER_AND_INDEX_BLOCKS_IN_CACHE --let $read_only=1 --let $session=0 ---source suite/sys_vars/inc/rocksdb_sys_var.inc +--source ../include/rocksdb_sys_var.inc diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_print_snapshot_conflict_queries_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_print_snapshot_conflict_queries_basic.test index 92a419a8636b3..07a605258475f 100644 --- a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_print_snapshot_conflict_queries_basic.test +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_print_snapshot_conflict_queries_basic.test @@ -12,7 +12,7 @@ INSERT INTO invalid_values VALUES('\'bbb\''); --let $sys_var=ROCKSDB_PRINT_SNAPSHOT_CONFLICT_QUERIES --let $read_only=0 --let $session=0 ---source suite/sys_vars/inc/rocksdb_sys_var.inc +--source ../include/rocksdb_sys_var.inc DROP TABLE valid_values; DROP TABLE invalid_values; diff --git 
a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_rate_limiter_bytes_per_sec_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_rate_limiter_bytes_per_sec_basic.test index d683e8045da8c..91d7f65c8d130 100644 --- a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_rate_limiter_bytes_per_sec_basic.test +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_rate_limiter_bytes_per_sec_basic.test @@ -36,7 +36,7 @@ INSERT INTO invalid_values VALUES('\'aaa\''), (3.14); # Test all the valid and invalid values --let $sys_var=ROCKSDB_RATE_LIMITER_BYTES_PER_SEC --let $session=0 ---source suite/sys_vars/inc/rocksdb_sys_var.inc +--source ../include/rocksdb_sys_var.inc DROP TABLE valid_values; DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_read_free_rpl_tables_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_read_free_rpl_tables_basic.test index 9ff20edcfb2cf..b5ca387b47c89 100644 --- a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_read_free_rpl_tables_basic.test +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_read_free_rpl_tables_basic.test @@ -9,7 +9,7 @@ CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; --let $sys_var=ROCKSDB_READ_FREE_RPL_TABLES --let $read_only=0 --let $session=1 ---source suite/sys_vars/inc/rocksdb_sys_var.inc +--source ../include/rocksdb_sys_var.inc DROP TABLE valid_values; DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_records_in_range_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_records_in_range_basic.test index 4fab0b3123c76..9e1b6ea924425 100644 --- a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_records_in_range_basic.test +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_records_in_range_basic.test @@ -12,7 +12,7 @@ INSERT INTO invalid_values VALUES('\'bbb\''); --let $sys_var=ROCKSDB_RECORDS_IN_RANGE --let $read_only=0 --let $session=1 
---source suite/sys_vars/inc/rocksdb_sys_var.inc +--source ../include/rocksdb_sys_var.inc DROP TABLE valid_values; DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_reset_stats_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_reset_stats_basic.test new file mode 100644 index 0000000000000..62f75a3bcc507 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_reset_stats_basic.test @@ -0,0 +1,21 @@ +--source include/have_rocksdb.inc + +CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO valid_values VALUES(1); +INSERT INTO valid_values VALUES(0); +INSERT INTO valid_values VALUES('on'); +INSERT INTO valid_values VALUES('off'); +INSERT INTO valid_values VALUES('true'); +INSERT INTO valid_values VALUES('false'); + +CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO invalid_values VALUES('\'aaa\''); +INSERT INTO invalid_values VALUES('\'bbb\''); + +--let $sys_var=ROCKSDB_RESET_STATS +--let $read_only=0 +--let $session=0 +--source ../include/rocksdb_sys_var.inc + +DROP TABLE valid_values; +DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_seconds_between_stat_computes_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_seconds_between_stat_computes_basic.test index a71df41affc51..9b6d89042651d 100644 --- a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_seconds_between_stat_computes_basic.test +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_seconds_between_stat_computes_basic.test @@ -12,7 +12,7 @@ INSERT INTO invalid_values VALUES('\'bbb\''); --let $sys_var=ROCKSDB_SECONDS_BETWEEN_STAT_COMPUTES --let $read_only=0 --let $session=0 ---source suite/sys_vars/inc/rocksdb_sys_var.inc +--source ../include/rocksdb_sys_var.inc DROP TABLE valid_values; DROP TABLE invalid_values; diff --git 
a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_signal_drop_index_thread_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_signal_drop_index_thread_basic.test index b33f444199bf8..449014217c568 100644 --- a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_signal_drop_index_thread_basic.test +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_signal_drop_index_thread_basic.test @@ -13,7 +13,7 @@ INSERT INTO invalid_values VALUES('\'bbb\''); --let $read_only=0 --let $session=0 --let $sticky=1 ---source suite/sys_vars/inc/rocksdb_sys_var.inc +--source ../include/rocksdb_sys_var.inc DROP TABLE valid_values; DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_skip_bloom_filter_on_read_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_skip_bloom_filter_on_read_basic.test index 80a9c4b3c43fc..c2508e2bc145f 100644 --- a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_skip_bloom_filter_on_read_basic.test +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_skip_bloom_filter_on_read_basic.test @@ -12,7 +12,7 @@ INSERT INTO invalid_values VALUES('\'bbb\''); --let $sys_var=ROCKSDB_SKIP_BLOOM_FILTER_ON_READ --let $read_only=0 --let $session=1 ---source suite/sys_vars/inc/rocksdb_sys_var.inc +--source ../include/rocksdb_sys_var.inc DROP TABLE valid_values; DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_skip_fill_cache_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_skip_fill_cache_basic.test index 2465e569f79c9..73d6f91df657c 100644 --- a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_skip_fill_cache_basic.test +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_skip_fill_cache_basic.test @@ -12,7 +12,7 @@ INSERT INTO invalid_values VALUES('\'bbb\''); --let $sys_var=ROCKSDB_SKIP_FILL_CACHE --let $read_only=0 --let $session=1 ---source suite/sys_vars/inc/rocksdb_sys_var.inc +--source 
../include/rocksdb_sys_var.inc DROP TABLE valid_values; DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_skip_unique_check_tables_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_skip_unique_check_tables_basic.test index c64eeedb59476..4b55e76d8ff41 100644 --- a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_skip_unique_check_tables_basic.test +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_skip_unique_check_tables_basic.test @@ -9,7 +9,7 @@ CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; --let $sys_var=ROCKSDB_SKIP_UNIQUE_CHECK_TABLES --let $read_only=0 --let $session=1 ---source suite/sys_vars/inc/rocksdb_sys_var.inc +--source ../include/rocksdb_sys_var.inc DROP TABLE valid_values; DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_sst_mgr_rate_bytes_per_sec_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_sst_mgr_rate_bytes_per_sec_basic.test new file mode 100644 index 0000000000000..3492596d74be1 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_sst_mgr_rate_bytes_per_sec_basic.test @@ -0,0 +1,22 @@ +--source include/have_rocksdb.inc + +CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO valid_values VALUES(100); +INSERT INTO valid_values VALUES(1); +INSERT INTO valid_values VALUES(0); + +CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam; +INSERT INTO invalid_values VALUES('\'aaa\''); +INSERT INTO invalid_values VALUES('\'bbb\''); +INSERT INTO invalid_values VALUES('\'-1\''); +INSERT INTO invalid_values VALUES('\'101\''); +INSERT INTO invalid_values VALUES('\'484436\''); + +--let $sys_var=ROCKSDB_SST_MGR_RATE_BYTES_PER_SEC +--let $read_only=0 +--let $session=0 +--source ../include/rocksdb_sys_var.inc + +DROP TABLE valid_values; +DROP TABLE invalid_values; + diff --git 
a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_stats_dump_period_sec_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_stats_dump_period_sec_basic.test index 7854faa8ddfa7..90f6296aef2f7 100644 --- a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_stats_dump_period_sec_basic.test +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_stats_dump_period_sec_basic.test @@ -3,4 +3,4 @@ --let $sys_var=ROCKSDB_STATS_DUMP_PERIOD_SEC --let $read_only=1 --let $session=0 ---source suite/sys_vars/inc/rocksdb_sys_var.inc +--source ../include/rocksdb_sys_var.inc diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_store_row_debug_checksums_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_store_row_debug_checksums_basic.test index e9c04bcc45e3b..a58cb370438e6 100644 --- a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_store_row_debug_checksums_basic.test +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_store_row_debug_checksums_basic.test @@ -12,7 +12,7 @@ INSERT INTO invalid_values VALUES('\'bbb\''); --let $sys_var=ROCKSDB_STORE_ROW_DEBUG_CHECKSUMS --let $read_only=0 --let $session=1 ---source suite/sys_vars/inc/rocksdb_sys_var.inc +--source ../include/rocksdb_sys_var.inc DROP TABLE valid_values; DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_strict_collation_check_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_strict_collation_check_basic.test index eabc45ef6be0f..4ac328cca6116 100644 --- a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_strict_collation_check_basic.test +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_strict_collation_check_basic.test @@ -13,7 +13,7 @@ INSERT INTO invalid_values VALUES('\'bbb\''); --let $sys_var=ROCKSDB_STRICT_COLLATION_CHECK --let $read_only=0 --let $session=0 ---source suite/sys_vars/inc/rocksdb_sys_var.inc +--source ../include/rocksdb_sys_var.inc DROP TABLE valid_values; 
DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_table_cache_numshardbits_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_table_cache_numshardbits_basic.test index 77da9df9acdfc..2c50b015a76e1 100644 --- a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_table_cache_numshardbits_basic.test +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_table_cache_numshardbits_basic.test @@ -3,4 +3,4 @@ --let $sys_var=ROCKSDB_TABLE_CACHE_NUMSHARDBITS --let $read_only=1 --let $session=0 ---source suite/sys_vars/inc/rocksdb_sys_var.inc +--source ../include/rocksdb_sys_var.inc diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_table_stats_sampling_pct_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_table_stats_sampling_pct_basic.test index c3016742042c9..41c7cf54795e6 100644 --- a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_table_stats_sampling_pct_basic.test +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_table_stats_sampling_pct_basic.test @@ -15,7 +15,7 @@ INSERT INTO invalid_values VALUES('\'484436\''); --let $sys_var=ROCKSDB_TABLE_STATS_SAMPLING_PCT --let $read_only=0 --let $session=0 ---source suite/sys_vars/inc/rocksdb_sys_var.inc +--source ../include/rocksdb_sys_var.inc DROP TABLE valid_values; DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_trace_sst_api_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_trace_sst_api_basic.test index 83a0faaffe666..cc27e6e065ace 100644 --- a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_trace_sst_api_basic.test +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_trace_sst_api_basic.test @@ -12,7 +12,7 @@ INSERT INTO invalid_values VALUES('\'bbb\''); --let $sys_var=ROCKSDB_TRACE_SST_API --let $read_only=0 --let $session=1 ---source suite/sys_vars/inc/rocksdb_sys_var.inc +--source ../include/rocksdb_sys_var.inc DROP TABLE 
valid_values; DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_unsafe_for_binlog_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_unsafe_for_binlog_basic.test index 302a4173efcae..bfcafcfac0ae9 100644 --- a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_unsafe_for_binlog_basic.test +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_unsafe_for_binlog_basic.test @@ -12,7 +12,7 @@ INSERT INTO invalid_values VALUES('\'bbb\''); --let $sys_var=ROCKSDB_UNSAFE_FOR_BINLOG --let $read_only=0 --let $session=1 ---source suite/sys_vars/inc/rocksdb_sys_var.inc +--source ../include/rocksdb_sys_var.inc DROP TABLE valid_values; DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_update_cf_options_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_update_cf_options_basic.test new file mode 100644 index 0000000000000..15d5d870ae648 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_update_cf_options_basic.test @@ -0,0 +1,94 @@ +--source include/have_rocksdb.inc + +call mtr.add_suppression("MyRocks: NULL is not a valid option for updates to column family settings."); +call mtr.add_suppression("Invalid cf options, '=' expected *"); +call mtr.add_suppression("MyRocks: failed to parse the updated column family options = *"); +call mtr.add_suppression("Invalid cf config for default in override options *"); + +DROP TABLE IF EXISTS t1; + +# Need a table which has multiple partitions and column families associated +# with them to make sure that we're testing the valid scenario. 
+CREATE TABLE `t1` ( + `col1` bigint(20) NOT NULL, + `col2` varbinary(64) NOT NULL, + `col3` varbinary(256) NOT NULL, + `col4` bigint(20) NOT NULL, + `col5` mediumblob NOT NULL, + PRIMARY KEY (`col1`,`col2`,`col3`) COMMENT 'custom_p0_cfname=cf1;custom_p1_cfname=cf2', + UNIQUE KEY (`col2`, `col4`) COMMENT 'custom_p5_cfname=cf3' +) ENGINE=ROCKSDB DEFAULT CHARSET=latin1 + PARTITION BY LIST COLUMNS (`col2`) ( + PARTITION custom_p0 VALUES IN (0x12345), + PARTITION custom_p1 VALUES IN (0x23456), + PARTITION custom_p2 VALUES IN (0x34567), + PARTITION custom_p3 VALUES IN (0x45678), + PARTITION custom_p4 VALUES IN (0x56789), + PARTITION custom_p5 VALUES IN (0x6789A), + PARTITION custom_p6 VALUES IN (0x789AB), + PARTITION custom_p7 VALUES IN (0x89ABC) +); + +USE information_schema; + +# We should start with NULL. +SELECT @@global.rocksdb_update_cf_options; + +# ... and we should be able to handle NULL and issue a reasonable warning. +SET @@global.rocksdb_update_cf_options = NULL; +SELECT @@global.rocksdb_update_cf_options; + +# Will fail to parse. Value not updated. +SET @@global.rocksdb_update_cf_options = 'aaaaa'; +SELECT @@global.rocksdb_update_cf_options; + +SELECT * FROM ROCKSDB_CF_OPTIONS WHERE CF_NAME='default' AND OPTION_TYPE='WRITE_BUFFER_SIZE'; +SELECT * FROM ROCKSDB_CF_OPTIONS WHERE CF_NAME='default' AND OPTION_TYPE='TARGET_FILE_SIZE_BASE'; + +# All good. Use default CF. 
+SET @@global.rocksdb_update_cf_options = 'default={write_buffer_size=8m;target_file_size_base=2m};'; +SELECT @@global.rocksdb_update_cf_options; + +SELECT * FROM ROCKSDB_CF_OPTIONS WHERE CF_NAME='default' AND OPTION_TYPE='WRITE_BUFFER_SIZE'; +SELECT * FROM ROCKSDB_CF_OPTIONS WHERE CF_NAME='default' AND OPTION_TYPE='TARGET_FILE_SIZE_BASE'; + +SELECT * FROM ROCKSDB_CF_OPTIONS WHERE CF_NAME='cf1' AND OPTION_TYPE='WRITE_BUFFER_SIZE'; +SELECT * FROM ROCKSDB_CF_OPTIONS WHERE CF_NAME='cf1' AND OPTION_TYPE='TARGET_FILE_SIZE_BASE'; + +SELECT * FROM ROCKSDB_CF_OPTIONS WHERE CF_NAME='cf2' AND OPTION_TYPE='WRITE_BUFFER_SIZE'; +SELECT * FROM ROCKSDB_CF_OPTIONS WHERE CF_NAME='cf2' AND OPTION_TYPE='MAX_BYTES_FOR_LEVEL_MULTIPLIER'; + +SELECT * FROM ROCKSDB_CF_OPTIONS WHERE CF_NAME='cf3' AND OPTION_TYPE='TARGET_FILE_SIZE_BASE'; + +# All good. Use multiple valid CF-s. +SET @@global.rocksdb_update_cf_options = 'cf1={write_buffer_size=8m;target_file_size_base=2m};cf2={write_buffer_size=16m;max_bytes_for_level_multiplier=8};cf3={target_file_size_base=4m};'; +SELECT @@global.rocksdb_update_cf_options; + +SELECT * FROM ROCKSDB_CF_OPTIONS WHERE CF_NAME='cf1' AND OPTION_TYPE='WRITE_BUFFER_SIZE'; +SELECT * FROM ROCKSDB_CF_OPTIONS WHERE CF_NAME='cf1' AND OPTION_TYPE='TARGET_FILE_SIZE_BASE'; + +SELECT * FROM ROCKSDB_CF_OPTIONS WHERE CF_NAME='cf2' AND OPTION_TYPE='WRITE_BUFFER_SIZE'; +SELECT * FROM ROCKSDB_CF_OPTIONS WHERE CF_NAME='cf2' AND OPTION_TYPE='MAX_BYTES_FOR_LEVEL_MULTIPLIER'; + +SELECT * FROM ROCKSDB_CF_OPTIONS WHERE CF_NAME='cf3' AND OPTION_TYPE='TARGET_FILE_SIZE_BASE'; + +# All good. Use a single valid CF. +SET @@global.rocksdb_update_cf_options = 'cf3={target_file_size_base=24m};'; +SELECT @@global.rocksdb_update_cf_options; + +SELECT * FROM ROCKSDB_CF_OPTIONS WHERE CF_NAME='cf3' AND OPTION_TYPE='TARGET_FILE_SIZE_BASE'; + +# Some parts are good. Value still updated. 
+SET @@global.rocksdb_update_cf_options = 'cf1={target_file_size_base=24m};foo={max_bytes_for_level_multiplier=8};'; +SELECT @@global.rocksdb_update_cf_options; + +SELECT * FROM ROCKSDB_CF_OPTIONS WHERE CF_NAME='cf1' AND OPTION_TYPE='TARGET_FILE_SIZE_BASE'; + +# Will fail to parse. No valid assignments included. Value not updated and +# reset to NULL. +SET @@global.rocksdb_update_cf_options = 'default={foo=bar};'; +SELECT @@global.rocksdb_update_cf_options; + +USE test; + +DROP TABLE t1; \ No newline at end of file diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_use_adaptive_mutex_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_use_adaptive_mutex_basic.test index a0f0a21298754..832118a9c8b2e 100644 --- a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_use_adaptive_mutex_basic.test +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_use_adaptive_mutex_basic.test @@ -3,4 +3,4 @@ --let $sys_var=ROCKSDB_USE_ADAPTIVE_MUTEX --let $read_only=1 --let $session=0 ---source suite/sys_vars/inc/rocksdb_sys_var.inc +--source ../include/rocksdb_sys_var.inc diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_use_direct_io_for_flush_and_compaction_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_use_direct_io_for_flush_and_compaction_basic.test new file mode 100644 index 0000000000000..f5dde2aa0a346 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_use_direct_io_for_flush_and_compaction_basic.test @@ -0,0 +1,6 @@ +--source include/have_rocksdb.inc + +--let $sys_var=ROCKSDB_USE_DIRECT_IO_FOR_FLUSH_AND_COMPACTION +--let $read_only=1 +--let $session=0 +--source ../include/rocksdb_sys_var.inc diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_use_direct_reads_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_use_direct_reads_basic.test index b730eca6f3b25..46b646647e52b 100644 --- 
a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_use_direct_reads_basic.test +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_use_direct_reads_basic.test @@ -3,4 +3,4 @@ --let $sys_var=ROCKSDB_USE_DIRECT_READS --let $read_only=1 --let $session=0 ---source suite/sys_vars/inc/rocksdb_sys_var.inc +--source ../include/rocksdb_sys_var.inc diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_use_direct_writes_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_use_direct_writes_basic.test deleted file mode 100644 index 2abb2478d824d..0000000000000 --- a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_use_direct_writes_basic.test +++ /dev/null @@ -1,6 +0,0 @@ ---source include/have_rocksdb.inc - ---let $sys_var=ROCKSDB_USE_DIRECT_WRITES ---let $read_only=1 ---let $session=0 ---source suite/sys_vars/inc/rocksdb_sys_var.inc diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_use_fsync_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_use_fsync_basic.test index 0d8e35d03cbba..93e6e40754c99 100644 --- a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_use_fsync_basic.test +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_use_fsync_basic.test @@ -3,4 +3,4 @@ --let $sys_var=ROCKSDB_USE_FSYNC --let $read_only=1 --let $session=0 ---source suite/sys_vars/inc/rocksdb_sys_var.inc +--source ../include/rocksdb_sys_var.inc diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_validate_tables_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_validate_tables_basic.test index 6eb965c586327..2357ed756385f 100644 --- a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_validate_tables_basic.test +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_validate_tables_basic.test @@ -3,4 +3,4 @@ --let $sys_var=ROCKSDB_VALIDATE_TABLES --let $read_only=1 --let $session=0 ---source suite/sys_vars/inc/rocksdb_sys_var.inc +--source ../include/rocksdb_sys_var.inc 
diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_verify_row_debug_checksums_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_verify_row_debug_checksums_basic.test index cc35fdb03454f..1c1c9a921e988 100644 --- a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_verify_row_debug_checksums_basic.test +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_verify_row_debug_checksums_basic.test @@ -12,7 +12,7 @@ INSERT INTO invalid_values VALUES('\'bbb\''); --let $sys_var=ROCKSDB_VERIFY_ROW_DEBUG_CHECKSUMS --let $read_only=0 --let $session=1 ---source suite/sys_vars/inc/rocksdb_sys_var.inc +--source ../include/rocksdb_sys_var.inc DROP TABLE valid_values; DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_wal_bytes_per_sync_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_wal_bytes_per_sync_basic.test index eeeeed8f767c3..5296a37489fc1 100644 --- a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_wal_bytes_per_sync_basic.test +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_wal_bytes_per_sync_basic.test @@ -3,4 +3,4 @@ --let $sys_var=ROCKSDB_WAL_BYTES_PER_SYNC --let $read_only=1 --let $session=0 ---source suite/sys_vars/inc/rocksdb_sys_var.inc +--source ../include/rocksdb_sys_var.inc diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_wal_dir_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_wal_dir_basic.test index 0d667d8de2c86..24632842f2460 100644 --- a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_wal_dir_basic.test +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_wal_dir_basic.test @@ -3,4 +3,4 @@ --let $sys_var=ROCKSDB_WAL_DIR --let $read_only=1 --let $session=0 ---source suite/sys_vars/inc/rocksdb_sys_var.inc +--source ../include/rocksdb_sys_var.inc diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_wal_recovery_mode_basic.test 
b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_wal_recovery_mode_basic.test index e0d345b42685d..987040d6793fe 100644 --- a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_wal_recovery_mode_basic.test +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_wal_recovery_mode_basic.test @@ -10,7 +10,7 @@ INSERT INTO invalid_values VALUES('\'aaa\''); --let $sys_var=ROCKSDB_WAL_RECOVERY_MODE --let $read_only=0 --let $session=0 ---source suite/sys_vars/inc/rocksdb_sys_var.inc +--source ../include/rocksdb_sys_var.inc DROP TABLE valid_values; DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_wal_size_limit_mb_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_wal_size_limit_mb_basic.test index d597b5d801607..b0b9e2f535532 100644 --- a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_wal_size_limit_mb_basic.test +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_wal_size_limit_mb_basic.test @@ -3,4 +3,4 @@ --let $sys_var=ROCKSDB_WAL_SIZE_LIMIT_MB --let $read_only=1 --let $session=0 ---source suite/sys_vars/inc/rocksdb_sys_var.inc +--source ../include/rocksdb_sys_var.inc diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_wal_ttl_seconds_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_wal_ttl_seconds_basic.test index 9342c861168ed..ef6b6c8764902 100644 --- a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_wal_ttl_seconds_basic.test +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_wal_ttl_seconds_basic.test @@ -3,4 +3,4 @@ --let $sys_var=ROCKSDB_WAL_TTL_SECONDS --let $read_only=1 --let $session=0 ---source suite/sys_vars/inc/rocksdb_sys_var.inc +--source ../include/rocksdb_sys_var.inc diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_whole_key_filtering_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_whole_key_filtering_basic.test index ff332a6fbe480..7adf29a89b0b8 100644 --- 
a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_whole_key_filtering_basic.test +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_whole_key_filtering_basic.test @@ -3,4 +3,4 @@ --let $sys_var=ROCKSDB_WHOLE_KEY_FILTERING --let $read_only=1 --let $session=0 ---source suite/sys_vars/inc/rocksdb_sys_var.inc +--source ../include/rocksdb_sys_var.inc diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_write_batch_max_bytes_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_write_batch_max_bytes_basic.test new file mode 100644 index 0000000000000..295d8e1594bb7 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_write_batch_max_bytes_basic.test @@ -0,0 +1,26 @@ +--source include/have_rocksdb.inc + +create table t (i int); + +insert into t values (1), (2), (3), (4), (5); + +set session rocksdb_write_batch_max_bytes = 1000; + +insert into t values (1), (2), (3), (4), (5); + +set session rocksdb_write_batch_max_bytes = 10; + +--error ER_RDB_STATUS_GENERAL +insert into t values (1), (2), (3), (4), (5); + +set session rocksdb_write_batch_max_bytes = 0; + +insert into t values (1), (2), (3), (4), (5); + +set session rocksdb_write_batch_max_bytes = 10; +begin; +--error ER_RDB_STATUS_GENERAL +insert into t values (1), (2), (3), (4), (5); +rollback; + +drop table t; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_write_disable_wal_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_write_disable_wal_basic.test index f7fa33e03e67f..fdd868ac7bcb9 100644 --- a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_write_disable_wal_basic.test +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_write_disable_wal_basic.test @@ -12,7 +12,7 @@ INSERT INTO invalid_values VALUES('\'aaa\''); --let $sys_var=ROCKSDB_WRITE_DISABLE_WAL --let $read_only=0 --let $session=1 ---source suite/sys_vars/inc/rocksdb_sys_var.inc +--source ../include/rocksdb_sys_var.inc DROP TABLE valid_values; 
DROP TABLE invalid_values; diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_write_ignore_missing_column_families_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_write_ignore_missing_column_families_basic.test index 1c9d35f4a57e2..83174e996f01c 100644 --- a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_write_ignore_missing_column_families_basic.test +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_write_ignore_missing_column_families_basic.test @@ -12,7 +12,7 @@ INSERT INTO invalid_values VALUES('\'bbb\''); --let $sys_var=ROCKSDB_WRITE_IGNORE_MISSING_COLUMN_FAMILIES --let $read_only=0 --let $session=1 ---source suite/sys_vars/inc/rocksdb_sys_var.inc +--source ../include/rocksdb_sys_var.inc DROP TABLE valid_values; DROP TABLE invalid_values; diff --git a/storage/rocksdb/rdb_buff.h b/storage/rocksdb/rdb_buff.h index f035085cae6f3..7a8a00215095a 100644 --- a/storage/rocksdb/rdb_buff.h +++ b/storage/rocksdb/rdb_buff.h @@ -280,6 +280,16 @@ class Rdb_string_reader { } } + bool read_uint64(uint64 *const res) { + const uchar *p; + if (!(p = reinterpret_cast(read(sizeof(uint64))))) { + return true; // error + } else { + *res = rdb_netbuf_to_uint64(p); + return false; // Ok + } + } + uint remaining_bytes() const { return m_len; } /* diff --git a/storage/rocksdb/rdb_cf_manager.cc b/storage/rocksdb/rdb_cf_manager.cc index e81435248ec17..bfdcaf80b9148 100644 --- a/storage/rocksdb/rdb_cf_manager.cc +++ b/storage/rocksdb/rdb_cf_manager.cc @@ -36,7 +36,7 @@ bool Rdb_cf_manager::is_cf_name_reverse(const char *const name) { } void Rdb_cf_manager::init( - Rdb_cf_options *const cf_options, + std::unique_ptr cf_options, std::vector *const handles) { mysql_mutex_init(rdb_cfm_mutex_key, &m_mutex, MY_MUTEX_INIT_FAST); @@ -44,7 +44,7 @@ void Rdb_cf_manager::init( DBUG_ASSERT(handles != nullptr); DBUG_ASSERT(handles->size() > 0); - m_cf_options = cf_options; + m_cf_options = std::move(cf_options); for (auto cfh : *handles) { 
DBUG_ASSERT(cfh != nullptr); @@ -58,21 +58,7 @@ void Rdb_cf_manager::cleanup() { delete it.second; } mysql_mutex_destroy(&m_mutex); -} - -/** - Generate Column Family name for per-index column families - - @param res OUT Column Family name -*/ - -void Rdb_cf_manager::get_per_index_cf_name(const std::string &db_table_name, - const char *const index_name, - std::string *const res) { - DBUG_ASSERT(index_name != nullptr); - DBUG_ASSERT(res != nullptr); - - *res = db_table_name + "." + index_name; + m_cf_options = nullptr; } /* @@ -83,32 +69,22 @@ void Rdb_cf_manager::get_per_index_cf_name(const std::string &db_table_name, See Rdb_cf_manager::get_cf */ rocksdb::ColumnFamilyHandle * -Rdb_cf_manager::get_or_create_cf(rocksdb::DB *const rdb, const char *cf_name, - const std::string &db_table_name, - const char *const index_name, - bool *const is_automatic) { +Rdb_cf_manager::get_or_create_cf(rocksdb::DB *const rdb, + const std::string &cf_name_arg) { DBUG_ASSERT(rdb != nullptr); - DBUG_ASSERT(is_automatic != nullptr); rocksdb::ColumnFamilyHandle *cf_handle = nullptr; - RDB_MUTEX_LOCK_CHECK(m_mutex); - - *is_automatic = false; - - if (cf_name == nullptr || *cf_name == '\0') { - cf_name = DEFAULT_CF_NAME; + if (cf_name_arg == PER_INDEX_CF_NAME) { + // per-index column families is no longer supported. + my_error(ER_PER_INDEX_CF_DEPRECATED, MYF(0)); + return nullptr; } - DBUG_ASSERT(cf_name != nullptr); + const std::string &cf_name = + cf_name_arg.empty() ? DEFAULT_CF_NAME : cf_name_arg; - std::string per_index_name; - - if (!strcmp(cf_name, PER_INDEX_CF_NAME)) { - get_per_index_cf_name(db_table_name, index_name, &per_index_name); - cf_name = per_index_name.c_str(); - *is_automatic = true; - } + RDB_MUTEX_LOCK_CHECK(m_mutex); const auto it = m_cf_name_map.find(cf_name); @@ -116,19 +92,18 @@ Rdb_cf_manager::get_or_create_cf(rocksdb::DB *const rdb, const char *cf_name, cf_handle = it->second; } else { /* Create a Column Family. 
*/ - const std::string cf_name_str(cf_name); rocksdb::ColumnFamilyOptions opts; - m_cf_options->get_cf_options(cf_name_str, &opts); + m_cf_options->get_cf_options(cf_name, &opts); // NO_LINT_DEBUG sql_print_information("RocksDB: creating a column family %s", - cf_name_str.c_str()); + cf_name.c_str()); sql_print_information(" write_buffer_size=%ld", opts.write_buffer_size); sql_print_information(" target_file_size_base=%" PRIu64, opts.target_file_size_base); const rocksdb::Status s = - rdb->CreateColumnFamily(opts, cf_name_str, &cf_handle); + rdb->CreateColumnFamily(opts, cf_name, &cf_handle); if (s.ok()) { m_cf_name_map[cf_handle->GetName()] = cf_handle; @@ -145,47 +120,22 @@ Rdb_cf_manager::get_or_create_cf(rocksdb::DB *const rdb, const char *cf_name, /* Find column family by its cf_name. - - @detail - dbname.tablename and index_name are also parameters, because - cf_name=PER_INDEX_CF_NAME means that column family name is a function - of table/index name. - - @param out is_automatic TRUE<=> column family name is auto-assigned based on - db_table_name and index_name. */ rocksdb::ColumnFamilyHandle * -Rdb_cf_manager::get_cf(const char *cf_name, const std::string &db_table_name, - const char *const index_name, - bool *const is_automatic) const { - DBUG_ASSERT(is_automatic != nullptr); - +Rdb_cf_manager::get_cf(const std::string &cf_name_arg) const { rocksdb::ColumnFamilyHandle *cf_handle; - *is_automatic = false; - RDB_MUTEX_LOCK_CHECK(m_mutex); - if (cf_name == nullptr) { - cf_name = DEFAULT_CF_NAME; - } - - std::string per_index_name; - - if (!strcmp(cf_name, PER_INDEX_CF_NAME)) { - get_per_index_cf_name(db_table_name, index_name, &per_index_name); - DBUG_ASSERT(!per_index_name.empty()); - cf_name = per_index_name.c_str(); - *is_automatic = true; - } + std::string cf_name = cf_name_arg.empty() ? DEFAULT_CF_NAME : cf_name_arg; const auto it = m_cf_name_map.find(cf_name); cf_handle = (it != m_cf_name_map.end()) ? 
it->second : nullptr; if (!cf_handle) { // NO_LINT_DEBUG - sql_print_warning("Column family '%s' not found.", cf_name); + sql_print_warning("Column family '%s' not found.", cf_name.c_str()); } RDB_MUTEX_UNLOCK_CHECK(m_mutex); @@ -224,6 +174,7 @@ Rdb_cf_manager::get_all_cf(void) const { RDB_MUTEX_LOCK_CHECK(m_mutex); for (auto it : m_cf_id_map) { + DBUG_ASSERT(it.second != nullptr); list.push_back(it.second); } diff --git a/storage/rocksdb/rdb_cf_manager.h b/storage/rocksdb/rdb_cf_manager.h index 7b9654f3537fd..3f27747dce193 100644 --- a/storage/rocksdb/rdb_cf_manager.h +++ b/storage/rocksdb/rdb_cf_manager.h @@ -52,13 +52,9 @@ class Rdb_cf_manager { mutable mysql_mutex_t m_mutex; - static void get_per_index_cf_name(const std::string &db_table_name, - const char *const index_name, - std::string *const res); + std::unique_ptr m_cf_options = nullptr; - Rdb_cf_options *m_cf_options = nullptr; - -public: + public: Rdb_cf_manager(const Rdb_cf_manager &) = delete; Rdb_cf_manager &operator=(const Rdb_cf_manager &) = delete; Rdb_cf_manager() = default; @@ -70,25 +66,19 @@ class Rdb_cf_manager { column families that are present in the database. The first CF is the default CF. */ - void init(Rdb_cf_options *cf_options, + void init(std::unique_ptr cf_options, std::vector *const handles); void cleanup(); /* Used by CREATE TABLE. 
- cf_name=nullptr means use default column family - - cf_name=_auto_ means use 'dbname.tablename.indexname' */ - rocksdb::ColumnFamilyHandle * - get_or_create_cf(rocksdb::DB *const rdb, const char *cf_name, - const std::string &db_table_name, - const char *const index_name, bool *const is_automatic); + rocksdb::ColumnFamilyHandle *get_or_create_cf(rocksdb::DB *const rdb, + const std::string &cf_name); /* Used by table open */ - rocksdb::ColumnFamilyHandle *get_cf(const char *cf_name, - const std::string &db_table_name, - const char *const index_name, - bool *const is_automatic) const; + rocksdb::ColumnFamilyHandle *get_cf(const std::string &cf_name) const; /* Look up cf by id; used by datadic */ rocksdb::ColumnFamilyHandle *get_cf(const uint32_t &id) const; @@ -106,6 +96,11 @@ class Rdb_cf_manager { MY_ATTRIBUTE((__nonnull__)) { m_cf_options->get_cf_options(cf_name, opts); } + + void update_options_map(const std::string &cf_name, + const std::string &updated_options) { + m_cf_options->update(cf_name, updated_options); + } }; } // namespace myrocks diff --git a/storage/rocksdb/rdb_cf_options.cc b/storage/rocksdb/rdb_cf_options.cc index 97dc16fe4e6ee..8f183882277fd 100644 --- a/storage/rocksdb/rdb_cf_options.cc +++ b/storage/rocksdb/rdb_cf_options.cc @@ -72,16 +72,28 @@ void Rdb_cf_options::get(const std::string &cf_name, rocksdb::ColumnFamilyOptions *const opts) { DBUG_ASSERT(opts != nullptr); - // set defaults + // Get defaults. rocksdb::GetColumnFamilyOptionsFromString(*opts, m_default_config, opts); - // set per-cf config if we have one + // Get a custom confguration if we have one. Name_to_config_t::iterator it = m_name_map.find(cf_name); + if (it != m_name_map.end()) { rocksdb::GetColumnFamilyOptionsFromString(*opts, it->second, opts); } } +void Rdb_cf_options::update(const std::string &cf_name, + const std::string &cf_options) { + DBUG_ASSERT(!cf_name.empty()); + DBUG_ASSERT(!cf_options.empty()); + + // Always update. 
If we didn't have an entry before then add it. + m_name_map[cf_name] = cf_options; + + DBUG_ASSERT(!m_name_map.empty()); +} + bool Rdb_cf_options::set_default(const std::string &default_config) { rocksdb::ColumnFamilyOptions options; @@ -245,27 +257,30 @@ bool Rdb_cf_options::find_cf_options_pair(const std::string &input, return true; } -bool Rdb_cf_options::set_override(const std::string &override_config) { - // TODO(???): support updates? - +bool Rdb_cf_options::parse_cf_options(const std::string &cf_options, + Name_to_config_t *option_map) { std::string cf; std::string opt_str; rocksdb::ColumnFamilyOptions options; - Name_to_config_t configs; + + DBUG_ASSERT(option_map != nullptr); + DBUG_ASSERT(option_map->empty()); // Loop through the characters of the string until we reach the end. size_t pos = 0; - while (pos < override_config.size()) { + + while (pos < cf_options.size()) { // Attempt to find ={}. - if (!find_cf_options_pair(override_config, &pos, &cf, &opt_str)) + if (!find_cf_options_pair(cf_options, &pos, &cf, &opt_str)) { return false; + } // Generate an error if we have already seen this column family. - if (configs.find(cf) != configs.end()) { + if (option_map->find(cf) != option_map->end()) { // NO_LINT_DEBUG sql_print_warning( "Duplicate entry for %s in override options (options: %s)", - cf.c_str(), override_config.c_str()); + cf.c_str(), cf_options.c_str()); return false; } @@ -275,12 +290,22 @@ bool Rdb_cf_options::set_override(const std::string &override_config) { // NO_LINT_DEBUG sql_print_warning( "Invalid cf config for %s in override options (options: %s)", - cf.c_str(), override_config.c_str()); + cf.c_str(), cf_options.c_str()); return false; } // If everything is good, add this cf/opt_str pair to the map. 
- configs[cf] = opt_str; + (*option_map)[cf] = opt_str; + } + + return true; +} + +bool Rdb_cf_options::set_override(const std::string &override_config) { + Name_to_config_t configs; + + if (!parse_cf_options(override_config, &configs)) { + return false; } // Everything checked out - make the map live diff --git a/storage/rocksdb/rdb_cf_options.h b/storage/rocksdb/rdb_cf_options.h index 1cd80a131ad33..32f2308284fcf 100644 --- a/storage/rocksdb/rdb_cf_options.h +++ b/storage/rocksdb/rdb_cf_options.h @@ -40,6 +40,8 @@ namespace myrocks { */ class Rdb_cf_options { public: + using Name_to_config_t = std::unordered_map; + Rdb_cf_options(const Rdb_cf_options &) = delete; Rdb_cf_options &operator=(const Rdb_cf_options &) = delete; Rdb_cf_options() = default; @@ -47,6 +49,8 @@ class Rdb_cf_options { void get(const std::string &cf_name, rocksdb::ColumnFamilyOptions *const opts); + void update(const std::string &cf_name, const std::string &cf_options); + bool init(const rocksdb::BlockBasedTableOptions &table_options, std::shared_ptr prop_coll_factory, @@ -64,6 +68,9 @@ class Rdb_cf_options { rocksdb::ColumnFamilyOptions *const opts) MY_ATTRIBUTE((__nonnull__)); + static bool parse_cf_options(const std::string &cf_options, + Name_to_config_t *option_map); + private: bool set_default(const std::string &default_config); bool set_override(const std::string &overide_config); @@ -82,8 +89,6 @@ class Rdb_cf_options { static Rdb_pk_comparator s_pk_comparator; static Rdb_rev_comparator s_rev_pk_comparator; - typedef std::unordered_map Name_to_config_t; - /* CF name -> value map */ Name_to_config_t m_name_map; diff --git a/storage/rocksdb/rdb_compact_filter.h b/storage/rocksdb/rdb_compact_filter.h index 9e0d69597ffc5..4696a1985b67d 100644 --- a/storage/rocksdb/rdb_compact_filter.h +++ b/storage/rocksdb/rdb_compact_filter.h @@ -22,6 +22,7 @@ /* C++ system header files */ #include +#include /* RocksDB includes */ #include "rocksdb/compaction_filter.h" @@ -38,7 +39,10 @@ class 
Rdb_compact_filter : public rocksdb::CompactionFilter { Rdb_compact_filter &operator=(const Rdb_compact_filter &) = delete; explicit Rdb_compact_filter(uint32_t _cf_id) : m_cf_id(_cf_id) {} - ~Rdb_compact_filter() {} + ~Rdb_compact_filter() { + // Increment stats by num expired at the end of compaction + rdb_update_global_stats(ROWS_EXPIRED, m_num_expired); + } // keys are passed in sorted order within the same sst. // V1 Filter is thread safe on our usage (creating from Factory). @@ -55,36 +59,133 @@ class Rdb_compact_filter : public rocksdb::CompactionFilter { gl_index_id.index_id = rdb_netbuf_to_uint32((const uchar *)key.data()); DBUG_ASSERT(gl_index_id.index_id >= 1); - if (gl_index_id != m_prev_index) // processing new index id - { - if (m_num_deleted > 0) { - m_num_deleted = 0; - } + if (gl_index_id != m_prev_index) { m_should_delete = rdb_get_dict_manager()->is_drop_index_ongoing(gl_index_id); + + if (!m_should_delete) { + get_ttl_duration_and_offset(gl_index_id, &m_ttl_duration, + &m_ttl_offset); + + if (m_ttl_duration != 0 && m_snapshot_timestamp == 0) { + /* + For efficiency reasons, we lazily call GetIntProperty to get the + oldest snapshot time (occurs once per compaction). 
+ */ + rocksdb::DB *const rdb = rdb_get_rocksdb_db(); + if (!rdb->GetIntProperty(rocksdb::DB::Properties::kOldestSnapshotTime, + &m_snapshot_timestamp) || + m_snapshot_timestamp == 0) { + m_snapshot_timestamp = static_cast(std::time(nullptr)); + } + +#ifndef NDEBUG + int snapshot_ts = rdb_dbug_set_ttl_snapshot_ts(); + if (snapshot_ts) { + m_snapshot_timestamp = + static_cast(std::time(nullptr)) + snapshot_ts; + } +#endif + } + } + m_prev_index = gl_index_id; } if (m_should_delete) { m_num_deleted++; + return true; + } else if (m_ttl_duration > 0 && + should_filter_ttl_rec(key, existing_value)) { + m_num_expired++; + return true; } - return m_should_delete; + return false; } virtual bool IgnoreSnapshots() const override { return true; } virtual const char *Name() const override { return "Rdb_compact_filter"; } -private: + void get_ttl_duration_and_offset(const GL_INDEX_ID &gl_index_id, + uint64 *ttl_duration, + uint32 *ttl_offset) const { + DBUG_ASSERT(ttl_duration != nullptr); + /* + If TTL is disabled set ttl_duration to 0. This prevents the compaction + filter from dropping expired records. + */ + if (!rdb_is_ttl_enabled()) { + *ttl_duration = 0; + return; + } + + /* + If key is part of system column family, it's definitely not a TTL key. 
+ */ + rocksdb::ColumnFamilyHandle *s_cf = rdb_get_dict_manager()->get_system_cf(); + if (s_cf == nullptr || gl_index_id.cf_id == s_cf->GetID()) { + *ttl_duration = 0; + return; + } + + struct Rdb_index_info index_info; + if (!rdb_get_dict_manager()->get_index_info(gl_index_id, &index_info)) { + // NO_LINT_DEBUG + sql_print_error("RocksDB: Could not get index information " + "for Index Number (%u,%u)", + gl_index_id.cf_id, gl_index_id.index_id); + } + + *ttl_duration = index_info.m_ttl_duration; + if (Rdb_key_def::has_index_flag(index_info.m_index_flags, + Rdb_key_def::TTL_FLAG)) { + *ttl_offset = Rdb_key_def::calculate_index_flag_offset( + index_info.m_index_flags, Rdb_key_def::TTL_FLAG); + } + } + + bool should_filter_ttl_rec(const rocksdb::Slice &key, + const rocksdb::Slice &existing_value) const { + uint64 ttl_timestamp; + Rdb_string_reader reader(&existing_value); + if (!reader.read(m_ttl_offset) || reader.read_uint64(&ttl_timestamp)) { + std::string buf; + buf = rdb_hexdump(existing_value.data(), existing_value.size(), + RDB_MAX_HEXDUMP_LEN); + // NO_LINT_DEBUG + sql_print_error("Decoding ttl from PK value failed in compaction filter, " + "for index (%u,%u), val: %s", + m_prev_index.cf_id, m_prev_index.index_id, buf.c_str()); + abort_with_stack_traces(); + } + + /* + Filter out the record only if it is older than the oldest snapshot + timestamp. This prevents any rows from expiring in the middle of + long-running transactions. 
+ */ + return ttl_timestamp + m_ttl_duration <= m_snapshot_timestamp; + } + + private: // Column family for this compaction filter const uint32_t m_cf_id; // Index id of the previous record mutable GL_INDEX_ID m_prev_index = {0, 0}; // Number of rows deleted for the same index id mutable uint64 m_num_deleted = 0; + // Number of rows expired for the TTL index + mutable uint64 m_num_expired = 0; // Current index id should be deleted or not (should be deleted if true) mutable bool m_should_delete = false; + // TTL duration for the current index if TTL is enabled + mutable uint64 m_ttl_duration = 0; + // TTL offset for all records in the current index + mutable uint32 m_ttl_offset = 0; + // Oldest snapshot timestamp at the time a TTL index is discovered + mutable uint64_t m_snapshot_timestamp = 0; }; class Rdb_compact_filter_factory : public rocksdb::CompactionFilterFactory { diff --git a/storage/rocksdb/rdb_datadic.cc b/storage/rocksdb/rdb_datadic.cc index f81fe59fe7c5b..7d6001a0f9835 100644 --- a/storage/rocksdb/rdb_datadic.cc +++ b/storage/rocksdb/rdb_datadic.cc @@ -55,16 +55,20 @@ Rdb_key_def::Rdb_key_def(uint indexnr_arg, uint keyno_arg, rocksdb::ColumnFamilyHandle *cf_handle_arg, uint16_t index_dict_version_arg, uchar index_type_arg, uint16_t kv_format_version_arg, bool is_reverse_cf_arg, - bool is_auto_cf_arg, bool is_per_partition_cf_arg, - const char *_name, Rdb_index_stats _stats) + bool is_per_partition_cf_arg, const char *_name, + Rdb_index_stats _stats, uint32 index_flags_bitmap, + uint32 ttl_rec_offset, uint64 ttl_duration) : m_index_number(indexnr_arg), m_cf_handle(cf_handle_arg), m_index_dict_version(index_dict_version_arg), m_index_type(index_type_arg), m_kv_format_version(kv_format_version_arg), - m_is_reverse_cf(is_reverse_cf_arg), m_is_auto_cf(is_auto_cf_arg), - m_is_per_partition_cf(is_per_partition_cf_arg), - m_name(_name), m_stats(_stats), m_pk_part_no(nullptr), - m_pack_info(nullptr), m_keyno(keyno_arg), m_key_parts(0), - 
m_prefix_extractor(nullptr), m_maxlength(0) // means 'not intialized' + m_is_reverse_cf(is_reverse_cf_arg), + m_is_per_partition_cf(is_per_partition_cf_arg), m_name(_name), + m_stats(_stats), m_index_flags_bitmap(index_flags_bitmap), + m_ttl_rec_offset(ttl_rec_offset), m_ttl_duration(ttl_duration), + m_ttl_column(""), m_pk_part_no(nullptr), m_pack_info(nullptr), + m_keyno(keyno_arg), m_key_parts(0), m_ttl_pk_key_part_offset(UINT_MAX), + m_ttl_field_offset(UINT_MAX), m_prefix_extractor(nullptr), + m_maxlength(0) // means 'not intialized' { mysql_mutex_init(0, &m_mutex, MY_MUTEX_INIT_FAST); rdb_netbuf_store_index(m_index_number_storage_form, m_index_number); @@ -73,11 +77,15 @@ Rdb_key_def::Rdb_key_def(uint indexnr_arg, uint keyno_arg, Rdb_key_def::Rdb_key_def(const Rdb_key_def &k) : m_index_number(k.m_index_number), m_cf_handle(k.m_cf_handle), - m_is_reverse_cf(k.m_is_reverse_cf), m_is_auto_cf(k.m_is_auto_cf), - m_is_per_partition_cf(k.m_is_per_partition_cf), - m_name(k.m_name), m_stats(k.m_stats), m_pk_part_no(k.m_pk_part_no), + m_is_reverse_cf(k.m_is_reverse_cf), + m_is_per_partition_cf(k.m_is_per_partition_cf), m_name(k.m_name), + m_stats(k.m_stats), m_index_flags_bitmap(k.m_index_flags_bitmap), + m_ttl_rec_offset(k.m_ttl_rec_offset), m_ttl_duration(k.m_ttl_duration), + m_ttl_column(k.m_ttl_column), m_pk_part_no(k.m_pk_part_no), m_pack_info(k.m_pack_info), m_keyno(k.m_keyno), - m_key_parts(k.m_key_parts), m_prefix_extractor(k.m_prefix_extractor), + m_key_parts(k.m_key_parts), + m_ttl_pk_key_part_offset(k.m_ttl_pk_key_part_offset), + m_ttl_field_offset(UINT_MAX), m_prefix_extractor(k.m_prefix_extractor), m_maxlength(k.m_maxlength) { mysql_mutex_init(0, &m_mutex, MY_MUTEX_INIT_FAST); rdb_netbuf_store_index(m_index_number_storage_form, m_index_number); @@ -173,6 +181,13 @@ void Rdb_key_def::setup(const TABLE *const tbl, m_pack_info = reinterpret_cast(my_malloc(size, MYF(0))); + /* + Guaranteed not to error here as checks have been made already during + table 
creation. + */ + Rdb_key_def::extract_ttl_col(tbl, tbl_def, &m_ttl_column, + &m_ttl_field_offset, true); + size_t max_len = INDEX_NUMBER_SIZE; int unpack_len = 0; int max_part_len = 0; @@ -245,6 +260,18 @@ void Rdb_key_def::setup(const TABLE *const tbl, max_part_len = std::max(max_part_len, m_pack_info[dst_i].m_max_image_len); + /* + Check key part name here, if it matches the TTL column then we store + the offset of the TTL key part here. + */ + if (!m_ttl_column.empty() && + field->check_field_name_match(m_ttl_column.c_str())) { + DBUG_ASSERT(field->real_type() == MYSQL_TYPE_LONGLONG); + DBUG_ASSERT(field->key_type() == HA_KEYTYPE_ULONGLONG); + DBUG_ASSERT(!field->real_maybe_null()); + m_ttl_pk_key_part_offset = dst_i; + } + key_part++; /* For "unique" secondary indexes, pretend they have @@ -286,6 +313,245 @@ void Rdb_key_def::setup(const TABLE *const tbl, } } +/* + Determine if the table has TTL enabled by parsing the table comment. + + @param[IN] table_arg + @param[IN] tbl_def_arg + @param[OUT] ttl_duration Default TTL value parsed from table comment +*/ +uint Rdb_key_def::extract_ttl_duration(const TABLE *const table_arg, + const Rdb_tbl_def *const tbl_def_arg, + uint64 *ttl_duration) { + DBUG_ASSERT(table_arg != nullptr); + DBUG_ASSERT(tbl_def_arg != nullptr); + DBUG_ASSERT(ttl_duration != nullptr); + std::string table_comment(table_arg->s->comment.str, + table_arg->s->comment.length); + + bool ttl_duration_per_part_match_found = false; + std::string ttl_duration_str = Rdb_key_def::parse_comment_for_qualifier( + table_comment, table_arg, tbl_def_arg, &ttl_duration_per_part_match_found, + RDB_TTL_DURATION_QUALIFIER); + + /* If we don't have a ttl duration, nothing to do here. */ + if (ttl_duration_str.empty()) { + return HA_EXIT_SUCCESS; + } + + /* + Catch errors where a non-integral value was used as ttl duration, strtoull + will return 0. 
+ */ + *ttl_duration = std::strtoull(ttl_duration_str.c_str(), nullptr, 0); + if (!*ttl_duration) { + my_error(ER_RDB_TTL_DURATION_FORMAT, MYF(0), ttl_duration_str.c_str()); + return HA_EXIT_FAILURE; + } + + return HA_EXIT_SUCCESS; +} + +/* + Determine if the table has TTL enabled by parsing the table comment. + + @param[IN] table_arg + @param[IN] tbl_def_arg + @param[OUT] ttl_column TTL column in the table + @param[IN] skip_checks Skip validation checks (when called in + setup()) +*/ +uint Rdb_key_def::extract_ttl_col(const TABLE *const table_arg, + const Rdb_tbl_def *const tbl_def_arg, + std::string *ttl_column, + uint *ttl_field_offset, bool skip_checks) { + std::string table_comment(table_arg->s->comment.str, + table_arg->s->comment.length); + /* + Check if there is a TTL column specified. Note that this is not required + and if omitted, an 8-byte ttl field will be prepended to each record + implicitly. + */ + bool ttl_col_per_part_match_found = false; + std::string ttl_col_str = Rdb_key_def::parse_comment_for_qualifier( + table_comment, table_arg, tbl_def_arg, &ttl_col_per_part_match_found, + RDB_TTL_COL_QUALIFIER); + + if (skip_checks) { + for (uint i = 0; i < table_arg->s->fields; i++) { + Field *const field = table_arg->field[i]; + if (field->check_field_name_match(ttl_col_str.c_str())) { + *ttl_column = ttl_col_str; + *ttl_field_offset = i; + } + } + return HA_EXIT_SUCCESS; + } + + /* Check if TTL column exists in table */ + if (!ttl_col_str.empty()) { + bool found = false; + for (uint i = 0; i < table_arg->s->fields; i++) { + Field *const field = table_arg->field[i]; + if (field->check_field_name_match(ttl_col_str.c_str()) && + field->real_type() == MYSQL_TYPE_LONGLONG && + field->key_type() == HA_KEYTYPE_ULONGLONG && + !field->real_maybe_null()) { + *ttl_column = ttl_col_str; + *ttl_field_offset = i; + found = true; + break; + } + } + + if (!found) { + my_error(ER_RDB_TTL_COL_FORMAT, MYF(0), ttl_col_str.c_str()); + return HA_EXIT_FAILURE; + } + } + + 
return HA_EXIT_SUCCESS; +} + +const std::string +Rdb_key_def::gen_qualifier_for_table(const char *const qualifier, + const std::string &partition_name) { + bool has_partition = !partition_name.empty(); + std::string qualifier_str = ""; + + if (!strcmp(qualifier, RDB_CF_NAME_QUALIFIER)) { + return has_partition ? gen_cf_name_qualifier_for_partition(partition_name) + : qualifier_str + RDB_CF_NAME_QUALIFIER + + RDB_QUALIFIER_VALUE_SEP; + } else if (!strcmp(qualifier, RDB_TTL_DURATION_QUALIFIER)) { + return has_partition + ? gen_ttl_duration_qualifier_for_partition(partition_name) + : qualifier_str + RDB_TTL_DURATION_QUALIFIER + + RDB_QUALIFIER_VALUE_SEP; + } else if (!strcmp(qualifier, RDB_TTL_COL_QUALIFIER)) { + return has_partition ? gen_ttl_col_qualifier_for_partition(partition_name) + : qualifier_str + RDB_TTL_COL_QUALIFIER + + RDB_QUALIFIER_VALUE_SEP; + } else { + DBUG_ASSERT(0); + } + + return qualifier_str; +} + +/* + Formats the string and returns the column family name assignment part for a + specific partition. 
+*/ +const std::string +Rdb_key_def::gen_cf_name_qualifier_for_partition(const std::string &prefix) { + DBUG_ASSERT(!prefix.empty()); + + return prefix + RDB_PER_PARTITION_QUALIFIER_NAME_SEP + RDB_CF_NAME_QUALIFIER + + RDB_QUALIFIER_VALUE_SEP; +} + +const std::string Rdb_key_def::gen_ttl_duration_qualifier_for_partition( + const std::string &prefix) { + DBUG_ASSERT(!prefix.empty()); + + return prefix + RDB_PER_PARTITION_QUALIFIER_NAME_SEP + + RDB_TTL_DURATION_QUALIFIER + RDB_QUALIFIER_VALUE_SEP; +} + +const std::string +Rdb_key_def::gen_ttl_col_qualifier_for_partition(const std::string &prefix) { + DBUG_ASSERT(!prefix.empty()); + + return prefix + RDB_PER_PARTITION_QUALIFIER_NAME_SEP + RDB_TTL_COL_QUALIFIER + + RDB_QUALIFIER_VALUE_SEP; +} + +const std::string Rdb_key_def::parse_comment_for_qualifier( + const std::string &comment, const TABLE *const table_arg, + const Rdb_tbl_def *const tbl_def_arg, bool *per_part_match_found, + const char *const qualifier) { + DBUG_ASSERT(table_arg != nullptr); + DBUG_ASSERT(tbl_def_arg != nullptr); + DBUG_ASSERT(per_part_match_found != nullptr); + DBUG_ASSERT(qualifier != nullptr); + + std::string empty_result; + + // Flag which marks if partition specific options were found. + *per_part_match_found = false; + + if (comment.empty()) { + return empty_result; + } + + // Let's fetch the comment for a index and check if there's a custom key + // name specified for a partition we are handling. + std::vector v = + myrocks::parse_into_tokens(comment, RDB_QUALIFIER_SEP); + + std::string search_str = gen_qualifier_for_table(qualifier); + + // If table has partitions then we need to check if user has requested + // qualifiers on a per partition basis. + // + // NOTE: this means if you specify a qualifier for a specific partition it + // will take precedence the 'table level' qualifier if one exists. 
+ std::string search_str_part; + if (table_arg->part_info != nullptr) { + std::string partition_name = tbl_def_arg->base_partition(); + DBUG_ASSERT(!partition_name.empty()); + search_str_part = gen_qualifier_for_table(qualifier, partition_name); + } + + DBUG_ASSERT(!search_str.empty()); + + // Basic O(N) search for a matching assignment. At most we expect maybe + // ten or so elements here. + if (!search_str_part.empty()) { + for (const auto &it : v) { + if (it.substr(0, search_str_part.length()) == search_str_part) { + // We found a prefix match. Try to parse it as an assignment. + std::vector tokens = + myrocks::parse_into_tokens(it, RDB_QUALIFIER_VALUE_SEP); + + // We found a custom qualifier, it was in the form we expected it to be. + // Return that instead of whatever we initially wanted to return. In + // a case below the `foo` part will be returned to the caller. + // + // p3_cfname=foo + // + // If no value was specified then we'll return an empty string which + // later gets translated into using a default CF. + if (tokens.size() == 2) { + *per_part_match_found = true; + return tokens[1]; + } else { + return empty_result; + } + } + } + } + + // Do this loop again, this time searching for 'table level' qualifiers if we + // didn't find any partition level qualifiers above. + for (const auto &it : v) { + if (it.substr(0, search_str.length()) == search_str) { + std::vector tokens = + myrocks::parse_into_tokens(it, RDB_QUALIFIER_VALUE_SEP); + if (tokens.size() == 2) { + return tokens[1]; + } else { + return empty_result; + } + } + } + + // If we didn't find any partitioned/non-partitioned qualifiers, return an + // empty string. + return empty_result; +} + /** Read a memcmp key part from a slice using the passed in reader. 
@@ -317,7 +583,7 @@ int Rdb_key_def::read_memcmp_key_part(const TABLE *table_arg, Field *field = nullptr; if (!is_hidden_pk_part) field = fpi->get_field_in_table(table_arg); - if (fpi->m_skip_func(fpi, field, reader)) + if ((this->*fpi->m_skip_func)(fpi, field, reader)) return 1; return 0; @@ -517,15 +783,11 @@ int Rdb_key_def::successor(uchar *const packed_tuple, const uint &len) { return changed; } -uchar *Rdb_key_def::pack_field( - Field *const field, - Rdb_field_packing *pack_info, - uchar * tuple, - uchar *const packed_tuple, - uchar *const pack_buffer, - Rdb_string_writer *const unpack_info, - uint *const n_null_fields) const -{ +uchar *Rdb_key_def::pack_field(Field *const field, Rdb_field_packing *pack_info, + uchar *tuple, uchar *const packed_tuple, + uchar *const pack_buffer, + Rdb_string_writer *const unpack_info, + uint *const n_null_fields) const { if (field->real_maybe_null()) { DBUG_ASSERT(is_storage_available(tuple - packed_tuple, 1)); if (field->is_real_null()) { @@ -550,12 +812,13 @@ uchar *Rdb_key_def::pack_field( DBUG_ASSERT(is_storage_available(tuple - packed_tuple, pack_info->m_max_image_len)); - pack_info->m_pack_func(pack_info, field, pack_buffer, &tuple, &pack_ctx); + (this->*pack_info->m_pack_func)(pack_info, field, pack_buffer, &tuple, + &pack_ctx); /* Make "unpack info" to be stored in the value */ if (create_unpack_info) { - pack_info->m_make_unpack_info_func(pack_info->m_charset_codec, field, - &pack_ctx); + (this->*pack_info->m_make_unpack_info_func)(pack_info->m_charset_codec, + field, &pack_ctx); } return tuple; @@ -574,6 +837,7 @@ uchar *Rdb_key_def::pack_field( unpack_info_len OUT Unpack data length n_key_parts Number of keyparts to process. 0 means all of them. n_null_fields OUT Number of key fields with NULL value. 
+ ttl_pk_offset OUT Offset of the ttl column if specified and in the key @detail Some callers do not need the unpack information, they can pass @@ -589,7 +853,8 @@ uint Rdb_key_def::pack_record(const TABLE *const tbl, uchar *const pack_buffer, Rdb_string_writer *const unpack_info, const bool &should_store_row_debug_checksums, const longlong &hidden_pk_id, uint n_key_parts, - uint *const n_null_fields) const { + uint *const n_null_fields, + uint *const ttl_pk_offset) const { DBUG_ASSERT(tbl != nullptr); DBUG_ASSERT(pack_buffer != nullptr); DBUG_ASSERT(record != nullptr); @@ -644,6 +909,17 @@ uint Rdb_key_def::pack_record(const TABLE *const tbl, uchar *const pack_buffer, uint field_offset = field->ptr - tbl->record[0]; uint null_offset = field->null_offset(tbl->record[0]); bool maybe_null = field->real_maybe_null(); + + // Save the ttl duration offset in the key so we can store it in front of + // the record later. + if (ttl_pk_offset && m_ttl_duration > 0 && i == m_ttl_pk_key_part_offset) { + DBUG_ASSERT(field->check_field_name_match(m_ttl_column.c_str())); + DBUG_ASSERT(field->real_type() == MYSQL_TYPE_LONGLONG); + DBUG_ASSERT(field->key_type() == HA_KEYTYPE_ULONGLONG); + DBUG_ASSERT(!field->real_maybe_null()); + *ttl_pk_offset = tuple - packed_tuple; + } + field->move_field(const_cast(record) + field_offset, maybe_null ? 
const_cast(record) + null_offset : nullptr, field->null_bit); @@ -729,10 +1005,10 @@ uint Rdb_key_def::pack_hidden_pk(const longlong &hidden_pk_id, Function of type rdb_index_field_pack_t */ -void rdb_pack_with_make_sort_key( +void Rdb_key_def::pack_with_make_sort_key( Rdb_field_packing *const fpi, Field *const field, uchar *const buf MY_ATTRIBUTE((__unused__)), uchar **dst, - Rdb_pack_field_context *const pack_ctx MY_ATTRIBUTE((__unused__))) { + Rdb_pack_field_context *const pack_ctx MY_ATTRIBUTE((__unused__))) const { DBUG_ASSERT(fpi != nullptr); DBUG_ASSERT(field != nullptr); DBUG_ASSERT(dst != nullptr); @@ -797,9 +1073,9 @@ int Rdb_key_def::compare_keys(const rocksdb::Slice *key1, const auto before_skip1 = reader1.get_current_ptr(); const auto before_skip2 = reader2.get_current_ptr(); DBUG_ASSERT(fpi->m_skip_func); - if (fpi->m_skip_func(fpi, nullptr, &reader1)) + if ((this->*fpi->m_skip_func)(fpi, nullptr, &reader1)) return HA_EXIT_FAILURE; - if (fpi->m_skip_func(fpi, nullptr, &reader2)) + if ((this->*fpi->m_skip_func)(fpi, nullptr, &reader2)) return HA_EXIT_FAILURE; const auto size1 = reader1.get_current_ptr() - before_skip1; const auto size2 = reader2.get_current_ptr() - before_skip2; @@ -840,7 +1116,7 @@ size_t Rdb_key_def::key_length(const TABLE *const table, const Field *field = nullptr; if (m_index_type != INDEX_TYPE_HIDDEN_PRIMARY) field = fpi->get_field_in_table(table); - if (fpi->m_skip_func(fpi, field, &reader)) + if ((this->*fpi->m_skip_func)(fpi, field, &reader)) return size_t(-1); } return key.size() - reader.remaining_bytes(); @@ -872,7 +1148,8 @@ int Rdb_key_def::unpack_field( } } - return fpi->m_unpack_func(fpi, field, field->ptr, reader, unp_reader); + return (this->*fpi->m_unpack_func)(fpi, field, field->ptr, reader, + unp_reader); } /* @@ -882,8 +1159,8 @@ int Rdb_key_def::unpack_field( not all indexes support this @return - UNPACK_SUCCESS - Ok - UNPACK_FAILURE - Data format error. 
+ HA_EXIT_SUCCESS OK + other HA_ERR error code */ int Rdb_key_def::unpack_record(TABLE *const table, uchar *const buf, @@ -903,7 +1180,7 @@ int Rdb_key_def::unpack_record(TABLE *const table, uchar *const buf, // Skip the index number if ((!reader.read(INDEX_NUMBER_SIZE))) { - return HA_EXIT_FAILURE; + return HA_ERR_ROCKSDB_CORRUPT_DATA; } // For secondary keys, we expect the value field to contain unpack data and @@ -913,7 +1190,7 @@ int Rdb_key_def::unpack_record(TABLE *const table, uchar *const buf, unp_reader.remaining_bytes() && *unp_reader.get_current_ptr() == RDB_UNPACK_DATA_TAG; if (has_unpack_info && !unp_reader.read(RDB_UNPACK_HEADER_SIZE)) { - return HA_EXIT_FAILURE; + return HA_ERR_ROCKSDB_CORRUPT_DATA; } for (uint i = 0; i < m_key_parts; i++) { @@ -926,8 +1203,8 @@ int Rdb_key_def::unpack_record(TABLE *const table, uchar *const buf, if ((secondary_key && hidden_pk_exists && i + 1 == m_key_parts) || is_hidden_pk) { DBUG_ASSERT(fpi->m_unpack_func); - if (fpi->m_skip_func(fpi, nullptr, &reader)) { - return HA_EXIT_FAILURE; + if ((this->*fpi->m_skip_func)(fpi, nullptr, &reader)) { + return HA_ERR_ROCKSDB_CORRUPT_DATA; } continue; } @@ -959,25 +1236,25 @@ int Rdb_key_def::unpack_record(TABLE *const table, uchar *const buf, maybe_null ? table->record[0] + null_offset : nullptr, field->null_bit); - if (res) { - return res; + if (res != UNPACK_SUCCESS) { + return HA_ERR_ROCKSDB_CORRUPT_DATA; } } else { /* It is impossible to unpack the column. Skip it. 
*/ if (fpi->m_maybe_null) { const char *nullp; if (!(nullp = reader.read(1))) - return HA_EXIT_FAILURE; + return HA_ERR_ROCKSDB_CORRUPT_DATA; if (*nullp == 0) { /* This is a NULL value */ continue; } /* If NULL marker is not '0', it can be only '1' */ if (*nullp != 1) - return HA_EXIT_FAILURE; + return HA_ERR_ROCKSDB_CORRUPT_DATA; } - if (fpi->m_skip_func(fpi, field, &reader)) - return HA_EXIT_FAILURE; + if ((this->*fpi->m_skip_func)(fpi, field, &reader)) + return HA_ERR_ROCKSDB_CORRUPT_DATA; } } @@ -1003,13 +1280,13 @@ int Rdb_key_def::unpack_record(TABLE *const table, uchar *const buf, if (stored_key_chksum != computed_key_chksum) { report_checksum_mismatch(true, packed_key->data(), packed_key->size()); - return HA_EXIT_FAILURE; + return HA_ERR_ROCKSDB_CHECKSUM_MISMATCH; } if (stored_val_chksum != computed_val_chksum) { report_checksum_mismatch(false, unpack_info->data(), unpack_info->size() - RDB_CHECKSUM_CHUNK_SIZE); - return HA_EXIT_FAILURE; + return HA_ERR_ROCKSDB_CHECKSUM_MISMATCH; } } else { /* The checksums are present but we are not checking checksums */ @@ -1017,7 +1294,7 @@ int Rdb_key_def::unpack_record(TABLE *const table, uchar *const buf, } if (reader.remaining_bytes()) - return HA_EXIT_FAILURE; + return HA_ERR_ROCKSDB_CORRUPT_DATA; return HA_EXIT_SUCCESS; } @@ -1063,9 +1340,10 @@ bool Rdb_key_def::index_format_min_check(const int &pk_min, Function of type rdb_index_field_skip_t */ -int rdb_skip_max_length(const Rdb_field_packing *const fpi, - const Field *const field MY_ATTRIBUTE((__unused__)), - Rdb_string_reader *const reader) { +int Rdb_key_def::skip_max_length(const Rdb_field_packing *const fpi, + const Field *const field + MY_ATTRIBUTE((__unused__)), + Rdb_string_reader *const reader) const { if (!reader->read(fpi->m_max_image_len)) return HA_EXIT_FAILURE; return HA_EXIT_SUCCESS; @@ -1074,20 +1352,29 @@ int rdb_skip_max_length(const Rdb_field_packing *const fpi, /* (RDB_ESCAPE_LENGTH-1) must be an even number so that pieces of lines are not 
split in the middle of an UTF-8 character. See the implementation of - rdb_unpack_binary_or_utf8_varchar. + unpack_binary_or_utf8_varchar. */ -const uint RDB_ESCAPE_LENGTH = 9; +#define RDB_ESCAPE_LENGTH 9 +#define RDB_LEGACY_ESCAPE_LENGTH RDB_ESCAPE_LENGTH static_assert((RDB_ESCAPE_LENGTH - 1) % 2 == 0, "RDB_ESCAPE_LENGTH-1 must be even."); +#define RDB_ENCODED_SIZE(len) \ + ((len + (RDB_ESCAPE_LENGTH - 2)) / (RDB_ESCAPE_LENGTH - 1)) * \ + RDB_ESCAPE_LENGTH + +#define RDB_LEGACY_ENCODED_SIZE(len) \ + ((len + (RDB_LEGACY_ESCAPE_LENGTH - 1)) / (RDB_LEGACY_ESCAPE_LENGTH - 1)) * \ + RDB_LEGACY_ESCAPE_LENGTH + /* Function of type rdb_index_field_skip_t */ -static int rdb_skip_variable_length( +int Rdb_key_def::skip_variable_length( const Rdb_field_packing *const fpi MY_ATTRIBUTE((__unused__)), - const Field *const field, Rdb_string_reader *const reader) { + const Field *const field, Rdb_string_reader *const reader) const { const uchar *ptr; bool finished = false; @@ -1100,21 +1387,29 @@ static int rdb_skip_variable_length( dst_len = UINT_MAX; } + bool use_legacy_format = use_legacy_varbinary_format(); + /* Decode the length-emitted encoding here */ while ((ptr = (const uchar *)reader->read(RDB_ESCAPE_LENGTH))) { - /* See rdb_pack_with_varchar_encoding. */ - const uchar pad = - 255 - ptr[RDB_ESCAPE_LENGTH - 1]; // number of padding bytes - const uchar used_bytes = RDB_ESCAPE_LENGTH - 1 - pad; + uint used_bytes; - if (used_bytes > RDB_ESCAPE_LENGTH - 1 || used_bytes > dst_len) { - return HA_EXIT_FAILURE; /* cannot store that much, invalid data */ + /* See pack_with_varchar_encoding. 
*/ + if (use_legacy_format) { + used_bytes = calc_unpack_legacy_variable_format( + ptr[RDB_ESCAPE_LENGTH - 1], &finished); + } else { + used_bytes = + calc_unpack_variable_format(ptr[RDB_ESCAPE_LENGTH - 1], &finished); } - if (used_bytes < RDB_ESCAPE_LENGTH - 1) { - finished = true; + if (used_bytes == (uint)-1 || dst_len < used_bytes) { + return HA_EXIT_FAILURE; // Corruption in the data + } + + if (finished) { break; } + dst_len -= used_bytes; } @@ -1133,9 +1428,9 @@ const int VARCHAR_CMP_GREATER_THAN_SPACES = 3; Skip a keypart that uses Variable-Length Space-Padded encoding */ -static int rdb_skip_variable_space_pad(const Rdb_field_packing *const fpi, - const Field *const field, - Rdb_string_reader *const reader) { +int Rdb_key_def::skip_variable_space_pad( + const Rdb_field_packing *const fpi, const Field *const field, + Rdb_string_reader *const reader) const { const uchar *ptr; bool finished = false; @@ -1149,7 +1444,7 @@ static int rdb_skip_variable_space_pad(const Rdb_field_packing *const fpi, /* Decode the length-emitted encoding here */ while ((ptr = (const uchar *)reader->read(fpi->m_segment_size))) { - // See rdb_pack_with_varchar_space_pad + // See pack_with_varchar_space_pad const uchar c = ptr[fpi->m_segment_size - 1]; if (c == VARCHAR_CMP_EQUAL_TO_SPACES) { // This is the last segment @@ -1177,10 +1472,10 @@ static int rdb_skip_variable_space_pad(const Rdb_field_packing *const fpi, Function of type rdb_index_field_unpack_t */ -int rdb_unpack_integer(Rdb_field_packing *const fpi, Field *const field, - uchar *const to, Rdb_string_reader *const reader, - Rdb_string_reader *const unp_reader - MY_ATTRIBUTE((__unused__))) { +int Rdb_key_def::unpack_integer( + Rdb_field_packing *const fpi, Field *const field, uchar *const to, + Rdb_string_reader *const reader, + Rdb_string_reader *const unp_reader MY_ATTRIBUTE((__unused__))) const { const int length = fpi->m_max_image_len; const uchar *from; @@ -1245,10 +1540,11 @@ static void rdb_swap_float_bytes(uchar 
*const dst, const uchar *const src) { #define rdb_swap_float_bytes nullptr #endif -static int rdb_unpack_floating_point( +int Rdb_key_def::unpack_floating_point( uchar *const dst, Rdb_string_reader *const reader, const size_t &size, const int &exp_digit, const uchar *const zero_pattern, - const uchar *const zero_val, void (*swap_func)(uchar *, const uchar *)) { + const uchar *const zero_val, + void (*swap_func)(uchar *, const uchar *)) const { const uchar *const from = (const uchar *)reader->read(size); if (from == nullptr) return UNPACK_FAILURE; /* Mem-comparable image doesn't have enough bytes */ @@ -1288,7 +1584,7 @@ static int rdb_unpack_floating_point( // On little-endian, swap the bytes around swap_func(dst, tmp); #else - static_assert(swap_func == nullptr, "Assuming that no swapping is needed."); + DBUG_ASSERT(swap_func == nullptr); #endif return UNPACK_SUCCESS; @@ -1306,17 +1602,17 @@ static int rdb_unpack_floating_point( Note also that this code assumes that NaN and +/-Infinity are never allowed in the database. */ -static int rdb_unpack_double( +int Rdb_key_def::unpack_double( Rdb_field_packing *const fpi MY_ATTRIBUTE((__unused__)), Field *const field MY_ATTRIBUTE((__unused__)), uchar *const field_ptr, Rdb_string_reader *const reader, - Rdb_string_reader *const unp_reader MY_ATTRIBUTE((__unused__))) { + Rdb_string_reader *const unp_reader MY_ATTRIBUTE((__unused__))) const { static double zero_val = 0.0; static const uchar zero_pattern[8] = {128, 0, 0, 0, 0, 0, 0, 0}; - return rdb_unpack_floating_point( - field_ptr, reader, sizeof(double), DBL_EXP_DIG, zero_pattern, - (const uchar *)&zero_val, rdb_swap_double_bytes); + return unpack_floating_point(field_ptr, reader, sizeof(double), DBL_EXP_DIG, + zero_pattern, (const uchar *)&zero_val, + rdb_swap_double_bytes); } #if !defined(FLT_EXP_DIG) @@ -1331,16 +1627,16 @@ static int rdb_unpack_double( Note also that this code assumes that NaN and +/-Infinity are never allowed in the database. 
*/ -static int rdb_unpack_float( - Rdb_field_packing *const, Field *const field MY_ATTRIBUTE((__unused__)), +int Rdb_key_def::unpack_float( + Rdb_field_packing *const fpi, Field *const field MY_ATTRIBUTE((__unused__)), uchar *const field_ptr, Rdb_string_reader *const reader, - Rdb_string_reader *const unp_reader MY_ATTRIBUTE((__unused__))) { + Rdb_string_reader *const unp_reader MY_ATTRIBUTE((__unused__))) const { static float zero_val = 0.0; static const uchar zero_pattern[4] = {128, 0, 0, 0}; - return rdb_unpack_floating_point( - field_ptr, reader, sizeof(float), FLT_EXP_DIG, zero_pattern, - (const uchar *)&zero_val, rdb_swap_float_bytes); + return unpack_floating_point(field_ptr, reader, sizeof(float), FLT_EXP_DIG, + zero_pattern, (const uchar *)&zero_val, + rdb_swap_float_bytes); } /* @@ -1348,10 +1644,10 @@ static int rdb_unpack_float( Unpack by doing the reverse action to Field_newdate::make_sort_key. */ -int rdb_unpack_newdate(Rdb_field_packing *const fpi, Field *constfield, - uchar *const field_ptr, Rdb_string_reader *const reader, - Rdb_string_reader *const unp_reader - MY_ATTRIBUTE((__unused__))) { +int Rdb_key_def::unpack_newdate( + Rdb_field_packing *const fpi, Field *const field MY_ATTRIBUTE((__unused__)), + uchar *const field_ptr, Rdb_string_reader *const reader, + Rdb_string_reader *const unp_reader MY_ATTRIBUTE((__unused__))) const { const char *from; DBUG_ASSERT(fpi->m_max_image_len == 3); @@ -1370,10 +1666,10 @@ int rdb_unpack_newdate(Rdb_field_packing *const fpi, Field *constfield, This is for BINARY(n) where the value occupies the whole length. 
*/ -static int rdb_unpack_binary_str( +int Rdb_key_def::unpack_binary_str( Rdb_field_packing *const fpi, Field *const field, uchar *const to, Rdb_string_reader *const reader, - Rdb_string_reader *const unp_reader MY_ATTRIBUTE((__unused__))) { + Rdb_string_reader *const unp_reader MY_ATTRIBUTE((__unused__))) const { const char *from; if (!(from = reader->read(fpi->m_max_image_len))) return UNPACK_FAILURE; /* Mem-comparable image doesn't have enough bytes */ @@ -1388,10 +1684,10 @@ static int rdb_unpack_binary_str( UTF8 sequences. */ -static int rdb_unpack_utf8_str(Rdb_field_packing *const fpi, Field *const field, - uchar *dst, Rdb_string_reader *const reader, - Rdb_string_reader *const unp_reader - MY_ATTRIBUTE((__unused__))) { +int Rdb_key_def::unpack_utf8_str( + Rdb_field_packing *const fpi, Field *const field, uchar *dst, + Rdb_string_reader *const reader, + Rdb_string_reader *const unp_reader MY_ATTRIBUTE((__unused__))) const { my_core::CHARSET_INFO *const cset = (my_core::CHARSET_INFO *)field->charset(); const uchar *src; if (!(src = (const uchar *)reader->read(fpi->m_max_image_len))) @@ -1415,23 +1711,119 @@ static int rdb_unpack_utf8_str(Rdb_field_packing *const fpi, Field *const field, return UNPACK_SUCCESS; } +/* + This is the original algorithm to encode a variable binary field. It + sets a flag byte every Nth byte. The flag value is (255 - #pad) where + #pad is the number of padding bytes that were needed (0 if all N-1 + bytes were used). + + If N=8 and the field is: + * 3 bytes (1, 2, 3) this is encoded as: 1, 2, 3, 0, 0, 0, 0, 251 + * 4 bytes (1, 2, 3, 0) this is encoded as: 1, 2, 3, 0, 0, 0, 0, 252 + And the 4 byte string compares as greater than the 3 byte string + + Unfortunately the algorithm has a flaw. If the input is exactly a + multiple of N-1, an extra N bytes are written. Since we usually use + N=9, an 8 byte input will generate 18 bytes of output instead of the + 9 bytes of output that is optimal. 
+ + See pack_variable_format for the newer algorithm. +*/ +void Rdb_key_def::pack_legacy_variable_format( + const uchar *src, // The data to encode + size_t src_len, // The length of the data to encode + uchar **dst) const // The location to encode the data +{ + size_t copy_len; + size_t padding_bytes; + uchar *ptr = *dst; + + do { + copy_len = std::min((size_t)RDB_LEGACY_ESCAPE_LENGTH - 1, src_len); + padding_bytes = RDB_LEGACY_ESCAPE_LENGTH - 1 - copy_len; + memcpy(ptr, src, copy_len); + ptr += copy_len; + src += copy_len; + // pad with zeros if necessary + if (padding_bytes > 0) { + memset(ptr, 0, padding_bytes); + ptr += padding_bytes; + } + + *(ptr++) = 255 - padding_bytes; + + src_len -= copy_len; + } while (padding_bytes == 0); + + *dst = ptr; +} + +/* + This is the new algorithm. Similarly to the legacy format the input + is split up into N-1 bytes and a flag byte is used as the Nth byte + in the output. + + - If the previous segment needed any padding the flag is set to the + number of bytes used (0..N-2). 0 is possible in the first segment + if the input is 0 bytes long. + - If no padding was used and there is no more data left in the input + the flag is set to N-1 + - If no padding was used and there is still data left in the input the + flag is set to N. 
+ + For N=9, the following input values encode to the specified + outout (where 'X' indicates a byte of the original input): + - 0 bytes is encoded as 0 0 0 0 0 0 0 0 0 + - 1 byte is encoded as X 0 0 0 0 0 0 0 1 + - 2 bytes is encoded as X X 0 0 0 0 0 0 2 + - 7 bytes is encoded as X X X X X X X 0 7 + - 8 bytes is encoded as X X X X X X X X 8 + - 9 bytes is encoded as X X X X X X X X 9 X 0 0 0 0 0 0 0 1 + - 10 bytes is encoded as X X X X X X X X 9 X X 0 0 0 0 0 0 2 +*/ +void Rdb_key_def::pack_variable_format( + const uchar *src, // The data to encode + size_t src_len, // The length of the data to encode + uchar **dst) const // The location to encode the data +{ + uchar *ptr = *dst; + + for (;;) { + // Figure out how many bytes to copy, copy them and adjust pointers + const size_t copy_len = std::min((size_t)RDB_ESCAPE_LENGTH - 1, src_len); + memcpy(ptr, src, copy_len); + ptr += copy_len; + src += copy_len; + src_len -= copy_len; + + // Are we at the end of the input? + if (src_len == 0) { + // pad with zeros if necessary; + const size_t padding_bytes = RDB_ESCAPE_LENGTH - 1 - copy_len; + if (padding_bytes > 0) { + memset(ptr, 0, padding_bytes); + ptr += padding_bytes; + } + + // Put the flag byte (0 - N-1) in the output + *(ptr++) = (uchar)copy_len; + break; + } + + // We have more data - put the flag byte (N) in and continue + *(ptr++) = RDB_ESCAPE_LENGTH; + } + + *dst = ptr; +} + /* Function of type rdb_index_field_pack_t */ -static void rdb_pack_with_varchar_encoding( +void Rdb_key_def::pack_with_varchar_encoding( Rdb_field_packing *const fpi, Field *const field, uchar *buf, uchar **dst, - Rdb_pack_field_context *const pack_ctx MY_ATTRIBUTE((__unused__))) { - /* - Use a flag byte every Nth byte. Set it to (255 - #pad) where #pad is 0 - when the var length field filled all N-1 previous bytes and #pad is - otherwise the number of padding bytes used. 
- - If N=8 and the field is: - * 3 bytes (1, 2, 3) this is encoded as: 1, 2, 3, 0, 0, 0, 0, 251 - * 4 bytes (1, 2, 3, 0) this is encoded as: 1, 2, 3, 0, 0, 0, 0, 252 - And the 4 byte string compares as greater than the 3 byte string - */ + Rdb_pack_field_context *const pack_ctx MY_ATTRIBUTE((__unused__))) const { const CHARSET_INFO *const charset = field->charset(); Field_varstring *const field_var = (Field_varstring *)field; @@ -1443,26 +1835,11 @@ static void rdb_pack_with_varchar_encoding( field_var->ptr + field_var->length_bytes, value_length, 0); /* Got a mem-comparable image in 'buf'. Now, produce varlength encoding */ - - size_t encoded_size = 0; - uchar *ptr = *dst; - while (1) { - const size_t copy_len = std::min((size_t)RDB_ESCAPE_LENGTH - 1, xfrm_len); - const size_t padding_bytes = RDB_ESCAPE_LENGTH - 1 - copy_len; - memcpy(ptr, buf, copy_len); - ptr += copy_len; - buf += copy_len; - // pad with zeros if necessary; - for (size_t idx = 0; idx < padding_bytes; idx++) - *(ptr++) = 0; - *(ptr++) = 255 - padding_bytes; - - xfrm_len -= copy_len; - encoded_size += RDB_ESCAPE_LENGTH; - if (padding_bytes != 0) - break; + if (use_legacy_varbinary_format()) { + pack_legacy_variable_format(buf, xfrm_len, dst); + } else { + pack_variable_format(buf, xfrm_len, dst); } - *dst += encoded_size; } /* @@ -1549,16 +1926,15 @@ static const int RDB_TRIMMED_CHARS_OFFSET = 8; then store it as unsigned. 
@seealso - rdb_unpack_binary_or_utf8_varchar_space_pad - rdb_unpack_simple_varchar_space_pad - rdb_dummy_make_unpack_info - rdb_skip_variable_space_pad + unpack_binary_or_utf8_varchar_space_pad + unpack_simple_varchar_space_pad + dummy_make_unpack_info + skip_variable_space_pad */ -static void -rdb_pack_with_varchar_space_pad(Rdb_field_packing *const fpi, - Field *const field, uchar *buf, uchar **dst, - Rdb_pack_field_context *const pack_ctx) { +void Rdb_key_def::pack_with_varchar_space_pad( + Rdb_field_packing *const fpi, Field *const field, uchar *buf, uchar **dst, + Rdb_pack_field_context *const pack_ctx) const { Rdb_string_writer *const unpack_info = pack_ctx->writer; const CHARSET_INFO *const charset = field->charset(); const auto field_var = static_cast(field); @@ -1639,14 +2015,93 @@ rdb_pack_with_varchar_space_pad(Rdb_field_packing *const fpi, *dst += encoded_size; } +/* + Calculate the number of used bytes in the chunk and whether this is the + last chunk in the input. This is based on the old legacy format - see + pack_legacy_variable_format. + */ +uint Rdb_key_def::calc_unpack_legacy_variable_format(uchar flag, + bool *done) const { + uint pad = 255 - flag; + uint used_bytes = RDB_LEGACY_ESCAPE_LENGTH - 1 - pad; + if (used_bytes > RDB_LEGACY_ESCAPE_LENGTH - 1) { + return (uint)-1; + } + + *done = used_bytes < RDB_LEGACY_ESCAPE_LENGTH - 1; + return used_bytes; +} + +/* + Calculate the number of used bytes in the chunk and whether this is the + last chunk in the input. This is based on the new format - see + pack_variable_format. 
+ */ +uint Rdb_key_def::calc_unpack_variable_format(uchar flag, bool *done) const { + // Check for invalid flag values + if (flag > RDB_ESCAPE_LENGTH) { + return (uint)-1; + } + + // Values from 1 to N-1 indicate this is the last chunk and that is how + // many bytes were used + if (flag < RDB_ESCAPE_LENGTH) { + *done = true; + return flag; + } + + // A value of N means we used N-1 bytes and had more to go + *done = false; + return RDB_ESCAPE_LENGTH - 1; +} + +/* + Unpack data that has charset information. Each two bytes of the input is + treated as a wide-character and converted to its multibyte equivalent in + the output. + */ +static int +unpack_charset(const CHARSET_INFO *cset, // character set information + const uchar *src, // source data to unpack + uint src_len, // length of source data + uchar *dst, // destination of unpacked data + uint dst_len, // length of destination data + uint *used_bytes) // output number of bytes used +{ + if (src_len & 1) { + /* + UTF-8 characters are encoded into two-byte entities. There is no way + we can have an odd number of bytes after encoding. 
+ */ + return UNPACK_FAILURE; + } + + uchar *dst_end = dst + dst_len; + uint used = 0; + + for (uint ii = 0; ii < src_len; ii += 2) { + my_wc_t wc = (src[ii] << 8) | src[ii + 1]; + int res = cset->cset->wc_mb(cset, wc, dst + used, dst_end); + DBUG_ASSERT(res > 0 && res <= 3); + if (res < 0) { + return UNPACK_FAILURE; + } + + used += res; + } + + *used_bytes = used; + return UNPACK_SUCCESS; +} + /* Function of type rdb_index_field_unpack_t */ -static int rdb_unpack_binary_or_utf8_varchar( +int Rdb_key_def::unpack_binary_or_utf8_varchar( Rdb_field_packing *const fpi, Field *const field, uchar *dst, Rdb_string_reader *const reader, - Rdb_string_reader *const unp_reader MY_ATTRIBUTE((__unused__))) { + Rdb_string_reader *const unp_reader MY_ATTRIBUTE((__unused__))) const { const uchar *ptr; size_t len = 0; bool finished = false; @@ -1655,64 +2110,51 @@ static int rdb_unpack_binary_or_utf8_varchar( dst += field_var->length_bytes; // How much we can unpack size_t dst_len = field_var->pack_length() - field_var->length_bytes; - uchar *const dst_end = dst + dst_len; + + bool use_legacy_format = use_legacy_varbinary_format(); /* Decode the length-emitted encoding here */ while ((ptr = (const uchar *)reader->read(RDB_ESCAPE_LENGTH))) { - /* See rdb_pack_with_varchar_encoding. */ - uchar pad = 255 - ptr[RDB_ESCAPE_LENGTH - 1]; // number of padding bytes - uchar used_bytes = RDB_ESCAPE_LENGTH - 1 - pad; + uint used_bytes; - if (used_bytes > RDB_ESCAPE_LENGTH - 1) { - return UNPACK_FAILURE; /* cannot store that much, invalid data */ + /* See pack_with_varchar_encoding. */ + if (use_legacy_format) { + used_bytes = calc_unpack_legacy_variable_format( + ptr[RDB_ESCAPE_LENGTH - 1], &finished); + } else { + used_bytes = + calc_unpack_variable_format(ptr[RDB_ESCAPE_LENGTH - 1], &finished); } - if (dst_len < used_bytes) { - /* Encoded index tuple is longer than the size in the record buffer? 
*/ - return UNPACK_FAILURE; + if (used_bytes == (uint)-1 || dst_len < used_bytes) { + return UNPACK_FAILURE; // Corruption in the data } /* Now, we need to decode used_bytes of data and append them to the value. */ if (fpi->m_varchar_charset == &my_charset_utf8_bin) { - if (used_bytes & 1) { - /* - UTF-8 characters are encoded into two-byte entities. There is no way - we can have an odd number of bytes after encoding. - */ - return UNPACK_FAILURE; - } - - const uchar *src = ptr; - const uchar *src_end = ptr + used_bytes; - while (src < src_end) { - my_wc_t wc = (src[0] << 8) | src[1]; - src += 2; - const CHARSET_INFO *cset = fpi->m_varchar_charset; - int res = cset->cset->wc_mb(cset, wc, dst, dst_end); - DBUG_ASSERT(res > 0 && res <= 3); - if (res < 0) - return UNPACK_FAILURE; - dst += res; - len += res; - dst_len -= res; + int err = unpack_charset(fpi->m_varchar_charset, ptr, used_bytes, dst, + dst_len, &used_bytes); + if (err != UNPACK_SUCCESS) { + return err; } } else { memcpy(dst, ptr, used_bytes); - dst += used_bytes; - dst_len -= used_bytes; - len += used_bytes; } - if (used_bytes < RDB_ESCAPE_LENGTH - 1) { - finished = true; + dst += used_bytes; + dst_len -= used_bytes; + len += used_bytes; + + if (finished) { break; } } - if (!finished) + if (!finished) { return UNPACK_FAILURE; + } /* Save the length */ if (field_var->length_bytes == 1) { @@ -1726,14 +2168,15 @@ static int rdb_unpack_binary_or_utf8_varchar( /* @seealso - rdb_pack_with_varchar_space_pad - packing function - rdb_unpack_simple_varchar_space_pad - unpacking function for 'simple' + pack_with_varchar_space_pad - packing function + unpack_simple_varchar_space_pad - unpacking function for 'simple' charsets. 
- rdb_skip_variable_space_pad - skip function + skip_variable_space_pad - skip function */ -static int rdb_unpack_binary_or_utf8_varchar_space_pad( +int Rdb_key_def::unpack_binary_or_utf8_varchar_space_pad( Rdb_field_packing *const fpi, Field *const field, uchar *dst, - Rdb_string_reader *const reader, Rdb_string_reader *const unp_reader) { + Rdb_string_reader *const reader, + Rdb_string_reader *const unp_reader) const { const uchar *ptr; size_t len = 0; bool finished = false; @@ -1840,9 +2283,9 @@ static int rdb_unpack_binary_or_utf8_varchar_space_pad( Function of type rdb_make_unpack_info_t */ -static void rdb_make_unpack_unknown( +void Rdb_key_def::make_unpack_unknown( const Rdb_collation_codec *codec MY_ATTRIBUTE((__unused__)), - const Field *const field, Rdb_pack_field_context *const pack_ctx) { + const Field *const field, Rdb_pack_field_context *const pack_ctx) const { pack_ctx->writer->write(field->ptr, field->pack_length()); } @@ -1851,25 +2294,28 @@ static void rdb_make_unpack_unknown( available. The actual unpack_info data is produced by the function that packs the key, - that is, rdb_pack_with_varchar_space_pad. + that is, pack_with_varchar_space_pad. 
*/ -static void rdb_dummy_make_unpack_info( +void Rdb_key_def::dummy_make_unpack_info( const Rdb_collation_codec *codec MY_ATTRIBUTE((__unused__)), const Field *field MY_ATTRIBUTE((__unused__)), - Rdb_pack_field_context *pack_ctx MY_ATTRIBUTE((__unused__))) {} + Rdb_pack_field_context *pack_ctx MY_ATTRIBUTE((__unused__))) const { + // Do nothing +} /* Function of type rdb_index_field_unpack_t */ -static int rdb_unpack_unknown(Rdb_field_packing *const fpi, Field *const field, - uchar *const dst, Rdb_string_reader *const reader, - Rdb_string_reader *const unp_reader) { +int Rdb_key_def::unpack_unknown(Rdb_field_packing *const fpi, + Field *const field, uchar *const dst, + Rdb_string_reader *const reader, + Rdb_string_reader *const unp_reader) const { const uchar *ptr; const uint len = fpi->m_unpack_data_len; // We don't use anything from the key, so skip over it. - if (rdb_skip_max_length(fpi, field, reader)) { + if (skip_max_length(fpi, field, reader)) { return UNPACK_FAILURE; } @@ -1886,9 +2332,9 @@ static int rdb_unpack_unknown(Rdb_field_packing *const fpi, Field *const field, Function of type rdb_make_unpack_info_t */ -static void rdb_make_unpack_unknown_varchar( +void Rdb_key_def::make_unpack_unknown_varchar( const Rdb_collation_codec *const codec MY_ATTRIBUTE((__unused__)), - const Field *const field, Rdb_pack_field_context *const pack_ctx) { + const Field *const field, Rdb_pack_field_context *const pack_ctx) const { const auto f = static_cast(field); uint len = f->length_bytes == 1 ? (uint)*f->ptr : uint2korr(f->ptr); len += f->length_bytes; @@ -1906,20 +2352,20 @@ static void rdb_make_unpack_unknown_varchar( the original string, so we keep the whole original string in the unpack_info. 
@seealso - rdb_make_unpack_unknown, rdb_unpack_unknown + make_unpack_unknown, unpack_unknown */ -static int rdb_unpack_unknown_varchar(Rdb_field_packing *const fpi, - Field *const field, uchar *dst, - Rdb_string_reader *const reader, - Rdb_string_reader *const unp_reader) { +int Rdb_key_def::unpack_unknown_varchar( + Rdb_field_packing *const fpi, Field *const field, uchar *dst, + Rdb_string_reader *const reader, + Rdb_string_reader *const unp_reader) const { const uchar *ptr; uchar *const d0 = dst; const auto f = static_cast(field); dst += f->length_bytes; const uint len_bytes = f->length_bytes; // We don't use anything from the key, so skip over it. - if (fpi->m_skip_func(fpi, field, reader)) { + if ((this->*fpi->m_skip_func)(fpi, field, reader)) { return UNPACK_FAILURE; } @@ -1977,10 +2423,9 @@ static uint rdb_read_unpack_simple(Rdb_bit_reader *const reader, Make unpack_data for VARCHAR(n) in a "simple" charset. */ -static void -rdb_make_unpack_simple_varchar(const Rdb_collation_codec *const codec, - const Field *const field, - Rdb_pack_field_context *const pack_ctx) { +void Rdb_key_def::make_unpack_simple_varchar( + const Rdb_collation_codec *const codec, const Field *const field, + Rdb_pack_field_context *const pack_ctx) const { const auto f = static_cast(field); uchar *const src = f->ptr + f->length_bytes; const size_t src_len = @@ -1996,14 +2441,14 @@ rdb_make_unpack_simple_varchar(const Rdb_collation_codec *const codec, Function of type rdb_index_field_unpack_t @seealso - rdb_pack_with_varchar_space_pad - packing function - rdb_unpack_binary_or_utf8_varchar_space_pad - a similar unpacking function + pack_with_varchar_space_pad - packing function + unpack_binary_or_utf8_varchar_space_pad - a similar unpacking function */ -int rdb_unpack_simple_varchar_space_pad(Rdb_field_packing *const fpi, - Field *const field, uchar *dst, - Rdb_string_reader *const reader, - Rdb_string_reader *const unp_reader) { +int Rdb_key_def::unpack_simple_varchar_space_pad( + 
Rdb_field_packing *const fpi, Field *const field, uchar *dst, + Rdb_string_reader *const reader, + Rdb_string_reader *const unp_reader) const { const uchar *ptr; size_t len = 0; bool finished = false; @@ -2101,12 +2546,12 @@ int rdb_unpack_simple_varchar_space_pad(Rdb_field_packing *const fpi, It is CHAR(N), so SQL layer has padded the value with spaces up to N chars. @seealso - The VARCHAR variant is in rdb_make_unpack_simple_varchar + The VARCHAR variant is in make_unpack_simple_varchar */ -static void rdb_make_unpack_simple(const Rdb_collation_codec *const codec, - const Field *const field, - Rdb_pack_field_context *const pack_ctx) { +void Rdb_key_def::make_unpack_simple( + const Rdb_collation_codec *const codec, const Field *const field, + Rdb_pack_field_context *const pack_ctx) const { const uchar *const src = field->ptr; Rdb_bit_writer bit_writer(pack_ctx->writer); rdb_write_unpack_simple(&bit_writer, codec, src, field->pack_length()); @@ -2116,10 +2561,11 @@ static void rdb_make_unpack_simple(const Rdb_collation_codec *const codec, Function of type rdb_index_field_unpack_t */ -static int rdb_unpack_simple(Rdb_field_packing *const fpi, - Field *const field MY_ATTRIBUTE((__unused__)), - uchar *const dst, Rdb_string_reader *const reader, - Rdb_string_reader *const unp_reader) { +int Rdb_key_def::unpack_simple(Rdb_field_packing *const fpi, + Field *const field MY_ATTRIBUTE((__unused__)), + uchar *const dst, + Rdb_string_reader *const reader, + Rdb_string_reader *const unp_reader) const { const uchar *ptr; const uint len = fpi->m_max_image_len; Rdb_bit_reader bit_reader(unp_reader); @@ -2265,9 +2711,10 @@ rdb_init_collation_mapping(const my_core::CHARSET_INFO *const cs) { } cur->m_make_unpack_info_func = { - {rdb_make_unpack_simple_varchar, rdb_make_unpack_simple}}; - cur->m_unpack_func = { - {rdb_unpack_simple_varchar_space_pad, rdb_unpack_simple}}; + &Rdb_key_def::make_unpack_simple_varchar, + &Rdb_key_def::make_unpack_simple}; + cur->m_unpack_func = 
{&Rdb_key_def::unpack_simple_varchar_space_pad, + &Rdb_key_def::unpack_simple}; } else { // Out of luck for now. } @@ -2354,8 +2801,8 @@ bool Rdb_field_packing::setup(const Rdb_key_def *const key_descr, /* Calculate image length. By default, is is pack_length() */ m_max_image_len = field ? field->pack_length() : ROCKSDB_SIZEOF_HIDDEN_PK_COLUMN; - m_skip_func = rdb_skip_max_length; - m_pack_func = rdb_pack_with_make_sort_key; + m_skip_func = &Rdb_key_def::skip_max_length; + m_pack_func = &Rdb_key_def::pack_with_make_sort_key; switch (type) { case MYSQL_TYPE_LONGLONG: @@ -2363,15 +2810,15 @@ bool Rdb_field_packing::setup(const Rdb_key_def *const key_descr, case MYSQL_TYPE_INT24: case MYSQL_TYPE_SHORT: case MYSQL_TYPE_TINY: - m_unpack_func = rdb_unpack_integer; + m_unpack_func = &Rdb_key_def::unpack_integer; return true; case MYSQL_TYPE_DOUBLE: - m_unpack_func = rdb_unpack_double; + m_unpack_func = &Rdb_key_def::unpack_double; return true; case MYSQL_TYPE_FLOAT: - m_unpack_func = rdb_unpack_float; + m_unpack_func = &Rdb_key_def::unpack_float; return true; case MYSQL_TYPE_NEWDECIMAL: @@ -2388,7 +2835,7 @@ bool Rdb_field_packing::setup(const Rdb_key_def *const key_descr, case MYSQL_TYPE_TIME2: /* TIME is packed with Field_timef::make_sort_key */ case MYSQL_TYPE_YEAR: /* YEAR is packed with Field_tiny::make_sort_key */ /* Everything that comes here is packed with just a memcpy(). 
*/ - m_unpack_func = rdb_unpack_binary_str; + m_unpack_func = &Rdb_key_def::unpack_binary_str; return true; case MYSQL_TYPE_NEWDATE: @@ -2397,7 +2844,7 @@ bool Rdb_field_packing::setup(const Rdb_key_def *const key_descr, 3 bytes, and packing is done by swapping the byte order (for both big- and little-endian) */ - m_unpack_func = rdb_unpack_newdate; + m_unpack_func = &Rdb_key_def::unpack_newdate; return true; case MYSQL_TYPE_TINY_BLOB: case MYSQL_TYPE_MEDIUM_BLOB: @@ -2444,10 +2891,15 @@ bool Rdb_field_packing::setup(const Rdb_key_def *const key_descr, // The default for varchar is variable-length, without space-padding for // comparisons m_varchar_charset = cs; - m_skip_func = rdb_skip_variable_length; - m_pack_func = rdb_pack_with_varchar_encoding; - m_max_image_len = - (m_max_image_len / (RDB_ESCAPE_LENGTH - 1) + 1) * RDB_ESCAPE_LENGTH; + m_skip_func = &Rdb_key_def::skip_variable_length; + m_pack_func = &Rdb_key_def::pack_with_varchar_encoding; + if (!key_descr || key_descr->use_legacy_varbinary_format()) { + m_max_image_len = RDB_LEGACY_ENCODED_SIZE(m_max_image_len); + } else { + // Calculate the maximum size of the short section plus the + // maximum size of the long section + m_max_image_len = RDB_ENCODED_SIZE(m_max_image_len); + } const auto field_var = static_cast(field); m_unpack_info_uses_two_bytes = (field_var->field_length + 8 >= 0x100); @@ -2465,8 +2917,8 @@ bool Rdb_field_packing::setup(const Rdb_key_def *const key_descr, // - For VARBINARY(N), values may have different lengths, so we're using // variable-length encoding. This is also the only charset where the // values are not space-padded for comparison. - m_unpack_func = is_varchar ? rdb_unpack_binary_or_utf8_varchar - : rdb_unpack_binary_str; + m_unpack_func = is_varchar ? 
&Rdb_key_def::unpack_binary_or_utf8_varchar + : &Rdb_key_def::unpack_binary_str; res = true; } else if (cs == &my_charset_latin1_bin || cs == &my_charset_utf8_bin) { // For _bin collations, mem-comparable form of the string is the string @@ -2476,10 +2928,10 @@ bool Rdb_field_packing::setup(const Rdb_key_def *const key_descr, // VARCHARs - are compared as if they were space-padded - but are // not actually space-padded (reading the value back produces the // original value, without the padding) - m_unpack_func = rdb_unpack_binary_or_utf8_varchar_space_pad; - m_skip_func = rdb_skip_variable_space_pad; - m_pack_func = rdb_pack_with_varchar_space_pad; - m_make_unpack_info_func = rdb_dummy_make_unpack_info; + m_unpack_func = &Rdb_key_def::unpack_binary_or_utf8_varchar_space_pad; + m_skip_func = &Rdb_key_def::skip_variable_space_pad; + m_pack_func = &Rdb_key_def::pack_with_varchar_space_pad; + m_make_unpack_info_func = &Rdb_key_def::dummy_make_unpack_info; m_segment_size = get_segment_size_from_collation(cs); m_max_image_len = (max_image_len_before_chunks / (m_segment_size - 1) + 1) * @@ -2489,8 +2941,9 @@ bool Rdb_field_packing::setup(const Rdb_key_def *const key_descr, } else { // SQL layer pads CHAR(N) values to their maximum length. // We just store that and restore it back. - m_unpack_func = (cs == &my_charset_latin1_bin) ? rdb_unpack_binary_str - : rdb_unpack_utf8_str; + m_unpack_func = (cs == &my_charset_latin1_bin) + ? &Rdb_key_def::unpack_binary_str + : &Rdb_key_def::unpack_utf8_str; } res = true; } else { @@ -2512,8 +2965,8 @@ bool Rdb_field_packing::setup(const Rdb_key_def *const key_descr, // Currently we handle these collations as NO_PAD, even if they have // PAD_SPACE attribute. 
if (cs->levels_for_order == 1) { - m_pack_func = rdb_pack_with_varchar_space_pad; - m_skip_func = rdb_skip_variable_space_pad; + m_pack_func = &Rdb_key_def::pack_with_varchar_space_pad; + m_skip_func = &Rdb_key_def::skip_variable_space_pad; m_segment_size = get_segment_size_from_collation(cs); m_max_image_len = (max_image_len_before_chunks / (m_segment_size - 1) + 1) * @@ -2528,8 +2981,8 @@ bool Rdb_field_packing::setup(const Rdb_key_def *const key_descr, // NO_LINT_DEBUG sql_print_warning("MyRocks will handle this collation internally " " as if it had a NO_PAD attribute."); - m_pack_func = rdb_pack_with_varchar_encoding; - m_skip_func = rdb_skip_variable_length; + m_pack_func = &Rdb_key_def::pack_with_varchar_encoding; + m_skip_func = &Rdb_key_def::skip_variable_length; } } @@ -2545,10 +2998,11 @@ bool Rdb_field_packing::setup(const Rdb_key_def *const key_descr, // form. Our way of restoring the original value is to keep a copy of // the original value in unpack_info. m_unpack_info_stores_value = true; - m_make_unpack_info_func = is_varchar ? rdb_make_unpack_unknown_varchar - : rdb_make_unpack_unknown; - m_unpack_func = - is_varchar ? rdb_unpack_unknown_varchar : rdb_unpack_unknown; + m_make_unpack_info_func = + is_varchar ? &Rdb_key_def::make_unpack_unknown_varchar + : &Rdb_key_def::make_unpack_unknown; + m_unpack_func = is_varchar ? &Rdb_key_def::unpack_unknown_varchar + : &Rdb_key_def::unpack_unknown; } else { // Same as above: we don't know how to restore the value from its // mem-comparable form. @@ -2643,7 +3097,6 @@ bool Rdb_tbl_def::put_dict(Rdb_dict_manager *const dict, uchar flags = (kd.m_is_reverse_cf ? Rdb_key_def::REVERSE_CF_FLAG : 0) | - (kd.m_is_auto_cf ? Rdb_key_def::AUTO_CF_FLAG : 0) | (kd.m_is_per_partition_cf ? 
Rdb_key_def::PER_PARTITION_CF_FLAG : 0); const uint cf_id = kd.get_cf()->GetID(); @@ -2665,11 +3118,8 @@ bool Rdb_tbl_def::put_dict(Rdb_dict_manager *const dict, flags &= ~Rdb_key_def::CF_FLAGS_TO_IGNORE; if (existing_cf_flags != flags) { - my_printf_error(ER_UNKNOWN_ERROR, - "Column family ('%s') flag (%d) is different from an " - "existing flag (%d). Assign a new CF flag, or do not " - "change existing CF flag.", MYF(0), cf_name.c_str(), - flags, existing_cf_flags); + my_error(ER_CF_DIFFERENT, MYF(0), cf_name.c_str(), flags, + existing_cf_flags); return true; } } else { @@ -2678,9 +3128,16 @@ bool Rdb_tbl_def::put_dict(Rdb_dict_manager *const dict, rdb_netstr_append_uint32(&indexes, cf_id); rdb_netstr_append_uint32(&indexes, kd.m_index_number); - dict->add_or_update_index_cf_mapping(batch, kd.m_index_type, - kd.m_kv_format_version, - kd.m_index_number, cf_id); + + struct Rdb_index_info index_info; + index_info.m_gl_index_id = {cf_id, kd.m_index_number}; + index_info.m_index_dict_version = Rdb_key_def::INDEX_INFO_VERSION_LATEST; + index_info.m_index_type = kd.m_index_type; + index_info.m_kv_version = kd.m_kv_format_version; + index_info.m_index_flags = kd.m_index_flags_bitmap; + index_info.m_ttl_duration = kd.m_ttl_duration; + + dict->add_or_update_index_cf_mapping(batch, &index_info); } const rocksdb::Slice skey((char *)key, keylen); @@ -2690,6 +3147,38 @@ bool Rdb_tbl_def::put_dict(Rdb_dict_manager *const dict, return false; } +// Length that each index flag takes inside the record. 
+// Each index in the array maps to the enum INDEX_FLAG +static const std::array index_flag_lengths = { + {ROCKSDB_SIZEOF_TTL_RECORD}}; + + +bool Rdb_key_def::has_index_flag(uint32 index_flags, enum INDEX_FLAG flag) { + return flag & index_flags; +} + +uint32 Rdb_key_def::calculate_index_flag_offset(uint32 index_flags, + enum INDEX_FLAG flag) { + + DBUG_ASSERT(Rdb_key_def::has_index_flag(index_flags, flag)); + + uint offset = 0; + for (size_t bit = 0; bit < sizeof(index_flags) * CHAR_BIT; ++bit) { + int mask = 1 << bit; + + /* Exit once we've reached the proper flag */ + if (flag & mask) { + break; + } + + if (index_flags & mask) { + offset += index_flag_lengths[bit]; + } + } + + return offset; +} + void Rdb_tbl_def::check_if_is_mysql_system_table() { static const char *const system_dbs[] = { "mysql", "performance_schema", "information_schema", @@ -2939,7 +3428,7 @@ bool Rdb_validate_tbls::compare_to_actual_tables(const std::string &datadir, /* Validate that all the tables in the RocksDB database dictionary match the .frm - files in the datdir + files in the datadir */ bool Rdb_ddl_manager::validate_schemas(void) { bool has_errors = false; @@ -3018,7 +3507,7 @@ bool Rdb_ddl_manager::init(Rdb_dict_manager *const dict_arg, // Now, read the DDLs. 
const int real_val_size = val.size() - Rdb_key_def::VERSION_SIZE; - if (real_val_size % Rdb_key_def::PACKED_SIZE * 2) { + if (real_val_size % Rdb_key_def::PACKED_SIZE * 2 > 0) { sql_print_error("RocksDB: Table_store: invalid keylist for table %s", tdef->full_tablename().c_str()); return true; @@ -3038,12 +3527,9 @@ bool Rdb_ddl_manager::init(Rdb_dict_manager *const dict_arg, for (uint keyno = 0; ptr < ptr_end; keyno++) { GL_INDEX_ID gl_index_id; rdb_netbuf_read_gl_index(&ptr, &gl_index_id); - uint16 m_index_dict_version = 0; - uchar m_index_type = 0; - uint16 kv_version = 0; uint flags = 0; - if (!m_dict->get_index_info(gl_index_id, &m_index_dict_version, - &m_index_type, &kv_version)) { + struct Rdb_index_info index_info; + if (!m_dict->get_index_info(gl_index_id, &index_info)) { sql_print_error("RocksDB: Could not get index information " "for Index Number (%u,%u), table %s", gl_index_id.cf_id, gl_index_id.index_id, @@ -3064,21 +3550,37 @@ bool Rdb_ddl_manager::init(Rdb_dict_manager *const dict_arg, return true; } + if ((flags & Rdb_key_def::AUTO_CF_FLAG) != 0) { + // The per-index cf option is deprecated. Make sure we don't have the + // flag set in any existing database. NO_LINT_DEBUG + sql_print_error("RocksDB: The defunct AUTO_CF_FLAG is enabled for CF " + "number %d, table %s", + gl_index_id.cf_id, tdef->full_tablename().c_str()); + } + rocksdb::ColumnFamilyHandle *const cfh = cf_manager->get_cf(gl_index_id.cf_id); DBUG_ASSERT(cfh != nullptr); + uint32 ttl_rec_offset = + Rdb_key_def::has_index_flag(index_info.m_index_flags, + Rdb_key_def::TTL_FLAG) + ? 
Rdb_key_def::calculate_index_flag_offset( + index_info.m_index_flags, Rdb_key_def::TTL_FLAG) + : UINT_MAX; + /* We can't fully initialize Rdb_key_def object here, because full initialization requires that there is an open TABLE* where we could look at Field* objects and set max_length and other attributes */ tdef->m_key_descr_arr[keyno] = std::make_shared( - gl_index_id.index_id, keyno, cfh, m_index_dict_version, m_index_type, - kv_version, flags & Rdb_key_def::REVERSE_CF_FLAG, - flags & Rdb_key_def::AUTO_CF_FLAG, + gl_index_id.index_id, keyno, cfh, index_info.m_index_dict_version, + index_info.m_index_type, index_info.m_kv_version, + flags & Rdb_key_def::REVERSE_CF_FLAG, flags & Rdb_key_def::PER_PARTITION_CF_FLAG, "", - m_dict->get_stats(gl_index_id)); + m_dict->get_stats(gl_index_id), index_info.m_index_flags, + ttl_rec_offset, index_info.m_ttl_duration); } put(tdef); i++; @@ -3105,8 +3607,7 @@ bool Rdb_ddl_manager::init(Rdb_dict_manager *const dict_arg, m_sequence.init(max_index_id_in_dict + 1); if (!it->status().ok()) { - const std::string s = it->status().ToString(); - sql_print_error("RocksDB: Table_store: load error: %s", s.c_str()); + rdb_log_status_error(it->status(), "Table_store load error"); return true; } delete it; @@ -3635,19 +4136,40 @@ void Rdb_binlog_manager::update_slave_gtid_info( bool Rdb_dict_manager::init(rocksdb::DB *const rdb_dict, Rdb_cf_manager *const cf_manager) { + DBUG_ASSERT(rdb_dict != nullptr); + DBUG_ASSERT(cf_manager != nullptr); + mysql_mutex_init(0, &m_mutex, MY_MUTEX_INIT_FAST); + m_db = rdb_dict; - bool is_automatic; - m_system_cfh = cf_manager->get_or_create_cf(m_db, DEFAULT_SYSTEM_CF_NAME, "", - nullptr, &is_automatic); + + m_system_cfh = cf_manager->get_or_create_cf(m_db, DEFAULT_SYSTEM_CF_NAME); + rocksdb::ColumnFamilyHandle *default_cfh = + cf_manager->get_cf(DEFAULT_CF_NAME); + + // System CF and default CF should be initialized + if (m_system_cfh == nullptr || default_cfh == nullptr) { + return HA_EXIT_FAILURE; + } + 
rdb_netbuf_store_index(m_key_buf_max_index_id, Rdb_key_def::MAX_INDEX_ID); + m_key_slice_max_index_id = rocksdb::Slice(reinterpret_cast(m_key_buf_max_index_id), Rdb_key_def::INDEX_NUMBER_SIZE); + resume_drop_indexes(); rollback_ongoing_index_creation(); - return (m_system_cfh == nullptr); + // Initialize system CF and default CF flags + const std::unique_ptr wb = begin(); + rocksdb::WriteBatch *const batch = wb.get(); + + add_cf_flags(batch, m_system_cfh->GetID(), 0); + add_cf_flags(batch, default_cfh->GetID(), 0); + commit(batch); + + return HA_EXIT_SUCCESS; } std::unique_ptr Rdb_dict_manager::begin() const { @@ -3682,8 +4204,8 @@ rocksdb::Iterator *Rdb_dict_manager::new_iterator() const { int Rdb_dict_manager::commit(rocksdb::WriteBatch *const batch, const bool &sync) const { if (!batch) - return HA_EXIT_FAILURE; - int res = 0; + return HA_ERR_ROCKSDB_COMMIT_FAILED; + int res = HA_EXIT_SUCCESS; rocksdb::WriteOptions options; options.sync = sync; rocksdb::Status s = m_db->Write(options, batch); @@ -3716,22 +4238,23 @@ void Rdb_dict_manager::delete_with_prefix( } void Rdb_dict_manager::add_or_update_index_cf_mapping( - rocksdb::WriteBatch *batch, const uchar m_index_type, - const uint16_t kv_version, const uint32_t index_id, - const uint32_t cf_id) const { + rocksdb::WriteBatch *batch, struct Rdb_index_info *const index_info) const { uchar key_buf[Rdb_key_def::INDEX_NUMBER_SIZE * 3] = {0}; uchar value_buf[256] = {0}; - GL_INDEX_ID gl_index_id = {cf_id, index_id}; - dump_index_id(key_buf, Rdb_key_def::INDEX_INFO, gl_index_id); + dump_index_id(key_buf, Rdb_key_def::INDEX_INFO, index_info->m_gl_index_id); const rocksdb::Slice key = rocksdb::Slice((char *)key_buf, sizeof(key_buf)); uchar *ptr = value_buf; rdb_netbuf_store_uint16(ptr, Rdb_key_def::INDEX_INFO_VERSION_LATEST); - ptr += 2; - rdb_netbuf_store_byte(ptr, m_index_type); - ptr += 1; - rdb_netbuf_store_uint16(ptr, kv_version); - ptr += 2; + ptr += RDB_SIZEOF_INDEX_INFO_VERSION; + rdb_netbuf_store_byte(ptr, 
index_info->m_index_type); + ptr += RDB_SIZEOF_INDEX_TYPE; + rdb_netbuf_store_uint16(ptr, index_info->m_kv_version); + ptr += RDB_SIZEOF_KV_VERSION; + rdb_netbuf_store_uint32(ptr, index_info->m_index_flags); + ptr += RDB_SIZEOF_INDEX_FLAGS; + rdb_netbuf_store_uint64(ptr, index_info->m_ttl_duration); + ptr += ROCKSDB_SIZEOF_TTL_RECORD; const rocksdb::Slice value = rocksdb::Slice((char *)value_buf, ptr - value_buf); @@ -3741,6 +4264,8 @@ void Rdb_dict_manager::add_or_update_index_cf_mapping( void Rdb_dict_manager::add_cf_flags(rocksdb::WriteBatch *const batch, const uint32_t &cf_id, const uint32_t &cf_flags) const { + DBUG_ASSERT(batch != nullptr); + uchar key_buf[Rdb_key_def::INDEX_NUMBER_SIZE * 2] = {0}; uchar value_buf[Rdb_key_def::VERSION_SIZE + Rdb_key_def::INDEX_NUMBER_SIZE] = {0}; @@ -3761,10 +4286,12 @@ void Rdb_dict_manager::delete_index_info(rocksdb::WriteBatch *batch, delete_with_prefix(batch, Rdb_key_def::INDEX_STATISTICS, gl_index_id); } -bool Rdb_dict_manager::get_index_info(const GL_INDEX_ID &gl_index_id, - uint16_t *m_index_dict_version, - uchar *m_index_type, - uint16_t *kv_version) const { +bool Rdb_dict_manager::get_index_info( + const GL_INDEX_ID &gl_index_id, + struct Rdb_index_info *const index_info) const { + + index_info->m_gl_index_id = gl_index_id; + bool found = false; bool error = false; std::string value; @@ -3776,17 +4303,50 @@ bool Rdb_dict_manager::get_index_info(const GL_INDEX_ID &gl_index_id, if (status.ok()) { const uchar *const val = (const uchar *)value.c_str(); const uchar *ptr = val; - *m_index_dict_version = rdb_netbuf_to_uint16(val); - *kv_version = 0; - *m_index_type = 0; - ptr += 2; - switch (*m_index_dict_version) { + index_info->m_index_dict_version = rdb_netbuf_to_uint16(val); + ptr += RDB_SIZEOF_INDEX_INFO_VERSION; + + switch (index_info->m_index_dict_version) { + case Rdb_key_def::INDEX_INFO_VERSION_FIELD_FLAGS: + /* Sanity check to prevent reading bogus TTL record. 
*/ + if (value.size() != RDB_SIZEOF_INDEX_INFO_VERSION + + RDB_SIZEOF_INDEX_TYPE + RDB_SIZEOF_KV_VERSION + + RDB_SIZEOF_INDEX_FLAGS + + ROCKSDB_SIZEOF_TTL_RECORD) { + error = true; + break; + } + index_info->m_index_type = rdb_netbuf_to_byte(ptr); + ptr += RDB_SIZEOF_INDEX_TYPE; + index_info->m_kv_version = rdb_netbuf_to_uint16(ptr); + ptr += RDB_SIZEOF_KV_VERSION; + index_info->m_index_flags = rdb_netbuf_to_uint32(ptr); + ptr += RDB_SIZEOF_INDEX_FLAGS; + index_info->m_ttl_duration = rdb_netbuf_to_uint64(ptr); + found = true; + break; + + case Rdb_key_def::INDEX_INFO_VERSION_TTL: + /* Sanity check to prevent reading bogus into TTL record. */ + if (value.size() != RDB_SIZEOF_INDEX_INFO_VERSION + + RDB_SIZEOF_INDEX_TYPE + RDB_SIZEOF_KV_VERSION + + ROCKSDB_SIZEOF_TTL_RECORD) { + error = true; + break; + } + index_info->m_index_type = rdb_netbuf_to_byte(ptr); + ptr += RDB_SIZEOF_INDEX_TYPE; + index_info->m_kv_version = rdb_netbuf_to_uint16(ptr); + ptr += RDB_SIZEOF_KV_VERSION; + index_info->m_ttl_duration = rdb_netbuf_to_uint64(ptr); + found = true; + break; case Rdb_key_def::INDEX_INFO_VERSION_VERIFY_KV_FORMAT: case Rdb_key_def::INDEX_INFO_VERSION_GLOBAL_ID: - *m_index_type = rdb_netbuf_to_byte(ptr); - ptr += 1; - *kv_version = rdb_netbuf_to_uint16(ptr); + index_info->m_index_type = rdb_netbuf_to_byte(ptr); + ptr += RDB_SIZEOF_INDEX_TYPE; + index_info->m_kv_version = rdb_netbuf_to_uint16(ptr); found = true; break; @@ -3795,14 +4355,16 @@ bool Rdb_dict_manager::get_index_info(const GL_INDEX_ID &gl_index_id, break; } - switch (*m_index_type) { + switch (index_info->m_index_type) { case Rdb_key_def::INDEX_TYPE_PRIMARY: case Rdb_key_def::INDEX_TYPE_HIDDEN_PRIMARY: { - error = *kv_version > Rdb_key_def::PRIMARY_FORMAT_VERSION_LATEST; + error = + index_info->m_kv_version > Rdb_key_def::PRIMARY_FORMAT_VERSION_LATEST; break; } case Rdb_key_def::INDEX_TYPE_SECONDARY: - error = *kv_version > Rdb_key_def::SECONDARY_FORMAT_VERSION_LATEST; + error = index_info->m_kv_version > + 
Rdb_key_def::SECONDARY_FORMAT_VERSION_LATEST; break; default: error = true; @@ -3812,10 +4374,12 @@ bool Rdb_dict_manager::get_index_info(const GL_INDEX_ID &gl_index_id, if (error) { // NO_LINT_DEBUG - sql_print_error("RocksDB: Found invalid key version number (%u, %u, %u) " - "from data dictionary. This should never happen " - "and it may be a bug.", - *m_index_dict_version, *m_index_type, *kv_version); + sql_print_error( + "RocksDB: Found invalid key version number (%u, %u, %u, %llu) " + "from data dictionary. This should never happen " + "and it may be a bug.", + index_info->m_index_dict_version, index_info->m_index_type, + index_info->m_kv_version, index_info->m_ttl_duration); abort_with_stack_traces(); } @@ -3824,22 +4388,31 @@ bool Rdb_dict_manager::get_index_info(const GL_INDEX_ID &gl_index_id, bool Rdb_dict_manager::get_cf_flags(const uint32_t &cf_id, uint32_t *const cf_flags) const { + DBUG_ASSERT(cf_flags != nullptr); + bool found = false; std::string value; uchar key_buf[Rdb_key_def::INDEX_NUMBER_SIZE * 2] = {0}; + rdb_netbuf_store_uint32(key_buf, Rdb_key_def::CF_DEFINITION); rdb_netbuf_store_uint32(key_buf + Rdb_key_def::INDEX_NUMBER_SIZE, cf_id); - const rocksdb::Slice key = rocksdb::Slice((char *)key_buf, sizeof(key_buf)); + const rocksdb::Slice key = + rocksdb::Slice(reinterpret_cast(key_buf), sizeof(key_buf)); const rocksdb::Status status = get_value(key, &value); + if (status.ok()) { const uchar *val = (const uchar *)value.c_str(); - uint16_t version = rdb_netbuf_to_uint16(val); + DBUG_ASSERT(val); + + const uint16_t version = rdb_netbuf_to_uint16(val); + if (version == Rdb_key_def::CF_DEFINITION_VERSION) { *cf_flags = rdb_netbuf_to_uint32(val + Rdb_key_def::VERSION_SIZE); found = true; } } + return found; } @@ -4028,13 +4601,6 @@ void Rdb_dict_manager::finish_indexes_operation( for (const auto &gl_index_id : gl_index_ids) { if (is_index_operation_ongoing(gl_index_id, dd_type)) { - // NO_LINT_DEBUG - sql_print_information("RocksDB: Finished %s 
(%u,%u)", - dd_type == Rdb_key_def::DDL_DROP_INDEX_ONGOING - ? "filtering dropped index" - : "index creation", - gl_index_id.cf_id, gl_index_id.index_id); - end_ongoing_index_operation(batch, gl_index_id, dd_type); /* @@ -4110,12 +4676,8 @@ void Rdb_dict_manager::log_start_drop_table( void Rdb_dict_manager::log_start_drop_index(GL_INDEX_ID gl_index_id, const char *log_action) const { - uint16 m_index_dict_version = 0; - uchar m_index_type = 0; - uint16 kv_version = 0; - - if (!get_index_info(gl_index_id, &m_index_dict_version, &m_index_type, - &kv_version)) { + struct Rdb_index_info index_info; + if (!get_index_info(gl_index_id, &index_info)) { /* If we don't find the index info, it could be that it's because it was a partially created index that isn't in the data dictionary yet that needs @@ -4133,8 +4695,6 @@ void Rdb_dict_manager::log_start_drop_index(GL_INDEX_ID gl_index_id, abort_with_stack_traces(); } } - sql_print_information("RocksDB: %s filtering dropped index (%u,%u)", - log_action, gl_index_id.cf_id, gl_index_id.index_id); } bool Rdb_dict_manager::get_max_index_id(uint32_t *const index_id) const { diff --git a/storage/rocksdb/rdb_datadic.h b/storage/rocksdb/rdb_datadic.h index 7c1b2bf6dec25..e91e19aa92561 100644 --- a/storage/rocksdb/rdb_datadic.h +++ b/storage/rocksdb/rdb_datadic.h @@ -69,24 +69,24 @@ class Rdb_pack_field_context { }; struct Rdb_collation_codec; +struct Rdb_index_info; /* C-style "virtual table" allowing different handling of packing logic based on the field type. See Rdb_field_packing::setup() implementation. 
*/ -using rdb_make_unpack_info_t = void (*)(const Rdb_collation_codec *codec, - const Field *field, - Rdb_pack_field_context *pack_ctx); -using rdb_index_field_unpack_t = int (*)(Rdb_field_packing *fpi, Field *field, - uchar *field_ptr, - Rdb_string_reader *reader, - Rdb_string_reader *unpack_reader); -using rdb_index_field_skip_t = int (*)(const Rdb_field_packing *fpi, - const Field *field, - Rdb_string_reader *reader); -using rdb_index_field_pack_t = void (*)(Rdb_field_packing *fpi, Field *field, - uchar *buf, uchar **dst, - Rdb_pack_field_context *pack_ctx); +using rdb_make_unpack_info_t = + void (Rdb_key_def::*)(const Rdb_collation_codec *codec, const Field *field, + Rdb_pack_field_context *pack_ctx) const; +using rdb_index_field_unpack_t = int (Rdb_key_def::*)( + Rdb_field_packing *fpi, Field *field, uchar *field_ptr, + Rdb_string_reader *reader, Rdb_string_reader *unpack_reader) const; +using rdb_index_field_skip_t = + int (Rdb_key_def::*)(const Rdb_field_packing *fpi, const Field *field, + Rdb_string_reader *reader) const; +using rdb_index_field_pack_t = + void (Rdb_key_def::*)(Rdb_field_packing *fpi, Field *field, uchar *buf, + uchar **dst, Rdb_pack_field_context *pack_ctx) const; const uint RDB_INVALID_KEY_LEN = uint(-1); @@ -114,6 +114,14 @@ const size_t RDB_UNPACK_DATA_LEN_SIZE = sizeof(uint16_t); const size_t RDB_UNPACK_HEADER_SIZE = sizeof(RDB_UNPACK_DATA_TAG) + RDB_UNPACK_DATA_LEN_SIZE; +/* + Data dictionary index info field sizes. +*/ +const size_t RDB_SIZEOF_INDEX_INFO_VERSION = sizeof(uint16); +const size_t RDB_SIZEOF_INDEX_TYPE = sizeof(uchar); +const size_t RDB_SIZEOF_KV_VERSION = sizeof(uint16); +const size_t RDB_SIZEOF_INDEX_FLAGS = sizeof(uint32); + // Possible return values for rdb_index_field_unpack_t functions. 
enum { UNPACK_SUCCESS = 0, @@ -167,20 +175,19 @@ class Rdb_key_def { uchar *const packed_tuple, const uchar *const key_tuple, const key_part_map &keypart_map) const; - uchar *pack_field(Field *const field, - Rdb_field_packing *pack_info, - uchar * tuple, - uchar *const packed_tuple, - uchar *const pack_buffer, + uchar *pack_field(Field *const field, Rdb_field_packing *pack_info, + uchar *tuple, uchar *const packed_tuple, + uchar *const pack_buffer, Rdb_string_writer *const unpack_info, - uint *const n_null_fields) const; + uint *const n_null_fields) const; /* Convert a key from Table->record format to mem-comparable form */ uint pack_record(const TABLE *const tbl, uchar *const pack_buffer, const uchar *const record, uchar *const packed_tuple, Rdb_string_writer *const unpack_info, const bool &should_store_row_debug_checksums, const longlong &hidden_pk_id = 0, uint n_key_parts = 0, - uint *const n_null_fields = nullptr) const; + uint *const n_null_fields = nullptr, + uint *const ttl_pk_offset = nullptr) const; /* Pack the hidden primary key into mem-comparable form. 
*/ uint pack_hidden_pk(const longlong &hidden_pk_id, uchar *const packed_tuple) const; @@ -275,6 +282,8 @@ class Rdb_key_def { uint get_key_parts() const { return m_key_parts; } + uint get_ttl_field_offset() const { return m_ttl_field_offset; } + /* Get a field object for key part #part_no @@ -299,8 +308,9 @@ class Rdb_key_def { rocksdb::ColumnFamilyHandle *cf_handle_arg, uint16_t index_dict_version_arg, uchar index_type_arg, uint16_t kv_format_version_arg, bool is_reverse_cf_arg, - bool is_auto_cf_arg, bool is_per_partition_cf, const char *name, - Rdb_index_stats stats = Rdb_index_stats()); + bool is_per_partition_cf, const char *name, + Rdb_index_stats stats = Rdb_index_stats(), uint32 index_flags = 0, + uint32 ttl_rec_offset = UINT_MAX, uint64 ttl_duration = 0); ~Rdb_key_def(); enum { @@ -314,10 +324,20 @@ class Rdb_key_def { // bit flags for combining bools when writing to disk enum { REVERSE_CF_FLAG = 1, - AUTO_CF_FLAG = 2, + AUTO_CF_FLAG = 2, // Deprecated PER_PARTITION_CF_FLAG = 4, }; + // bit flags which denote myrocks specific fields stored in the record + // currently only used for TTL. + enum INDEX_FLAG { + TTL_FLAG = 1 << 0, + + // MAX_FLAG marks where the actual record starts + // This flag always needs to be set to the last index flag enum. + MAX_FLAG = TTL_FLAG << 1, + }; + // Set of flags to ignore when comparing two CF-s and determining if // they're same. static const uint CF_FLAGS_TO_IGNORE = PER_PARTITION_CF_FLAG; @@ -351,7 +371,7 @@ class Rdb_key_def { // INDEX_INFO layout. Update INDEX_INFO_VERSION_LATEST to point to the // latest version number. enum { - INDEX_INFO_VERSION_INITIAL = 1, // Obsolete + INDEX_INFO_VERSION_INITIAL = 1, // Obsolete INDEX_INFO_VERSION_KV_FORMAT, INDEX_INFO_VERSION_GLOBAL_ID, // There is no change to data format in this version, but this version @@ -359,8 +379,14 @@ class Rdb_key_def { // bump is needed to prevent older binaries from skipping the KV version // check inadvertently. 
INDEX_INFO_VERSION_VERIFY_KV_FORMAT, + // This changes the data format to include a 8 byte TTL duration for tables + INDEX_INFO_VERSION_TTL, + // This changes the data format to include a bitmap before the TTL duration + // which will indicate in the future whether TTL or other special fields + // are turned on or off. + INDEX_INFO_VERSION_FIELD_FLAGS, // This normally point to the latest (currently it does). - INDEX_INFO_VERSION_LATEST = INDEX_INFO_VERSION_VERIFY_KV_FORMAT, + INDEX_INFO_VERSION_LATEST = INDEX_INFO_VERSION_FIELD_FLAGS, }; // MyRocks index types @@ -380,16 +406,58 @@ class Rdb_key_def { // it can be decoded from its mem-comparable form) // - VARCHAR-columns use endspace-padding. PRIMARY_FORMAT_VERSION_UPDATE1 = 11, - PRIMARY_FORMAT_VERSION_LATEST = PRIMARY_FORMAT_VERSION_UPDATE1, + // This change includes: + // - Binary encoded variable length fields have a new format that avoids + // an inefficient where data that was a multiple of 8 bytes in length + // had an extra 9 bytes of encoded data. + PRIMARY_FORMAT_VERSION_UPDATE2 = 12, + // This change includes support for TTL + // - This means that when TTL is specified for the table an 8-byte TTL + // field is prepended in front of each value. + PRIMARY_FORMAT_VERSION_TTL = 13, + PRIMARY_FORMAT_VERSION_LATEST = PRIMARY_FORMAT_VERSION_TTL, SECONDARY_FORMAT_VERSION_INITIAL = 10, // This change the SK format to include unpack_info. SECONDARY_FORMAT_VERSION_UPDATE1 = 11, - SECONDARY_FORMAT_VERSION_LATEST = SECONDARY_FORMAT_VERSION_UPDATE1, + // This change includes: + // - Binary encoded variable length fields have a new format that avoids + // an inefficient where data that was a multiple of 8 bytes in length + // had an extra 9 bytes of encoded data. 
+ SECONDARY_FORMAT_VERSION_UPDATE2 = 12, + SECONDARY_FORMAT_VERSION_LATEST = SECONDARY_FORMAT_VERSION_UPDATE2, }; void setup(const TABLE *const table, const Rdb_tbl_def *const tbl_def); + static uint extract_ttl_duration(const TABLE *const table_arg, + const Rdb_tbl_def *const tbl_def_arg, + uint64 *ttl_duration); + static uint extract_ttl_col(const TABLE *const table_arg, + const Rdb_tbl_def *const tbl_def_arg, + std::string *ttl_column, uint *ttl_field_offset, + bool skip_checks = false); + inline bool has_ttl() const { return m_ttl_duration > 0; } + + static bool has_index_flag(uint32 index_flags, enum INDEX_FLAG flag); + static uint32 calculate_index_flag_offset(uint32 index_flags, + enum INDEX_FLAG flag); + + static const std::string + gen_qualifier_for_table(const char *const qualifier, + const std::string &partition_name = ""); + static const std::string + gen_cf_name_qualifier_for_partition(const std::string &s); + static const std::string + gen_ttl_duration_qualifier_for_partition(const std::string &s); + static const std::string + gen_ttl_col_qualifier_for_partition(const std::string &s); + + static const std::string parse_comment_for_qualifier( + const std::string &comment, const TABLE *const table_arg, + const Rdb_tbl_def *const tbl_def_arg, bool *per_part_match_found, + const char *const qualifier); + rocksdb::ColumnFamilyHandle *get_cf() const { return m_cf_handle; } /* Check if keypart #kp can be unpacked from index tuple */ @@ -407,7 +475,126 @@ class Rdb_key_def { or at least sk_min if SK.*/ bool index_format_min_check(const int &pk_min, const int &sk_min) const; -private: + void pack_with_make_sort_key( + Rdb_field_packing *const fpi, Field *const field, + uchar *buf MY_ATTRIBUTE((__unused__)), uchar **dst, + Rdb_pack_field_context *const pack_ctx MY_ATTRIBUTE((__unused__))) const; + + void pack_with_varchar_encoding( + Rdb_field_packing *const fpi, Field *const field, uchar *buf, uchar **dst, + Rdb_pack_field_context *const pack_ctx 
MY_ATTRIBUTE((__unused__))) const; + + void + pack_with_varchar_space_pad(Rdb_field_packing *const fpi, Field *const field, + uchar *buf, uchar **dst, + Rdb_pack_field_context *const pack_ctx) const; + + int unpack_integer(Rdb_field_packing *const fpi, Field *const field, + uchar *const to, Rdb_string_reader *const reader, + Rdb_string_reader *const unp_reader + MY_ATTRIBUTE((__unused__))) const; + + int unpack_double(Rdb_field_packing *const fpi MY_ATTRIBUTE((__unused__)), + Field *const field MY_ATTRIBUTE((__unused__)), + uchar *const field_ptr, Rdb_string_reader *const reader, + Rdb_string_reader *const unp_reader + MY_ATTRIBUTE((__unused__))) const; + + int unpack_float(Rdb_field_packing *const fpi, + Field *const field MY_ATTRIBUTE((__unused__)), + uchar *const field_ptr, Rdb_string_reader *const reader, + Rdb_string_reader *const unp_reader + MY_ATTRIBUTE((__unused__))) const; + + int unpack_binary_str(Rdb_field_packing *const fpi, Field *const field, + uchar *const to, Rdb_string_reader *const reader, + Rdb_string_reader *const unp_reader + MY_ATTRIBUTE((__unused__))) const; + + int unpack_binary_or_utf8_varchar( + Rdb_field_packing *const fpi, Field *const field, uchar *dst, + Rdb_string_reader *const reader, + Rdb_string_reader *const unp_reader MY_ATTRIBUTE((__unused__))) const; + + int unpack_binary_or_utf8_varchar_space_pad( + Rdb_field_packing *const fpi, Field *const field, uchar *dst, + Rdb_string_reader *const reader, + Rdb_string_reader *const unp_reader) const; + + int unpack_newdate(Rdb_field_packing *const fpi, + Field *const field MY_ATTRIBUTE((__unused__)), + uchar *const field_ptr, Rdb_string_reader *const reader, + Rdb_string_reader *const unp_reader + MY_ATTRIBUTE((__unused__))) const; + + int unpack_utf8_str(Rdb_field_packing *const fpi, Field *const field, + uchar *dst, Rdb_string_reader *const reader, + Rdb_string_reader *const unp_reader + MY_ATTRIBUTE((__unused__))) const; + + int unpack_unknown_varchar(Rdb_field_packing *const fpi, 
Field *const field, + uchar *dst, Rdb_string_reader *const reader, + Rdb_string_reader *const unp_reader) const; + + int unpack_simple_varchar_space_pad( + Rdb_field_packing *const fpi, Field *const field, uchar *dst, + Rdb_string_reader *const reader, + Rdb_string_reader *const unp_reader) const; + + int unpack_simple(Rdb_field_packing *const fpi, + Field *const field MY_ATTRIBUTE((__unused__)), + uchar *const dst, Rdb_string_reader *const reader, + Rdb_string_reader *const unp_reader) const; + + int unpack_unknown(Rdb_field_packing *const fpi, Field *const field, + uchar *const dst, Rdb_string_reader *const reader, + Rdb_string_reader *const unp_reader) const; + + int unpack_floating_point(uchar *const dst, Rdb_string_reader *const reader, + const size_t &size, const int &exp_digit, + const uchar *const zero_pattern, + const uchar *const zero_val, + void (*swap_func)(uchar *, const uchar *)) const; + + void make_unpack_simple_varchar(const Rdb_collation_codec *const codec, + const Field *const field, + Rdb_pack_field_context *const pack_ctx) const; + + void make_unpack_simple(const Rdb_collation_codec *const codec, + const Field *const field, + Rdb_pack_field_context *const pack_ctx) const; + + void make_unpack_unknown( + const Rdb_collation_codec *codec MY_ATTRIBUTE((__unused__)), + const Field *const field, Rdb_pack_field_context *const pack_ctx) const; + + void make_unpack_unknown_varchar( + const Rdb_collation_codec *const codec MY_ATTRIBUTE((__unused__)), + const Field *const field, Rdb_pack_field_context *const pack_ctx) const; + + void dummy_make_unpack_info( + const Rdb_collation_codec *codec MY_ATTRIBUTE((__unused__)), + const Field *field MY_ATTRIBUTE((__unused__)), + Rdb_pack_field_context *pack_ctx MY_ATTRIBUTE((__unused__))) const; + + int skip_max_length(const Rdb_field_packing *const fpi, + const Field *const field MY_ATTRIBUTE((__unused__)), + Rdb_string_reader *const reader) const; + + int skip_variable_length( + const Rdb_field_packing *const 
fpi MY_ATTRIBUTE((__unused__)), + const Field *const field, Rdb_string_reader *const reader) const; + + int skip_variable_space_pad(const Rdb_field_packing *const fpi, + const Field *const field, + Rdb_string_reader *const reader) const; + + inline bool use_legacy_varbinary_format() const { + return !index_format_min_check(PRIMARY_FORMAT_VERSION_UPDATE2, + SECONDARY_FORMAT_VERSION_UPDATE2); + } + + private: #ifndef DBUG_OFF inline bool is_storage_available(const int &offset, const int &needed) const { const int storage_length = static_cast(max_storage_fmt_length()); @@ -422,7 +609,17 @@ class Rdb_key_def { rocksdb::ColumnFamilyHandle *m_cf_handle; -public: + void pack_legacy_variable_format(const uchar *src, size_t src_len, + uchar **dst) const; + + void pack_variable_format(const uchar *src, size_t src_len, + uchar **dst) const; + + uint calc_unpack_legacy_variable_format(uchar flag, bool *done) const; + + uint calc_unpack_variable_format(uchar flag, bool *done) const; + + public: uint16_t m_index_dict_version; uchar m_index_type; /* KV format version for the index id */ @@ -430,15 +627,30 @@ class Rdb_key_def { /* If true, the column family stores data in the reverse order */ bool m_is_reverse_cf; - bool m_is_auto_cf; - /* If true, then column family is created per partition. */ bool m_is_per_partition_cf; std::string m_name; mutable Rdb_index_stats m_stats; -private: + /* + Bitmap containing information about whether TTL or other special fields + are enabled for the given index. 
+ */ + uint32 m_index_flags_bitmap; + + /* + Offset in the records where the 8-byte TTL is stored (UINT_MAX if no TTL) + */ + uint32 m_ttl_rec_offset; + + /* Default TTL duration */ + uint64 m_ttl_duration; + + /* TTL column (if defined by user, otherwise implicit TTL is used) */ + std::string m_ttl_column; + + private: friend class Rdb_tbl_def; // for m_index_number above /* Number of key parts in the primary key*/ @@ -461,6 +673,18 @@ class Rdb_key_def { */ uint m_key_parts; + /* + If TTL column is part of the PK, offset of the column within pk. + Default is UINT_MAX to denote that TTL col is not part of PK. + */ + uint m_ttl_pk_key_part_offset; + + /* + Index of the TTL column in table->s->fields, if it exists. + Default is UINT_MAX to denote that it does not exist. + */ + uint m_ttl_field_offset; + /* Prefix extractor for the column family of the key definiton */ std::shared_ptr m_prefix_extractor; @@ -892,12 +1116,13 @@ class Rdb_binlog_manager { 2. internal cf_id, index id => index information key: Rdb_key_def::INDEX_INFO(0x2) + cf_id + index_id - value: version, index_type, kv_format_version + value: version, index_type, kv_format_version, ttl_duration index_type is 1 byte, version and kv_format_version are 2 bytes. + ttl_duration is 8 bytes. 3. CF id => CF flags key: Rdb_key_def::CF_DEFINITION(0x3) + cf_id - value: version, {is_reverse_cf, is_auto_cf, is_per_partition_cf} + value: version, {is_reverse_cf, is_auto_cf (deprecated), is_per_partition_cf} cf_flags is 4 bytes in total. 4. 
Binlog entry (updated at commit) @@ -964,6 +1189,10 @@ class Rdb_dict_manager { inline void unlock() { RDB_MUTEX_UNLOCK_CHECK(m_mutex); } + inline rocksdb::ColumnFamilyHandle *get_system_cf() const { + return m_system_cfh; + } + /* Raw RocksDB operations */ std::unique_ptr begin() const; int commit(rocksdb::WriteBatch *const batch, const bool &sync = true) const; @@ -976,16 +1205,13 @@ class Rdb_dict_manager { rocksdb::Iterator *new_iterator() const; /* Internal Index id => CF */ - void add_or_update_index_cf_mapping(rocksdb::WriteBatch *batch, - const uchar index_type, - const uint16_t kv_version, - const uint index_id, - const uint cf_id) const; + void + add_or_update_index_cf_mapping(rocksdb::WriteBatch *batch, + struct Rdb_index_info *const index_info) const; void delete_index_info(rocksdb::WriteBatch *batch, const GL_INDEX_ID &index_id) const; bool get_index_info(const GL_INDEX_ID &gl_index_id, - uint16_t *index_dict_version, uchar *index_type, - uint16_t *kv_version) const; + struct Rdb_index_info *const index_info) const; /* CF id => CF flags */ void add_cf_flags(rocksdb::WriteBatch *const batch, const uint &cf_id, @@ -1063,4 +1289,13 @@ class Rdb_dict_manager { Rdb_index_stats get_stats(GL_INDEX_ID gl_index_id) const; }; +struct Rdb_index_info { + GL_INDEX_ID m_gl_index_id; + uint16_t m_index_dict_version = 0; + uchar m_index_type = 0; + uint16_t m_kv_version = 0; + uint32 m_index_flags = 0; + uint64 m_ttl_duration = 0; +}; + } // namespace myrocks diff --git a/storage/rocksdb/rdb_i_s.cc b/storage/rocksdb/rdb_i_s.cc index 158a7070ea9c5..1db521cee4ca2 100644 --- a/storage/rocksdb/rdb_i_s.cc +++ b/storage/rocksdb/rdb_i_s.cc @@ -15,6 +15,8 @@ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ /* C++ standard header files */ +#include +#include #include #include #include @@ -69,7 +71,11 @@ static int rdb_i_s_cfstats_fill_table( my_core::Item *const cond MY_ATTRIBUTE((__unused__))) { DBUG_ENTER_FUNC(); - bool ret; + DBUG_ASSERT(tables != 
nullptr); + DBUG_ASSERT(tables->table != nullptr); + DBUG_ASSERT(tables->table->field != nullptr); + + int ret = 0; uint64_t val; const std::vector> cf_properties = { @@ -91,26 +97,24 @@ static int rdb_i_s_cfstats_fill_table( {rocksdb::DB::Properties::kNumLiveVersions, "NUM_LIVE_VERSIONS"}}; rocksdb::DB *const rdb = rdb_get_rocksdb_db(); + + if (!rdb) { + DBUG_RETURN(ret); + } + const Rdb_cf_manager &cf_manager = rdb_get_cf_manager(); - DBUG_ASSERT(rdb != nullptr); for (const auto &cf_name : cf_manager.get_cf_names()) { - rocksdb::ColumnFamilyHandle *cfh; - bool is_automatic; - - /* - Only the cf name is important. Whether it was generated automatically - does not matter, so is_automatic is ignored. - */ - cfh = cf_manager.get_cf(cf_name.c_str(), "", nullptr, &is_automatic); - if (cfh == nullptr) + DBUG_ASSERT(!cf_name.empty()); + rocksdb::ColumnFamilyHandle *cfh = cf_manager.get_cf(cf_name); + if (cfh == nullptr) { continue; + } for (const auto &property : cf_properties) { - if (!rdb->GetIntProperty(cfh, property.first, &val)) + if (!rdb->GetIntProperty(cfh, property.first, &val)) { continue; - - DBUG_ASSERT(tables != nullptr); + } tables->table->field[RDB_CFSTATS_FIELD::CF_NAME]->store( cf_name.c_str(), cf_name.size(), system_charset_info); @@ -118,12 +122,15 @@ static int rdb_i_s_cfstats_fill_table( property.second.c_str(), property.second.size(), system_charset_info); tables->table->field[RDB_CFSTATS_FIELD::VALUE]->store(val, true); - ret = my_core::schema_table_store_record(thd, tables->table); + ret = static_cast( + my_core::schema_table_store_record(thd, tables->table)); - if (ret) + if (ret) { DBUG_RETURN(ret); + } } } + DBUG_RETURN(0); } @@ -159,7 +166,11 @@ static int rdb_i_s_dbstats_fill_table( my_core::Item *const cond MY_ATTRIBUTE((__unused__))) { DBUG_ENTER_FUNC(); - bool ret; + DBUG_ASSERT(tables != nullptr); + DBUG_ASSERT(tables->table != nullptr); + DBUG_ASSERT(tables->table->field != nullptr); + + int ret = 0; uint64_t val; const std::vector> 
db_properties = { @@ -169,23 +180,29 @@ static int rdb_i_s_dbstats_fill_table( "DB_OLDEST_SNAPSHOT_TIME"}}; rocksdb::DB *const rdb = rdb_get_rocksdb_db(); + + if (!rdb) { + DBUG_RETURN(ret); + } + const rocksdb::BlockBasedTableOptions &table_options = rdb_get_table_options(); for (const auto &property : db_properties) { - if (!rdb->GetIntProperty(property.first, &val)) + if (!rdb->GetIntProperty(property.first, &val)) { continue; - - DBUG_ASSERT(tables != nullptr); + } tables->table->field[RDB_DBSTATS_FIELD::STAT_TYPE]->store( property.second.c_str(), property.second.size(), system_charset_info); tables->table->field[RDB_DBSTATS_FIELD::VALUE]->store(val, true); - ret = my_core::schema_table_store_record(thd, tables->table); + ret = static_cast( + my_core::schema_table_store_record(thd, tables->table)); - if (ret) + if (ret) { DBUG_RETURN(ret); + } } /* @@ -199,11 +216,13 @@ static int rdb_i_s_dbstats_fill_table( information from the column family. */ val = (table_options.block_cache ? table_options.block_cache->GetUsage() : 0); + tables->table->field[RDB_DBSTATS_FIELD::STAT_TYPE]->store( STRING_WITH_LEN("DB_BLOCK_CACHE_USAGE"), system_charset_info); tables->table->field[RDB_DBSTATS_FIELD::VALUE]->store(val, true); - ret = my_core::schema_table_store_record(thd, tables->table); + ret = + static_cast(my_core::schema_table_store_record(thd, tables->table)); DBUG_RETURN(ret); } @@ -246,17 +265,28 @@ static int rdb_i_s_perf_context_fill_table( DBUG_ASSERT(thd != nullptr); DBUG_ASSERT(tables != nullptr); + DBUG_ASSERT(tables->table != nullptr); int ret = 0; Field **field = tables->table->field; + DBUG_ASSERT(field != nullptr); + + rocksdb::DB *const rdb = rdb_get_rocksdb_db(); + + if (!rdb) { + DBUG_RETURN(ret); + } const std::vector tablenames = rdb_get_open_table_names(); + for (const auto &it : tablenames) { std::string str, dbname, tablename, partname; Rdb_perf_counters counters; - if (rdb_normalize_tablename(it, &str)) { - DBUG_RETURN(HA_ERR_INTERNAL_ERROR); + int 
rc = rdb_normalize_tablename(it, &str); + + if (rc != HA_EXIT_SUCCESS) { + DBUG_RETURN(rc); } if (rdb_split_normalized_tablename(str, &dbname, &tablename, &partname)) { @@ -267,12 +297,11 @@ static int rdb_i_s_perf_context_fill_table( continue; } - DBUG_ASSERT(field != nullptr); - field[RDB_PERF_CONTEXT_FIELD::TABLE_SCHEMA]->store( dbname.c_str(), dbname.size(), system_charset_info); field[RDB_PERF_CONTEXT_FIELD::TABLE_NAME]->store( tablename.c_str(), tablename.size(), system_charset_info); + if (partname.size() == 0) { field[RDB_PERF_CONTEXT_FIELD::PARTITION_NAME]->set_null(); } else { @@ -287,9 +316,12 @@ static int rdb_i_s_perf_context_fill_table( system_charset_info); field[RDB_PERF_CONTEXT_FIELD::VALUE]->store(counters.m_value[i], true); - ret = my_core::schema_table_store_record(thd, tables->table); - if (ret) + ret = static_cast( + my_core::schema_table_store_record(thd, tables->table)); + + if (ret) { DBUG_RETURN(ret); + } } } @@ -330,26 +362,34 @@ static int rdb_i_s_perf_context_global_fill_table( DBUG_ASSERT(thd != nullptr); DBUG_ASSERT(tables != nullptr); + DBUG_ASSERT(tables->table != nullptr); + DBUG_ASSERT(tables->table->field != nullptr); int ret = 0; + rocksdb::DB *const rdb = rdb_get_rocksdb_db(); + + if (!rdb) { + DBUG_RETURN(ret); + } + // Get a copy of the global perf counters. 
Rdb_perf_counters global_counters; rdb_get_global_perf_counters(&global_counters); for (int i = 0; i < PC_MAX_IDX; i++) { - DBUG_ASSERT(tables->table != nullptr); - DBUG_ASSERT(tables->table->field != nullptr); - tables->table->field[RDB_PERF_CONTEXT_GLOBAL_FIELD::STAT_TYPE]->store( rdb_pc_stat_types[i].c_str(), rdb_pc_stat_types[i].size(), system_charset_info); tables->table->field[RDB_PERF_CONTEXT_GLOBAL_FIELD::VALUE]->store( global_counters.m_value[i], true); - ret = my_core::schema_table_store_record(thd, tables->table); - if (ret) + ret = static_cast( + my_core::schema_table_store_record(thd, tables->table)); + + if (ret) { DBUG_RETURN(ret); + } } DBUG_RETURN(0); @@ -391,13 +431,21 @@ static int rdb_i_s_cfoptions_fill_table( DBUG_ASSERT(thd != nullptr); DBUG_ASSERT(tables != nullptr); - bool ret; + int ret = 0; + + rocksdb::DB *const rdb = rdb_get_rocksdb_db(); + + if (!rdb) { + DBUG_RETURN(ret); + } Rdb_cf_manager &cf_manager = rdb_get_cf_manager(); for (const auto &cf_name : cf_manager.get_cf_names()) { std::string val; rocksdb::ColumnFamilyOptions opts; + + DBUG_ASSERT(!cf_name.empty()); cf_manager.get_cf_options(cf_name, &opts); std::vector> cf_option_types = { @@ -467,29 +515,37 @@ static int rdb_i_s_cfoptions_fill_table( // get MAX_BYTES_FOR_LEVEL_MULTIPLIER_ADDITIONAL option value val = opts.max_bytes_for_level_multiplier_additional.empty() ? "NULL" : ""; + for (const auto &level : opts.max_bytes_for_level_multiplier_additional) { val.append(std::to_string(level) + ":"); } + val.pop_back(); cf_option_types.push_back( {"MAX_BYTES_FOR_LEVEL_MULTIPLIER_ADDITIONAL", val}); // get COMPRESSION_TYPE option value GetStringFromCompressionType(&val, opts.compression); + if (val.empty()) { val = "NULL"; } + cf_option_types.push_back({"COMPRESSION_TYPE", val}); // get COMPRESSION_PER_LEVEL option value val = opts.compression_per_level.empty() ? 
"NULL" : ""; + for (const auto &compression_type : opts.compression_per_level) { std::string res; + GetStringFromCompressionType(&res, compression_type); + if (!res.empty()) { val.append(res + ":"); } } + val.pop_back(); cf_option_types.push_back({"COMPRESSION_PER_LEVEL", val}); @@ -497,12 +553,15 @@ static int rdb_i_s_cfoptions_fill_table( val = std::to_string(opts.compression_opts.window_bits) + ":"; val.append(std::to_string(opts.compression_opts.level) + ":"); val.append(std::to_string(opts.compression_opts.strategy)); + cf_option_types.push_back({"COMPRESSION_OPTS", val}); // bottommost_compression if (opts.bottommost_compression) { std::string res; + GetStringFromCompressionType(&res, opts.bottommost_compression); + if (!res.empty()) { cf_option_types.push_back({"BOTTOMMOST_COMPRESSION", res}); } @@ -531,12 +590,15 @@ static int rdb_i_s_cfoptions_fill_table( default: val = "NULL"; } + cf_option_types.push_back({"COMPACTION_STYLE", val}); // get COMPACTION_OPTIONS_UNIVERSAL related options const rocksdb::CompactionOptionsUniversal compac_opts = opts.compaction_options_universal; + val = "{SIZE_RATIO="; + val.append(std::to_string(compac_opts.size_ratio)); val.append("; MIN_MERGE_WIDTH="); val.append(std::to_string(compac_opts.min_merge_width)); @@ -547,6 +609,7 @@ static int rdb_i_s_cfoptions_fill_table( val.append("; COMPRESSION_SIZE_PERCENT="); val.append(std::to_string(compac_opts.compression_size_percent)); val.append("; STOP_STYLE="); + switch (compac_opts.stop_style) { case rocksdb::kCompactionStopStyleSimilarSize: val.append("kCompactionStopStyleSimilarSize}"); @@ -557,6 +620,7 @@ static int rdb_i_s_cfoptions_fill_table( default: val.append("}"); } + cf_option_types.push_back({"COMPACTION_OPTIONS_UNIVERSAL", val}); // get COMPACTION_OPTION_FIFO option @@ -564,96 +628,24 @@ static int rdb_i_s_cfoptions_fill_table( {"COMPACTION_OPTION_FIFO::MAX_TABLE_FILES_SIZE", std::to_string(opts.compaction_options_fifo.max_table_files_size)}); - // get block-based 
table related options - const rocksdb::BlockBasedTableOptions &table_options = - rdb_get_table_options(); + // get table related options + std::vector table_options = + split_into_vector(opts.table_factory->GetPrintableTableOptions(), '\n'); - // get BLOCK_BASED_TABLE_FACTORY::CACHE_INDEX_AND_FILTER_BLOCKS option - cf_option_types.push_back( - {"BLOCK_BASED_TABLE_FACTORY::CACHE_INDEX_AND_FILTER_BLOCKS", - table_options.cache_index_and_filter_blocks ? "1" : "0"}); + for (auto option : table_options) { + option.erase(std::remove(option.begin(), option.end(), ' '), + option.end()); - // get BLOCK_BASED_TABLE_FACTORY::INDEX_TYPE option value - switch (table_options.index_type) { - case rocksdb::BlockBasedTableOptions::kBinarySearch: - val = "kBinarySearch"; - break; - case rocksdb::BlockBasedTableOptions::kHashSearch: - val = "kHashSearch"; - break; - default: - val = "NULL"; - } - cf_option_types.push_back({"BLOCK_BASED_TABLE_FACTORY::INDEX_TYPE", val}); - - // get BLOCK_BASED_TABLE_FACTORY::HASH_INDEX_ALLOW_COLLISION option value - cf_option_types.push_back( - {"BLOCK_BASED_TABLE_FACTORY::HASH_INDEX_ALLOW_COLLISION", - table_options.hash_index_allow_collision ? 
"ON" : "OFF"}); + int pos = option.find(":"); + std::string option_name = option.substr(0, pos); + std::string option_value = option.substr(pos + 1, option.length()); + std::transform(option_name.begin(), option_name.end(), + option_name.begin(), + [](unsigned char c) { return std::toupper(c); }); - // get BLOCK_BASED_TABLE_FACTORY::CHECKSUM option value - switch (table_options.checksum) { - case rocksdb::kNoChecksum: - val = "kNoChecksum"; - break; - case rocksdb::kCRC32c: - val = "kCRC32c"; - break; - case rocksdb::kxxHash: - val = "kxxHash"; - break; - default: - val = "NULL"; + cf_option_types.push_back( + {"TABLE_FACTORY::" + option_name, option_value}); } - cf_option_types.push_back({"BLOCK_BASED_TABLE_FACTORY::CHECKSUM", val}); - - // get BLOCK_BASED_TABLE_FACTORY::NO_BLOCK_CACHE option value - cf_option_types.push_back({"BLOCK_BASED_TABLE_FACTORY::NO_BLOCK_CACHE", - table_options.no_block_cache ? "ON" : "OFF"}); - - // get BLOCK_BASED_TABLE_FACTORY::FILTER_POLICY option - cf_option_types.push_back( - {"BLOCK_BASED_TABLE_FACTORY::FILTER_POLICY", - table_options.filter_policy == nullptr - ? "NULL" - : std::string(table_options.filter_policy->Name())}); - - // get BLOCK_BASED_TABLE_FACTORY::WHOLE_KEY_FILTERING option - cf_option_types.push_back({"BLOCK_BASED_TABLE_FACTORY::WHOLE_KEY_FILTERING", - table_options.whole_key_filtering ? "1" : "0"}); - - // get BLOCK_BASED_TABLE_FACTORY::BLOCK_CACHE option - cf_option_types.push_back( - {"BLOCK_BASED_TABLE_FACTORY::BLOCK_CACHE", - table_options.block_cache == nullptr - ? "NULL" - : std::to_string(table_options.block_cache->GetUsage())}); - - // get BLOCK_BASED_TABLE_FACTORY::BLOCK_CACHE_COMPRESSED option - cf_option_types.push_back( - {"BLOCK_BASED_TABLE_FACTORY::BLOCK_CACHE_COMPRESSED", - table_options.block_cache_compressed == nullptr - ? 
"NULL" - : std::to_string( - table_options.block_cache_compressed->GetUsage())}); - - // get BLOCK_BASED_TABLE_FACTORY::BLOCK_SIZE option - cf_option_types.push_back({"BLOCK_BASED_TABLE_FACTORY::BLOCK_SIZE", - std::to_string(table_options.block_size)}); - - // get BLOCK_BASED_TABLE_FACTORY::BLOCK_SIZE_DEVIATION option - cf_option_types.push_back( - {"BLOCK_BASED_TABLE_FACTORY::BLOCK_SIZE_DEVIATION", - std::to_string(table_options.block_size_deviation)}); - - // get BLOCK_BASED_TABLE_FACTORY::BLOCK_RESTART_INTERVAL option - cf_option_types.push_back( - {"BLOCK_BASED_TABLE_FACTORY::BLOCK_RESTART_INTERVAL", - std::to_string(table_options.block_restart_interval)}); - - // get BLOCK_BASED_TABLE_FACTORY::FORMAT_VERSION option - cf_option_types.push_back({"BLOCK_BASED_TABLE_FACTORY::FORMAT_VERSION", - std::to_string(table_options.format_version)}); for (const auto &cf_option_type : cf_option_types) { DBUG_ASSERT(tables->table != nullptr); @@ -668,12 +660,15 @@ static int rdb_i_s_cfoptions_fill_table( cf_option_type.second.c_str(), cf_option_type.second.size(), system_charset_info); - ret = my_core::schema_table_store_record(thd, tables->table); + ret = static_cast( + my_core::schema_table_store_record(thd, tables->table)); - if (ret) + if (ret) { DBUG_RETURN(ret); + } } } + DBUG_RETURN(0); } @@ -734,6 +729,12 @@ static int rdb_i_s_global_info_fill_table( int ret = 0; + rocksdb::DB *const rdb = rdb_get_rocksdb_db(); + + if (!rdb) { + DBUG_RETURN(ret); + } + /* binlog info */ Rdb_binlog_manager *const blm = rdb_get_binlog_manager(); DBUG_ASSERT(blm != nullptr); @@ -745,6 +746,7 @@ static int rdb_i_s_global_info_fill_table( if (blm->read(file_buf, &pos, gtid_buf)) { snprintf(pos_buf, INT_BUF_LEN, "%lu", (uint64_t)pos); + ret |= rdb_global_info_fill_row(thd, tables, "BINLOG", "FILE", file_buf); ret |= rdb_global_info_fill_row(thd, tables, "BINLOG", "POS", pos_buf); ret |= rdb_global_info_fill_row(thd, tables, "BINLOG", "GTID", gtid_buf); @@ -759,6 +761,7 @@ static int 
rdb_i_s_global_info_fill_table( if (dict_manager->get_max_index_id(&max_index_id)) { snprintf(max_index_id_buf, INT_BUF_LEN, "%u", max_index_id); + ret |= rdb_global_info_fill_row(thd, tables, "MAX_INDEX_ID", "MAX_INDEX_ID", max_index_id_buf); } @@ -767,17 +770,31 @@ static int rdb_i_s_global_info_fill_table( char cf_id_buf[INT_BUF_LEN] = {0}; char cf_value_buf[FN_REFLEN + 1] = {0}; const Rdb_cf_manager &cf_manager = rdb_get_cf_manager(); + for (const auto &cf_handle : cf_manager.get_all_cf()) { + DBUG_ASSERT(cf_handle != nullptr); + uint flags; - dict_manager->get_cf_flags(cf_handle->GetID(), &flags); + + if (!dict_manager->get_cf_flags(cf_handle->GetID(), &flags)) { + // NO_LINT_DEBUG + sql_print_error("RocksDB: Failed to get column family flags " + "from CF with id = %u. MyRocks data dictionary may " + "be corrupted.", + cf_handle->GetID()); + abort_with_stack_traces(); + } + snprintf(cf_id_buf, INT_BUF_LEN, "%u", cf_handle->GetID()); snprintf(cf_value_buf, FN_REFLEN, "%s [%u]", cf_handle->GetName().c_str(), flags); + ret |= rdb_global_info_fill_row(thd, tables, "CF_FLAGS", cf_id_buf, cf_value_buf); - if (ret) + if (ret) { break; + } } /* DDL_DROP_INDEX_ONGOING */ @@ -785,14 +802,17 @@ static int rdb_i_s_global_info_fill_table( dict_manager->get_ongoing_index_operation( &gl_index_ids, Rdb_key_def::DDL_DROP_INDEX_ONGOING); char cf_id_index_buf[CF_ID_INDEX_BUF_LEN] = {0}; + for (auto gl_index_id : gl_index_ids) { snprintf(cf_id_index_buf, CF_ID_INDEX_BUF_LEN, "cf_id:%u,index_id:%u", gl_index_id.cf_id, gl_index_id.index_id); + ret |= rdb_global_info_fill_row(thd, tables, "DDL_DROP_INDEX_ONGOING", cf_id_index_buf, ""); - if (ret) + if (ret) { break; + } } DBUG_RETURN(ret); @@ -810,22 +830,21 @@ static int rdb_i_s_compact_stats_fill_table( DBUG_ENTER_FUNC(); int ret = 0; - rocksdb::DB *rdb = rdb_get_rocksdb_db(); + + if (!rdb) { + DBUG_RETURN(ret); + } + Rdb_cf_manager &cf_manager = rdb_get_cf_manager(); - DBUG_ASSERT(rdb != nullptr); for (auto cf_name : 
cf_manager.get_cf_names()) { - rocksdb::ColumnFamilyHandle *cfh; - bool is_automatic; - /* - Only the cf name is important. Whether it was generated automatically - does not matter, so is_automatic is ignored. - */ - cfh = cf_manager.get_cf(cf_name.c_str(), "", nullptr, &is_automatic); + rocksdb::ColumnFamilyHandle *cfh = cf_manager.get_cf(cf_name); + if (cfh == nullptr) { continue; } + std::map props; bool bool_ret MY_ATTRIBUTE((__unused__)); bool_ret = rdb->GetMapProperty(cfh, "rocksdb.cfstats", &props); @@ -841,12 +860,15 @@ static int rdb_i_s_compact_stats_fill_table( Field **field = tables->table->field; DBUG_ASSERT(field != nullptr); + field[0]->store(cf_name.c_str(), cf_name.size(), system_charset_info); field[1]->store(level_str.c_str(), level_str.size(), system_charset_info); field[2]->store(type_str.c_str(), type_str.size(), system_charset_info); field[3]->store(value, true); - ret |= my_core::schema_table_store_record(thd, tables->table); + ret |= static_cast( + my_core::schema_table_store_record(thd, tables->table)); + if (ret != 0) { DBUG_RETURN(ret); } @@ -961,14 +983,24 @@ static int rdb_i_s_ddl_fill_table(my_core::THD *const thd, DBUG_ASSERT(thd != nullptr); DBUG_ASSERT(tables != nullptr); + DBUG_ASSERT(tables->table != nullptr); + + int ret = 0; + rocksdb::DB *const rdb = rdb_get_rocksdb_db(); + + if (!rdb) { + DBUG_RETURN(ret); + } Rdb_ddl_scanner ddl_arg; + ddl_arg.m_thd = thd; ddl_arg.m_table = tables->table; Rdb_ddl_manager *ddl_manager = rdb_get_ddl_manager(); DBUG_ASSERT(ddl_manager != nullptr); - int ret = ddl_manager->scan_for_tables(&ddl_arg); + + ret = ddl_manager->scan_for_tables(&ddl_arg); DBUG_RETURN(ret); } @@ -1103,14 +1135,19 @@ static int rdb_i_s_index_file_map_fill_table( /* Iterate over all the column families */ rocksdb::DB *const rdb = rdb_get_rocksdb_db(); - DBUG_ASSERT(rdb != nullptr); + + if (!rdb) { + DBUG_RETURN(ret); + } const Rdb_cf_manager &cf_manager = rdb_get_cf_manager(); + for (const auto &cf_handle : 
cf_manager.get_all_cf()) { /* Grab the the properties of all the tables in the column family */ rocksdb::TablePropertiesCollection table_props_collection; const rocksdb::Status s = rdb->GetPropertiesOfAllTables(cf_handle, &table_props_collection); + if (!s.ok()) { continue; } @@ -1120,12 +1157,14 @@ static int rdb_i_s_index_file_map_fill_table( for (const auto &props : table_props_collection) { /* Add the SST name into the output */ const std::string sst_name = rdb_filename_without_path(props.first); + field[RDB_INDEX_FILE_MAP_FIELD::SST_NAME]->store( sst_name.data(), sst_name.size(), system_charset_info); /* Get the __indexstats__ data out of the table property */ std::vector stats; Rdb_tbl_prop_coll::read_stats_from_tbl_props(props.second, &stats); + if (stats.empty()) { field[RDB_INDEX_FILE_MAP_FIELD::COLUMN_FAMILY]->store(-1, true); field[RDB_INDEX_FILE_MAP_FIELD::INDEX_NUMBER]->store(-1, true); @@ -1136,7 +1175,7 @@ static int rdb_i_s_index_file_map_fill_table( field[RDB_INDEX_FILE_MAP_FIELD::ENTRY_MERGES]->store(-1, true); field[RDB_INDEX_FILE_MAP_FIELD::ENTRY_OTHERS]->store(-1, true); } else { - for (auto it : stats) { + for (const auto &it : stats) { /* Add the index number, the number of rows, and data size to the * output */ field[RDB_INDEX_FILE_MAP_FIELD::COLUMN_FAMILY]->store( @@ -1154,12 +1193,14 @@ static int rdb_i_s_index_file_map_fill_table( it.m_entry_merges, true); field[RDB_INDEX_FILE_MAP_FIELD::ENTRY_OTHERS]->store( it.m_entry_others, true); + std::string distinct_keys_prefix; for (size_t i = 0; i < it.m_distinct_keys_per_prefix.size(); i++) { if (i > 0) { distinct_keys_prefix += ","; } + distinct_keys_prefix += std::to_string(it.m_distinct_keys_per_prefix[i]); } @@ -1169,7 +1210,9 @@ static int rdb_i_s_index_file_map_fill_table( system_charset_info); /* Tell MySQL about this row in the virtual table */ - ret = my_core::schema_table_store_record(thd, tables->table); + ret = static_cast( + my_core::schema_table_store_record(thd, tables->table)); 
+ if (ret != 0) { break; } @@ -1221,11 +1264,15 @@ static int rdb_i_s_lock_info_fill_table( DBUG_ASSERT(thd != nullptr); DBUG_ASSERT(tables != nullptr); DBUG_ASSERT(tables->table != nullptr); + DBUG_ASSERT(tables->table->field != nullptr); int ret = 0; rocksdb::TransactionDB *const rdb = rdb_get_rocksdb_db(); - DBUG_ASSERT(rdb != nullptr); + + if (!rdb) { + DBUG_RETURN(ret); + } /* cf id -> rocksdb::KeyLockInfo */ std::unordered_multimap lock_info = @@ -1248,12 +1295,15 @@ static int rdb_i_s_lock_info_fill_table( key_lock_info.exclusive ? "X" : "S", 1, system_charset_info); /* Tell MySQL about this row in the virtual table */ - ret = my_core::schema_table_store_record(thd, tables->table); + ret = static_cast( + my_core::schema_table_store_record(thd, tables->table)); + if (ret != 0) { break; } } } + DBUG_RETURN(ret); } @@ -1328,8 +1378,14 @@ static int rdb_i_s_trx_info_fill_table( DBUG_ASSERT(thd != nullptr); DBUG_ASSERT(tables != nullptr); DBUG_ASSERT(tables->table != nullptr); + DBUG_ASSERT(tables->table->field != nullptr); int ret = 0; + rocksdb::DB *const rdb = rdb_get_rocksdb_db(); + + if (!rdb) { + DBUG_RETURN(ret); + } const std::vector &all_trx_info = rdb_get_all_trx_info(); @@ -1338,6 +1394,7 @@ static int rdb_i_s_trx_info_fill_table( rdb_hexdump(info.name.c_str(), info.name.length(), NAME_LEN); auto key_hexstr = rdb_hexdump(info.waiting_key.c_str(), info.waiting_key.length(), FN_REFLEN); + tables->table->field[RDB_TRX_FIELD::TRANSACTION_ID]->store(info.trx_id, true); tables->table->field[RDB_TRX_FIELD::STATE]->store( @@ -1369,7 +1426,9 @@ static int rdb_i_s_trx_info_fill_table( info.query_str.c_str(), info.query_str.length(), system_charset_info); /* Tell MySQL about this row in the virtual table */ - ret = my_core::schema_table_store_record(thd, tables->table); + ret = static_cast( + my_core::schema_table_store_record(thd, tables->table)); + if (ret != 0) { break; } diff --git a/storage/rocksdb/rdb_index_merge.cc b/storage/rocksdb/rdb_index_merge.cc 
index 8f27616e4d909..3f2de63ccfe8f 100644 --- a/storage/rocksdb/rdb_index_merge.cc +++ b/storage/rocksdb/rdb_index_merge.cc @@ -48,7 +48,7 @@ int Rdb_index_merge::init() { inplace index creation. */ if (merge_file_create()) { - return HA_ERR_INTERNAL_ERROR; + return HA_ERR_ROCKSDB_MERGE_FILE_ERR; } /* @@ -84,7 +84,7 @@ int Rdb_index_merge::merge_file_create() { } if (fd < 0) { - return HA_ERR_INTERNAL_ERROR; + return HA_ERR_ROCKSDB_MERGE_FILE_ERR; } m_merge_file.fd = fd; @@ -121,13 +121,13 @@ int Rdb_index_merge::add(const rocksdb::Slice &key, const rocksdb::Slice &val) { // NO_LINT_DEBUG sql_print_error("Sort buffer size is too small to process merge. " "Please set merge buffer size to a higher value."); - return HA_ERR_INTERNAL_ERROR; + return HA_ERR_ROCKSDB_MERGE_FILE_ERR; } if (merge_buf_write()) { // NO_LINT_DEBUG sql_print_error("Error writing sort buffer to disk."); - return HA_ERR_INTERNAL_ERROR; + return HA_ERR_ROCKSDB_MERGE_FILE_ERR; } } @@ -188,7 +188,7 @@ int Rdb_index_merge::merge_buf_write() { SEEK_SET, MYF(0)) == MY_FILEPOS_ERROR) { // NO_LINT_DEBUG sql_print_error("Error seeking to location in merge file on disk."); - return HA_ERR_INTERNAL_ERROR; + return HA_ERR_ROCKSDB_MERGE_FILE_ERR; } /* @@ -201,7 +201,7 @@ int Rdb_index_merge::merge_buf_write() { mysql_file_sync(m_merge_file.fd, MYF(MY_WME))) { // NO_LINT_DEBUG sql_print_error("Error writing sorted merge buffer to disk."); - return HA_ERR_INTERNAL_ERROR; + return HA_ERR_ROCKSDB_MERGE_FILE_ERR; } /* Increment merge file offset to track number of merge buffers written */ @@ -225,7 +225,7 @@ int Rdb_index_merge::merge_heap_prepare() { be written to disk. Write them out now. 
*/ if (!m_offset_tree.empty() && merge_buf_write()) { - return HA_ERR_INTERNAL_ERROR; + return HA_ERR_ROCKSDB_MERGE_FILE_ERR; } DBUG_ASSERT(m_merge_file.num_sort_buffers > 0); @@ -252,7 +252,7 @@ int Rdb_index_merge::merge_heap_prepare() { entry->prepare(m_merge_file.fd, i * m_merge_buf_size, chunk_size); if (total_size == (size_t)-1) { - return HA_ERR_INTERNAL_ERROR; + return HA_ERR_ROCKSDB_MERGE_FILE_ERR; } /* Can reach this condition if an index was added on table w/ no rows */ @@ -264,7 +264,7 @@ int Rdb_index_merge::merge_heap_prepare() { if (entry->read_rec(&entry->key, &entry->val)) { // NO_LINT_DEBUG sql_print_error("Chunk size is too small to process merge."); - return HA_ERR_INTERNAL_ERROR; + return HA_ERR_ROCKSDB_MERGE_FILE_ERR; } m_merge_min_heap.push(std::move(entry)); @@ -380,12 +380,12 @@ int Rdb_index_merge::merge_heap_pop_and_get_next(rocksdb::Slice *const key, */ if (entry->read_rec(&entry->key, &entry->val)) { if (entry->read_next_chunk_from_disk(m_merge_file.fd)) { - return HA_ERR_INTERNAL_ERROR; + return HA_ERR_ROCKSDB_MERGE_FILE_ERR; } /* Try reading record again, should never fail. */ if (entry->read_rec(&entry->key, &entry->val)) { - return HA_ERR_INTERNAL_ERROR; + return HA_ERR_ROCKSDB_MERGE_FILE_ERR; } } diff --git a/storage/rocksdb/rdb_io_watchdog.cc b/storage/rocksdb/rdb_io_watchdog.cc new file mode 100644 index 0000000000000..7b229eee47deb --- /dev/null +++ b/storage/rocksdb/rdb_io_watchdog.cc @@ -0,0 +1,233 @@ +/* + Copyright (c) 2017, Facebook, Inc. + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; version 2 of the License. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. 
+ + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ + +/* This C++ file's header */ +#include "./rdb_io_watchdog.h" + +/* C++ standard header files */ +#include +#include + +namespace myrocks { + +void Rdb_io_watchdog::expire_io_callback(union sigval timer_data) { + DBUG_ASSERT(timer_data.sival_ptr != nullptr); + + // The treatment of any pending signal generated by the deleted timer is + // unspecified. Therefore we still need to handle the rare case where we + // finished the I/O operation right before the timer was deleted and callback + // was in flight. + if (!m_io_in_progress.load()) { + return; + } + + // At this point we know that I/O has been stuck in `write()` for more than + // `m_write_timeout` seconds. We'll log a message and shut down the service. + // NO_LINT_DEBUG + sql_print_error("MyRocks has detected a combination of I/O requests which " + "have cumulatively been blocking for more than %u seconds. " + "Shutting the service down.", + m_write_timeout); + + abort_with_stack_traces(); +} + +void Rdb_io_watchdog::io_check_callback(union sigval timer_data) { + RDB_MUTEX_LOCK_CHECK(m_reset_mutex); + + DBUG_ASSERT(timer_data.sival_ptr != nullptr); + + struct sigevent e; + + e.sigev_notify = SIGEV_THREAD; + e.sigev_notify_function = &Rdb_io_watchdog::expire_io_callback_wrapper; + e.sigev_value.sival_ptr = this; + e.sigev_notify_attributes = nullptr; + + int ret = timer_create(CLOCK_MONOTONIC, &e, &m_io_check_watchdog_timer); + + if (unlikely(ret)) { + // NO_LINT_DEBUG + sql_print_warning("Creating a watchdog I/O timer failed with %d.", errno); + RDB_MUTEX_UNLOCK_CHECK(m_reset_mutex); + return; + } + + struct itimerspec timer_spec; + memset(&timer_spec, 0, sizeof(timer_spec)); + + // One time execution only for the watchdog. No interval. 
+ timer_spec.it_value.tv_sec = m_write_timeout; + + ret = timer_settime(m_io_check_watchdog_timer, 0, &timer_spec, nullptr); + + if (unlikely(ret)) { + // NO_LINT_DEBUG + sql_print_warning("Setting time for a watchdog I/O timer failed with %d.", + errno); + RDB_MUTEX_UNLOCK_CHECK(m_reset_mutex); + return; + } + + m_io_in_progress.store(true); + + // Verify the write access to all directories we care about. + for (const std::string &directory : m_dirs_to_check) { + ret = check_write_access(directory); + + // We'll log a warning and attept to continue to see if the problem happens + // in other cases as well. + if (unlikely(ret != HA_EXIT_SUCCESS)) { + // NO_LINT_DEBUG + sql_print_warning("Unable to verify write access to %s (error code %d).", + directory.c_str(), ret); + } + } + + m_io_in_progress.store(false); + + // Clean up the watchdog timer. + ret = timer_delete(m_io_check_watchdog_timer); + + if (unlikely(ret)) { + // NO_LINT_DEBUG + sql_print_warning("Deleting the watchdog I/O timer failed with %d.", errno); + } + + m_io_check_watchdog_timer = nullptr; + + RDB_MUTEX_UNLOCK_CHECK(m_reset_mutex); +} + +int Rdb_io_watchdog::check_write_access(const std::string &dirname) const { + DBUG_ASSERT(!dirname.empty()); + DBUG_ASSERT(m_buf != nullptr); + + const std::string fname = dirname + FN_DIRSEP + RDB_IO_DUMMY_FILE_NAME; + + // O_DIRECT is a key flag here to make sure that we'll bypass the kernel's + // buffer cache. 
+ int fd = open(fname.c_str(), O_WRONLY | O_DIRECT | O_CREAT | O_SYNC, + S_IRWXU | S_IWUSR); + + if (unlikely(fd == -1)) { + return fd; + } + + int ret = write(fd, m_buf, RDB_IO_WRITE_BUFFER_SIZE); + + if (unlikely(ret != RDB_IO_WRITE_BUFFER_SIZE)) { + return ret; + } + + ret = close(fd); + + if (unlikely(ret)) { + return ret; + } + + ret = unlink(fname.c_str()); + + if (unlikely(ret)) { + return ret; + } + + return HA_EXIT_SUCCESS; +} + +int Rdb_io_watchdog::reset_timeout(const uint32_t &write_timeout) { + // This function will be called either from a thread initializing MyRocks + // engine or handling system variable changes. We need to account for the + // possibility of I/O callback executing at the same time. If that happens + // then we'll wait for it to finish. + RDB_MUTEX_LOCK_CHECK(m_reset_mutex); + + struct sigevent e; + + // In all the cases all the active timers needs to be stopped. + int ret = stop_timers(); + + if (unlikely(ret)) { + // NO_LINT_DEBUG + sql_print_warning("Stopping I/O timers failed with %d.", errno); + RDB_MUTEX_UNLOCK_CHECK(m_reset_mutex); + return ret; + } + + m_write_timeout = write_timeout; + m_io_in_progress.store(false); + + // Zero means that the I/O timer will be disabled. Therefore there's nothing + // for us to do here. + if (!write_timeout) { + RDB_MUTEX_UNLOCK_CHECK(m_reset_mutex); + return HA_EXIT_SUCCESS; + } + + free(m_buf); + + ret = posix_memalign(reinterpret_cast(&m_buf), + RDB_IO_WRITE_BUFFER_SIZE, RDB_IO_WRITE_BUFFER_SIZE); + + if (unlikely(ret)) { + m_buf = nullptr; + RDB_MUTEX_UNLOCK_CHECK(m_reset_mutex); + // NB! The value of errno is not set. + return ret; + } + + DBUG_ASSERT(m_buf != nullptr); + memset(m_buf, 0, RDB_IO_WRITE_BUFFER_SIZE); + + // Common case gets handled here - we'll create a timer with a specific + // interval to check a set of directories for write access. 
+ DBUG_ASSERT(m_dirs_to_check.size() > 0); + + e.sigev_notify = SIGEV_THREAD; + e.sigev_notify_function = &Rdb_io_watchdog::io_check_callback_wrapper; + e.sigev_value.sival_ptr = this; + e.sigev_notify_attributes = nullptr; + + ret = timer_create(CLOCK_MONOTONIC, &e, &m_io_check_timer); + + if (unlikely(ret)) { + // NO_LINT_DEBUG + sql_print_warning("Creating a I/O timer failed with %d.", errno); + RDB_MUTEX_UNLOCK_CHECK(m_reset_mutex); + return ret; + } + + struct itimerspec timer_spec; + memset(&timer_spec, 0, sizeof(timer_spec)); + + // I/O timer will need to execute on a certain interval. + timer_spec.it_value.tv_sec = m_write_timeout; + timer_spec.it_interval.tv_sec = m_write_timeout; + + ret = timer_settime(m_io_check_timer, 0, &timer_spec, nullptr); + + if (unlikely(ret)) { + // NO_LINT_DEBUG + sql_print_warning("Setting time for a watchdog I/O timer failed with %d.", + errno); + } + + RDB_MUTEX_UNLOCK_CHECK(m_reset_mutex); + + return HA_EXIT_SUCCESS; +} + +} // namespace myrocks diff --git a/storage/rocksdb/rdb_io_watchdog.h b/storage/rocksdb/rdb_io_watchdog.h new file mode 100644 index 0000000000000..0fb77536fb03e --- /dev/null +++ b/storage/rocksdb/rdb_io_watchdog.h @@ -0,0 +1,113 @@ +/* + Copyright (c) 2017, Facebook, Inc. + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; version 2 of the License. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. 
+ + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ + +#pragma once + +/* C++ standard header files */ +#include +#include +#include +#include +#include +#include +#include + +/* MySQL header files */ +#include "./my_global.h" +#include "./my_stacktrace.h" + +/* MyRocks header files */ +#include "./rdb_utils.h" + +namespace myrocks { + +class Rdb_io_watchdog { + const int RDB_IO_WRITE_BUFFER_SIZE = 4096; + const char *const RDB_IO_DUMMY_FILE_NAME = "myrocks_io_watchdog_write_file"; + + private: + timer_t m_io_check_timer, m_io_check_watchdog_timer; + std::atomic m_io_in_progress; + std::vector m_dirs_to_check; + uint32_t m_write_timeout; + mysql_mutex_t m_reset_mutex; + char *m_buf; + + int check_write_access(const std::string &dirname) const; + void io_check_callback(union sigval timer_data); + void expire_io_callback(union sigval timer_data); + + int stop_timers() { + int ret = 0; + + if (m_io_check_watchdog_timer) { + ret = timer_delete(m_io_check_watchdog_timer); + + if (!ret) { + m_io_check_watchdog_timer = nullptr; + } + } + + if (m_io_check_timer && !ret) { + ret = timer_delete(m_io_check_timer); + + if (!ret) { + m_io_check_timer = nullptr; + } + } + + return ret; + } + + static void io_check_callback_wrapper(union sigval timer_data) { + Rdb_io_watchdog *io_watchdog = + static_cast(timer_data.sival_ptr); + DBUG_ASSERT(io_watchdog != nullptr); + + io_watchdog->io_check_callback(timer_data); + } + + static void expire_io_callback_wrapper(union sigval timer_data) { + Rdb_io_watchdog *io_watchdog = + static_cast(timer_data.sival_ptr); + DBUG_ASSERT(io_watchdog != nullptr); + + io_watchdog->expire_io_callback(timer_data); + } + + public: + explicit Rdb_io_watchdog(const std::vector &directories) + : m_io_check_timer(nullptr), m_io_check_watchdog_timer(nullptr), + m_io_in_progress(false), 
m_dirs_to_check(std::move(directories)), + m_buf(nullptr) { + DBUG_ASSERT(m_dirs_to_check.size() > 0); + mysql_mutex_init(0, &m_reset_mutex, MY_MUTEX_INIT_FAST); + } + + ~Rdb_io_watchdog() { + // We're shutting down. Ignore errors possibly coming from timer deletion. + static_cast(stop_timers()); + mysql_mutex_destroy(&m_reset_mutex); + free(m_buf); + } + + int reset_timeout(const uint32_t &write_timeout); + + Rdb_io_watchdog(const Rdb_io_watchdog &) = delete; + Rdb_io_watchdog &operator=(const Rdb_io_watchdog &) = delete; +}; + +} // namespace myrocks diff --git a/storage/rocksdb/rdb_perf_context.cc b/storage/rocksdb/rdb_perf_context.cc index 5ebc6cdab5c6d..be75efa73e570 100644 --- a/storage/rocksdb/rdb_perf_context.cc +++ b/storage/rocksdb/rdb_perf_context.cc @@ -83,14 +83,14 @@ std::string rdb_pc_stat_types[] = { #define IO_PERF_RECORD(_field_) \ do { \ - if (rocksdb::perf_context._field_ > 0) \ - counters->m_value[idx] += rocksdb::perf_context._field_; \ + if (rocksdb::get_perf_context()->_field_ > 0) \ + counters->m_value[idx] += rocksdb::get_perf_context()->_field_; \ idx++; \ } while (0) #define IO_STAT_RECORD(_field_) \ do { \ - if (rocksdb::iostats_context._field_ > 0) \ - counters->m_value[idx] += rocksdb::iostats_context._field_; \ + if (rocksdb::get_iostats_context()->_field_ > 0) \ + counters->m_value[idx] += rocksdb::get_iostats_context()->_field_; \ idx++; \ } while (0) @@ -171,8 +171,8 @@ bool Rdb_io_perf::start(const uint32_t perf_context_level) { return false; } - rocksdb::perf_context.Reset(); - rocksdb::iostats_context.Reset(); + rocksdb::get_perf_context()->Reset(); + rocksdb::get_iostats_context()->Reset(); return true; } @@ -189,34 +189,36 @@ void Rdb_io_perf::end_and_record(const uint32_t perf_context_level) { } harvest_diffs(&rdb_global_perf_counters); - if (m_shared_io_perf_read && (rocksdb::perf_context.block_read_byte != 0 || - rocksdb::perf_context.block_read_count != 0 || - rocksdb::perf_context.block_read_time != 0)) { + if 
(m_shared_io_perf_read && + (rocksdb::get_perf_context()->block_read_byte != 0 || + rocksdb::get_perf_context()->block_read_count != 0 || + rocksdb::get_perf_context()->block_read_time != 0)) { my_io_perf_t io_perf_read; io_perf_read.init(); - io_perf_read.bytes = rocksdb::perf_context.block_read_byte; - io_perf_read.requests = rocksdb::perf_context.block_read_count; + io_perf_read.bytes = rocksdb::get_perf_context()->block_read_byte; + io_perf_read.requests = rocksdb::get_perf_context()->block_read_count; /* Rocksdb does not distinguish between I/O service and wait time, so just use svc time. */ io_perf_read.svc_time_max = io_perf_read.svc_time = - rocksdb::perf_context.block_read_time; + rocksdb::get_perf_context()->block_read_time; m_shared_io_perf_read->sum(io_perf_read); m_stats->table_io_perf_read.sum(io_perf_read); } if (m_stats) { - if (rocksdb::perf_context.internal_key_skipped_count != 0) { - m_stats->key_skipped += rocksdb::perf_context.internal_key_skipped_count; + if (rocksdb::get_perf_context()->internal_key_skipped_count != 0) { + m_stats->key_skipped += + rocksdb::get_perf_context()->internal_key_skipped_count; } - if (rocksdb::perf_context.internal_delete_skipped_count != 0) { + if (rocksdb::get_perf_context()->internal_delete_skipped_count != 0) { m_stats->delete_skipped += - rocksdb::perf_context.internal_delete_skipped_count; + rocksdb::get_perf_context()->internal_delete_skipped_count; } } } diff --git a/storage/rocksdb/rdb_psi.h b/storage/rocksdb/rdb_psi.h index 2df3b96a64d22..0a62f411ade62 100644 --- a/storage/rocksdb/rdb_psi.h +++ b/storage/rocksdb/rdb_psi.h @@ -19,6 +19,7 @@ /* MySQL header files */ #include +#include #include /* MyRocks header files */ diff --git a/storage/rocksdb/rdb_sst_info.cc b/storage/rocksdb/rdb_sst_info.cc index b6fd14f3ccb5d..9afe9807317fd 100644 --- a/storage/rocksdb/rdb_sst_info.cc +++ b/storage/rocksdb/rdb_sst_info.cc @@ -20,6 +20,7 @@ /* C++ standard header files */ #include #include +#include #include /* MySQL 
header files */ @@ -37,17 +38,17 @@ namespace myrocks { -Rdb_sst_file::Rdb_sst_file(rocksdb::DB *const db, - rocksdb::ColumnFamilyHandle *const cf, - const rocksdb::DBOptions &db_options, - const std::string &name, const bool tracing) +Rdb_sst_file_ordered::Rdb_sst_file::Rdb_sst_file( + rocksdb::DB *const db, rocksdb::ColumnFamilyHandle *const cf, + const rocksdb::DBOptions &db_options, const std::string &name, + const bool tracing) : m_db(db), m_cf(cf), m_db_options(db_options), m_sst_file_writer(nullptr), - m_name(name), m_tracing(tracing) { + m_name(name), m_tracing(tracing), m_comparator(cf->GetComparator()) { DBUG_ASSERT(db != nullptr); DBUG_ASSERT(cf != nullptr); } -Rdb_sst_file::~Rdb_sst_file() { +Rdb_sst_file_ordered::Rdb_sst_file::~Rdb_sst_file() { // Make sure we clean up delete m_sst_file_writer; m_sst_file_writer = nullptr; @@ -58,7 +59,7 @@ Rdb_sst_file::~Rdb_sst_file() { std::remove(m_name.c_str()); } -rocksdb::Status Rdb_sst_file::open() { +rocksdb::Status Rdb_sst_file_ordered::Rdb_sst_file::open() { DBUG_ASSERT(m_sst_file_writer == nullptr); rocksdb::ColumnFamilyDescriptor cf_descr; @@ -69,13 +70,11 @@ rocksdb::Status Rdb_sst_file::open() { } // Create an sst file writer with the current options and comparator - const rocksdb::Comparator *comparator = m_cf->GetComparator(); - const rocksdb::EnvOptions env_options(m_db_options); const rocksdb::Options options(m_db_options, cf_descr.options); m_sst_file_writer = - new rocksdb::SstFileWriter(env_options, options, comparator, m_cf); + new rocksdb::SstFileWriter(env_options, options, m_comparator, m_cf); s = m_sst_file_writer->Open(m_name); if (m_tracing) { @@ -92,15 +91,19 @@ rocksdb::Status Rdb_sst_file::open() { return s; } -rocksdb::Status Rdb_sst_file::put(const rocksdb::Slice &key, - const rocksdb::Slice &value) { +rocksdb::Status +Rdb_sst_file_ordered::Rdb_sst_file::put(const rocksdb::Slice &key, + const rocksdb::Slice &value) { DBUG_ASSERT(m_sst_file_writer != nullptr); // Add the specified 
key/value to the sst file writer +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wdeprecated-declarations" return m_sst_file_writer->Add(key, value); } -std::string Rdb_sst_file::generateKey(const std::string &key) { +std::string +Rdb_sst_file_ordered::Rdb_sst_file::generateKey(const std::string &key) { static char const hexdigit[] = {'0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'A', 'B', 'C', 'D', 'E', 'F'}; @@ -117,7 +120,7 @@ std::string Rdb_sst_file::generateKey(const std::string &key) { } // This function is run by the background thread -rocksdb::Status Rdb_sst_file::commit() { +rocksdb::Status Rdb_sst_file_ordered::Rdb_sst_file::commit() { DBUG_ASSERT(m_sst_file_writer != nullptr); rocksdb::Status s; @@ -167,13 +170,153 @@ rocksdb::Status Rdb_sst_file::commit() { return s; } +void Rdb_sst_file_ordered::Rdb_sst_stack::push(const rocksdb::Slice &key, + const rocksdb::Slice &value) { + if (m_buffer == nullptr) { + m_buffer = new char[m_buffer_size]; + } + + // Put the actual key and value data unto our stack + size_t key_offset = m_offset; + memcpy(m_buffer + m_offset, key.data(), key.size()); + m_offset += key.size(); + memcpy(m_buffer + m_offset, value.data(), value.size()); + m_offset += value.size(); + + // Push just the offset, the key length and the value length onto the stack + m_stack.push(std::make_tuple(key_offset, key.size(), value.size())); +} + +std::pair +Rdb_sst_file_ordered::Rdb_sst_stack::top() { + size_t offset, key_len, value_len; + // Pop the next item off the internal stack + std::tie(offset, key_len, value_len) = m_stack.top(); + + // Make slices from the offset (first), key length (second), and value + // length (third) + DBUG_ASSERT(m_buffer != nullptr); + rocksdb::Slice key(m_buffer + offset, key_len); + rocksdb::Slice value(m_buffer + offset + key_len, value_len); + + return std::make_pair(key, value); +} + +Rdb_sst_file_ordered::Rdb_sst_file_ordered( + rocksdb::DB *const db, rocksdb::ColumnFamilyHandle *const cf, + 
const rocksdb::DBOptions &db_options, const std::string &name, + const bool tracing, size_t max_size) + : m_use_stack(false), m_first(true), m_stack(max_size), + m_file(db, cf, db_options, name, tracing) { + m_stack.reset(); +} + +rocksdb::Status Rdb_sst_file_ordered::apply_first() { + rocksdb::Slice first_key_slice(m_first_key); + rocksdb::Slice first_value_slice(m_first_value); + rocksdb::Status s; + + if (m_use_stack) { + // Put the first key onto the stack + m_stack.push(first_key_slice, first_value_slice); + } else { + // Put the first key into the SST + s = m_file.put(first_key_slice, first_value_slice); + if (!s.ok()) { + return s; + } + } + + // Clear out the 'first' strings for next key/value + m_first_key.clear(); + m_first_value.clear(); + + return s; +} + +rocksdb::Status Rdb_sst_file_ordered::put(const rocksdb::Slice &key, + const rocksdb::Slice &value) { + rocksdb::Status s; + + // If this is the first key, just store a copy of the key and value + if (m_first) { + m_first_key = key.ToString(); + m_first_value = value.ToString(); + m_first = false; + return rocksdb::Status::OK(); + } + + // If the first key is not empty we must be the second key. Compare the + // new key with the first key to determine if the data will go straight + // the SST or be put on the stack to be retrieved later. + if (!m_first_key.empty()) { + rocksdb::Slice first_key_slice(m_first_key); + int cmp = m_file.compare(first_key_slice, key); + DBUG_ASSERT(cmp != 0); + m_use_stack = (cmp > 0); + + // Apply the first key to the stack or SST + s = apply_first(); + if (!s.ok()) { + return s; + } + } + + // Put this key on the stack or into the SST + if (m_use_stack) { + m_stack.push(key, value); + } else { + s = m_file.put(key, value); + } + + return s; +} + +rocksdb::Status Rdb_sst_file_ordered::commit() { + rocksdb::Status s; + + // Make sure we get the first key if it was the only key given to us. 
+ if (!m_first_key.empty()) { + s = apply_first(); + if (!s.ok()) { + return s; + } + } + + if (m_use_stack) { + rocksdb::Slice key; + rocksdb::Slice value; + + // We are ready to commit, pull each entry off the stack (which reverses + // the original data) and send it to the SST file. + while (!m_stack.empty()) { + std::tie(key, value) = m_stack.top(); + s = m_file.put(key, value); + if (!s.ok()) { + return s; + } + + m_stack.pop(); + } + + // We have pulled everything off the stack, reset for the next time + m_stack.reset(); + m_use_stack = false; + } + + // reset m_first + m_first = true; + + return m_file.commit(); +} + Rdb_sst_info::Rdb_sst_info(rocksdb::DB *const db, const std::string &tablename, const std::string &indexname, rocksdb::ColumnFamilyHandle *const cf, const rocksdb::DBOptions &db_options, const bool &tracing) : m_db(db), m_cf(cf), m_db_options(db_options), m_curr_size(0), - m_sst_count(0), m_error_msg(""), + m_sst_count(0), m_background_error(HA_EXIT_SUCCESS), #if defined(RDB_SST_INFO_USE_THREAD) m_queue(), m_mutex(), m_cond(), m_thread(nullptr), m_finished(false), #endif @@ -220,15 +363,16 @@ int Rdb_sst_info::open_new_sst_file() { const std::string name = m_prefix + std::to_string(m_sst_count++) + m_suffix; // Create the new sst file object - m_sst_file = new Rdb_sst_file(m_db, m_cf, m_db_options, name, m_tracing); + m_sst_file = new Rdb_sst_file_ordered(m_db, m_cf, m_db_options, + name, m_tracing, m_max_size); // Open the sst file const rocksdb::Status s = m_sst_file->open(); if (!s.ok()) { - set_error_msg(m_sst_file->get_name(), s.ToString()); + set_error_msg(m_sst_file->get_name(), s); delete m_sst_file; m_sst_file = nullptr; - return HA_EXIT_FAILURE; + return HA_ERR_ROCKSDB_BULK_LOAD; } m_curr_size = 0; @@ -259,7 +403,8 @@ void Rdb_sst_info::close_curr_sst_file() { #else const rocksdb::Status s = m_sst_file->commit(); if (!s.ok()) { - set_error_msg(m_sst_file->get_name(), s.ToString()); + set_error_msg(m_sst_file->get_name(), s); + 
set_background_error(HA_ERR_ROCKSDB_BULK_LOAD); } delete m_sst_file; @@ -273,14 +418,14 @@ void Rdb_sst_info::close_curr_sst_file() { int Rdb_sst_info::put(const rocksdb::Slice &key, const rocksdb::Slice &value) { int rc; - if (m_curr_size >= m_max_size) { + if (m_curr_size + key.size() + value.size() >= m_max_size) { // The current sst file has reached its maximum, close it out close_curr_sst_file(); // While we are here, check to see if we have had any errors from the // background thread - we don't want to wait for the end to report them - if (!m_error_msg.empty()) { - return HA_EXIT_FAILURE; + if (have_background_error()) { + return get_and_reset_background_error(); } } @@ -297,8 +442,8 @@ int Rdb_sst_info::put(const rocksdb::Slice &key, const rocksdb::Slice &value) { // Add the key/value to the current sst file const rocksdb::Status s = m_sst_file->put(key, value); if (!s.ok()) { - set_error_msg(m_sst_file->get_name(), s.ToString()); - return HA_EXIT_FAILURE; + set_error_msg(m_sst_file->get_name(), s); + return HA_ERR_ROCKSDB_BULK_LOAD; } m_curr_size += key.size() + value.size(); @@ -326,25 +471,36 @@ int Rdb_sst_info::commit() { #endif // Did we get any errors? - if (!m_error_msg.empty()) { - return HA_EXIT_FAILURE; + if (have_background_error()) { + return get_and_reset_background_error(); } return HA_EXIT_SUCCESS; } void Rdb_sst_info::set_error_msg(const std::string &sst_file_name, - const std::string &msg) { + const rocksdb::Status &s) { #if defined(RDB_SST_INFO_USE_THREAD) // Both the foreground and background threads can set the error message // so lock the mutex to protect it. We only want the first error that // we encounter. 
const std::lock_guard guard(m_mutex); #endif - my_printf_error(ER_UNKNOWN_ERROR, "[%s] bulk load error: %s", MYF(0), - sst_file_name.c_str(), msg.c_str()); - if (m_error_msg.empty()) { - m_error_msg = "[" + sst_file_name + "] " + msg; + if (s.IsInvalidArgument() && + strcmp(s.getState(), "Keys must be added in order") == 0) { + my_printf_error(ER_KEYS_OUT_OF_ORDER, + "Rows must be inserted in primary key order " + "during bulk load operation", + MYF(0)); + } else if (s.IsInvalidArgument() && + strcmp(s.getState(), "Global seqno is required, but disabled") == + 0) { + my_printf_error(ER_OVERLAPPING_KEYS, "Rows inserted during bulk load " + "must not overlap existing rows", + MYF(0)); + } else { + my_printf_error(ER_UNKNOWN_ERROR, "[%s] bulk load error: %s", MYF(0), + sst_file_name.c_str(), s.ToString().c_str()); } } @@ -355,15 +511,15 @@ void Rdb_sst_info::thread_fcn(void *object) { } void Rdb_sst_info::run_thread() { - const std::unique_lock lk(m_mutex); + std::unique_lock lk(m_mutex); do { // Wait for notification or 1 second to pass m_cond.wait_for(lk, std::chrono::seconds(1)); - // Inner loop pulls off all Rdb_sst_file entries and processes them + // Inner loop pulls off all Rdb_sst_file_ordered entries and processes them while (!m_queue.empty()) { - const Rdb_sst_file *const sst_file = m_queue.front(); + Rdb_sst_file_ordered *const sst_file = m_queue.front(); m_queue.pop(); // Release the lock - we don't want to hold it while committing the file @@ -372,7 +528,8 @@ void Rdb_sst_info::run_thread() { // Close out the sst file and add it to the database const rocksdb::Status s = sst_file->commit(); if (!s.ok()) { - set_error_msg(sst_file->get_name(), s.ToString()); + set_error_msg(sst_file->get_name(), s); + set_background_error(HA_ERR_ROCKSDB_BULK_LOAD); } delete sst_file; diff --git a/storage/rocksdb/rdb_sst_info.h b/storage/rocksdb/rdb_sst_info.h index 09c0edce0978a..358b1933592d1 100644 --- a/storage/rocksdb/rdb_sst_info.h +++ b/storage/rocksdb/rdb_sst_info.h 
@@ -21,46 +21,100 @@ #include #include #include +#include #include #include +#include #include /* RocksDB header files */ #include "rocksdb/db.h" #include "rocksdb/sst_file_writer.h" -// define RDB_SST_INFO_USE_THREAD /* uncomment to use threads */ +/* MyRocks header files */ +#include "./rdb_utils.h" -namespace myrocks { - -class Rdb_sst_file { -private: - Rdb_sst_file(const Rdb_sst_file &p) = delete; - Rdb_sst_file &operator=(const Rdb_sst_file &p) = delete; - - rocksdb::DB *const m_db; - rocksdb::ColumnFamilyHandle *const m_cf; - const rocksdb::DBOptions &m_db_options; - rocksdb::SstFileWriter *m_sst_file_writer; - const std::string m_name; - const bool m_tracing; - - std::string generateKey(const std::string &key); +// #define RDB_SST_INFO_USE_THREAD /* uncomment to use threads */ -public: - Rdb_sst_file(rocksdb::DB *const db, rocksdb::ColumnFamilyHandle *const cf, - const rocksdb::DBOptions &db_options, const std::string &name, - const bool tracing); - ~Rdb_sst_file(); +namespace myrocks { - rocksdb::Status open(); +class Rdb_sst_file_ordered { + private: + class Rdb_sst_file { + private: + Rdb_sst_file(const Rdb_sst_file &p) = delete; + Rdb_sst_file &operator=(const Rdb_sst_file &p) = delete; + + rocksdb::DB *const m_db; + rocksdb::ColumnFamilyHandle *const m_cf; + const rocksdb::DBOptions &m_db_options; + rocksdb::SstFileWriter *m_sst_file_writer; + const std::string m_name; + const bool m_tracing; + const rocksdb::Comparator *m_comparator; + + std::string generateKey(const std::string &key); + + public: + Rdb_sst_file(rocksdb::DB *const db, rocksdb::ColumnFamilyHandle *const cf, + const rocksdb::DBOptions &db_options, const std::string &name, + const bool tracing); + ~Rdb_sst_file(); + + rocksdb::Status open(); + rocksdb::Status put(const rocksdb::Slice &key, const rocksdb::Slice &value); + rocksdb::Status commit(); + + inline const std::string get_name() const { return m_name; } + inline int compare(rocksdb::Slice key1, rocksdb::Slice key2) { + return 
m_comparator->Compare(key1, key2); + } + }; + + class Rdb_sst_stack { + private: + char *m_buffer; + size_t m_buffer_size; + size_t m_offset; + std::stack> m_stack; + + public: + explicit Rdb_sst_stack(size_t max_size) + : m_buffer(nullptr), m_buffer_size(max_size) {} + ~Rdb_sst_stack() { delete[] m_buffer; } + + void reset() { m_offset = 0; } + bool empty() { return m_stack.empty(); } + void push(const rocksdb::Slice &key, const rocksdb::Slice &value); + std::pair top(); + void pop() { m_stack.pop(); } + size_t size() { return m_stack.size(); } + }; + + bool m_use_stack; + bool m_first; + std::string m_first_key; + std::string m_first_value; + Rdb_sst_stack m_stack; + Rdb_sst_file m_file; + + rocksdb::Status apply_first(); + + public: + Rdb_sst_file_ordered(rocksdb::DB *const db, + rocksdb::ColumnFamilyHandle *const cf, + const rocksdb::DBOptions &db_options, + const std::string &name, const bool tracing, + size_t max_size); + + inline rocksdb::Status open() { return m_file.open(); } rocksdb::Status put(const rocksdb::Slice &key, const rocksdb::Slice &value); rocksdb::Status commit(); - const std::string get_name() const { return m_name; } + inline const std::string get_name() const { return m_file.get_name(); } }; class Rdb_sst_info { -private: + private: Rdb_sst_info(const Rdb_sst_info &p) = delete; Rdb_sst_info &operator=(const Rdb_sst_info &p) = delete; @@ -70,23 +124,24 @@ class Rdb_sst_info { uint64_t m_curr_size; uint64_t m_max_size; uint m_sst_count; - std::string m_error_msg; + std::atomic m_background_error; std::string m_prefix; static std::atomic m_prefix_counter; static std::string m_suffix; #if defined(RDB_SST_INFO_USE_THREAD) - std::queue m_queue; + std::queue m_queue; std::mutex m_mutex; std::condition_variable m_cond; std::thread *m_thread; bool m_finished; #endif - Rdb_sst_file *m_sst_file; + Rdb_sst_file_ordered *m_sst_file; const bool m_tracing; int open_new_sst_file(); void close_curr_sst_file(); - void set_error_msg(const std::string 
&sst_file_name, const std::string &msg); + void set_error_msg(const std::string &sst_file_name, + const rocksdb::Status &s); #if defined(RDB_SST_INFO_USE_THREAD) void run_thread(); @@ -94,7 +149,7 @@ class Rdb_sst_info { static void thread_fcn(void *object); #endif -public: + public: Rdb_sst_info(rocksdb::DB *const db, const std::string &tablename, const std::string &indexname, rocksdb::ColumnFamilyHandle *const cf, @@ -104,7 +159,22 @@ class Rdb_sst_info { int put(const rocksdb::Slice &key, const rocksdb::Slice &value); int commit(); - const std::string &error_message() const { return m_error_msg; } + bool have_background_error() { return m_background_error != 0; } + + int get_and_reset_background_error() { + int ret = m_background_error; + while (!m_background_error.compare_exchange_weak(ret, HA_EXIT_SUCCESS)) { + // Do nothing + } + + return ret; + } + + void set_background_error(int code) { + int expected = HA_EXIT_SUCCESS; + // Only assign 'code' into the error if it is already 0, otherwise ignore it + m_background_error.compare_exchange_strong(expected, code); + } static void init(const rocksdb::DB *const db); }; diff --git a/storage/rocksdb/rdb_threads.h b/storage/rocksdb/rdb_threads.h index e5cbb52c54e0e..876ff5698f347 100644 --- a/storage/rocksdb/rdb_threads.h +++ b/storage/rocksdb/rdb_threads.h @@ -81,6 +81,7 @@ class Rdb_thread { pthread_setname_np resolves the issue. 
*/ DBUG_ASSERT(!m_name.empty()); +#ifdef __linux__ int err = pthread_setname_np(m_handle, m_name.c_str()); if (err) { @@ -89,6 +90,7 @@ class Rdb_thread { "MyRocks: Failed to set name (%s) for current thread, errno=%d", m_name.c_str(), errno); } +#endif } void uninit(); diff --git a/storage/rocksdb/rdb_utils.cc b/storage/rocksdb/rdb_utils.cc index daa766ad87187..7e66f22991df8 100644 --- a/storage/rocksdb/rdb_utils.cc +++ b/storage/rocksdb/rdb_utils.cc @@ -21,6 +21,7 @@ #include #include #include +#include /* C standard header files */ #include @@ -289,4 +290,17 @@ bool rdb_database_exists(const std::string &db_name) { return true; } +void rdb_log_status_error(const rocksdb::Status &s, const char *msg) { + if (msg == nullptr) { + // NO_LINT_DEBUG + sql_print_error("RocksDB: status error, code: %d, error message: %s", + s.code(), s.ToString().c_str()); + return; + } + + // NO_LINT_DEBUG + sql_print_error("RocksDB: %s, Status Code: %d, Status: %s", msg, s.code(), + s.ToString().c_str()); +} + } // namespace myrocks diff --git a/storage/rocksdb/rdb_utils.h b/storage/rocksdb/rdb_utils.h index 58db29b3079b5..5b12e9301446c 100644 --- a/storage/rocksdb/rdb_utils.h +++ b/storage/rocksdb/rdb_utils.h @@ -27,6 +27,7 @@ /* RocksDB header files */ #include "rocksdb/slice.h" +#include "rocksdb/status.h" #ifdef HAVE_JEMALLOC #include @@ -238,6 +239,8 @@ inline void rdb_check_mutex_call_result(const char *function_name, } } +void rdb_log_status_error(const rocksdb::Status &s, const char *msg = nullptr); + /* Helper functions to parse strings. 
*/ diff --git a/storage/rocksdb/rocksdb-range-access.txt b/storage/rocksdb/rocksdb-range-access.txt index c974279ac7709..6b5a0db938af6 100644 --- a/storage/rocksdb/rocksdb-range-access.txt +++ b/storage/rocksdb/rocksdb-range-access.txt @@ -38,39 +38,17 @@ When we need to seek to a tuple that is a prefix of a full key: ( kv )-ccc-pk ( kv )-bbb-pk3 ( kv )-bbb-pk2 - ( kv )-bbb-pk1 < -- We need to be here -# ( kv )-bbb <---we call Seek(kv-bbb) - ( kv )-aaa-pk ... and end up here. Should call it->Prev(). + ( kv )-bbb-pk1 <--- SeekForPrev("kv-bbb") will put us here on the previous + record. +# ( kv )-bbb <--- "kv-bbb" doesn't exist in the database, but it would be + ( kv )-aaa-pk here. -There is a special case when (kv)-bbb-pk1 is the last record in the CF, and -we get invalid iterator. Then, we need to call SeekToLast(). - -Another kind of special case is when we need to seek to the full value. -Suppose, the lookup tuple is kv-bbb-pk1: - - (kv+1)-xxx-pk - ( kv )-ccc-pk - ( kv )-bbb-pk3 - ( kv )-bbb-pk2 - ( kv )-bbb-pk1 < -- Seek(kv-bbb-pk1) - ( kv )-bbb-pk0 - -Then, Seek(kv-bbb-pk1) will position us exactly the tuple we need, and we -won't need to call it->Prev(). If we get an invalid iterator, there is no -need to call SeekToLast(). +Even when (kv)-bbb-pk1 is the last record in the CF, SeekForPrev() will find the +last record before "kv-bbb", so it already takes care of this case for us. RocksDB calls: - it->Seek(tuple); - - if (!using_full_key) - { - if (!it->Valid()) - it->SeekToLast(); - else - it->Prev(); - } - + it->SeekForPrev(kv); if (it->Valid() && kd->covers_key(..) && kd->cmp_full_keys(...)) return record. @@ -90,7 +68,7 @@ If lookup tuple is kv-bbb: RocksDB calls: Seek(kv); - if (it->Valid() && kd->covers_key(..) && kd->cmp_full_keys(...)) + if (it->Valid() && kd->covers_key(..)) return record. 
== HA_READ_KEY_OR_NEXT, backward CF == @@ -101,12 +79,13 @@ When specified key tuple is a key prefix: ( kv )-ccc-pk ( kv )-bbb-pk3 ( kv )-bbb-pk2 - ( kv )-bbb-pk1 < -- We need to be here (or above) -# ( kv )-bbb <---we call Seek(kv-bbb) - ( kv )-aaa-pk ... and end up here. Should call it->Prev(). + ( kv )-bbb-pk1 <--- Seek("kv-bbb") will put us here on the previous record. +# ( kv )-bbb <--- "kv-bbb" doesn't exist in the database, but it would be + here. + ( kv )-aaa-pk -There is a special case when (kv)-bbb-pk1 is the last record in the CF, and -we get invalid iterator. Then, we need to call SeekToLast(). +Even when (kv)-bbb-pk1 is the last record in the CF, SeekForPrev() will find the +last record before "kv-bbb", so it already takes care of this case for us. Another kind of special case is when we need to seek to the full value. Suppose, the lookup tuple is kv-bbb-pk1: @@ -115,28 +94,16 @@ Suppose, the lookup tuple is kv-bbb-pk1: ( kv )-ccc-pk ( kv )-bbb-pk3 ( kv )-bbb-pk2 - ( kv )-bbb-pk1 < -- Seek(kv-bbb-pk1) + ( kv )-bbb-pk1 < -- SeekForPrev(kv-bbb-pk1) ( kv )-bbb-pk0 -Then, Seek(kv-bbb-pk1) may position us exactly at the tuple we need, and we -won't need to call it->Prev(). -If kv-bbb-pk1 is not present in the database, we will be positioned on -kv-bbb-pk0, and we will need to call it->Prev(). -If we get an invalid iterator, we DO need to call SeekToLast(). +Then, SeekForPrev(kv-bbb-pk1) may position us exactly at the tuple we need. +Even If kv-bbb-pk1 is not present in the database, we will be positioned on +kv-bbb-pk2 no matter whether kv-bbb-pk2 is the last key or not. RocksDB calls: - Seek(...); - - if (!it->Valid()) - it->SeekToLast(); - else - { - if (!using_full_key || - !(kd->covers_key(...) || kd->cmp_full_keys(...)) - it->Prev(); - } - + SeekForPrev(...); if (it->Valid() && kd->covers_key(..)) return record. @@ -153,7 +120,7 @@ Suppose lookup_key = kv-bbb ( kv )-bbb-pk3 ( kv )-bbb-pk4 ( kv )-bbb-pk5 - ( kv )-ccc-pkN <-- That is, we need to be here. 
+ ( kv )-ccc-pkN <--- That is, we need to be here. However, we don't know that the next value is kv-ccc. Instead, we seek to the first value that strictly greater than 'kv-bbb'. It is Successor(kv-bbb). @@ -163,7 +130,7 @@ It doesn't matter if we're using a full extended key or not. RocksDB calls: Seek(Successor(kv-bbb)); - if (it->Valid() && kd->covers_key(it.key())) + if (it->Valid() && kd->covers_key(...)) return record; Note that the code is the same as with HA_READ_KEY_OR_NEXT, except that @@ -175,47 +142,25 @@ Suppose, the lookup key is 'kv-bbb': (kv+1)-xxx-pk ( kv )-ccc-pk7 - ( kv )-ccc-pk6 <-- We need to be here. -# Successor(kv-bbb) <-- We get here when we call Seek(Successor(kv-bbb)) - ( kv )-bbb-pk5 and we will need to call Prev() (*) + ( kv )-ccc-pk6 <-- We get here when we call Seek(Successor(kv-bbb)) +# Successor(kv-bbb) + ( kv )-bbb-pk5 ( kv )-bbb-pk4 ( kv )-bbb-pk3 ( kv )-bbb-pk2 ( kv )-bbb-pk1 -# ( kv )-bbb <-- We would get here if we called Seek(kv-bbb). +# ( kv )-bbb <-- We would get here if we called SeekForPrev(kv-bbb). ( kv )-aaa-pk -(*) - unless Successor(kv-bbb)=(kv-ccc), and Seek(kv-ccc) hits the row. In -that case, we won't need to call Prev(). - RocksDB calls: - Seek(Successor(kv-bbb)); - if (!it->Valid()) - { - /* - We may get EOF if rows with 'kv-bbb' (below the Successor... line in the - diagram) do not exist. This doesn't mean that rows with values kv-ccc - do not exist. - */ - it->SeekToLast(); - } - else - { - if (!using_full_key || - !kd->value_matches_prefix(...)) - { - it->Prev(); - } - } - + SeekForPrev(Successor(kv-bbb)); if (it->Valid() && kd->covers_key(...)) return record. Note that the code is the same as with HA_READ_KEY_OR_NEXT, except that we seek to Successor($lookup_key) instead of $lookup_key itself. - == HA_READ_BEFORE_KEY, forward CF == This is finding max(key) such that key < lookup_tuple. @@ -224,31 +169,29 @@ Suppose, lookup_tuple=kv-bbb. ( kv )-aaa-pk1 ( kv )-aaa-pk2 - ( kv )-aaa-pk3 <-- Need to be here. 
+ ( kv )-aaa-pk3 <-- SeekForPrev("kv-bbb") will put us here. # ( kv )-bbb - ( kv )-bbb-pk4 <-- Seek("kv-bbb") will put us here. + ( kv )-bbb-pk4 ( kv )-bbb-pk5 ( kv )-bbb-pk6 -1. Seek(kv-bbb) will put us at kv-bbb-pk4 (or return an invalid iterator - if kv-bbb-pk4 and subsequent rows do not exist in the db). -2. We will need to call Prev() to get to the record before. - (if there is no record before kv-bbb, then we can't find a record). - -It doesn't matter if we're using a full extended key or not. +If the lookup tuple is a full key (e.g. kv-bbb-pk3), and the key is present in +the database, the iterator will be positioned on the key. We will need to call +Prev() to get the next key. RocksDB calls: - it->Seek(kv-bbb); - if (it->Valid()) + it->SeekForPrev(kv-bbb); + if (it->Valid() && using_full_key && + kd->value_matches_prefix(...)) + { + /* We are using full key and we've hit an exact match */ it->Prev(); - else - it->SeekToLast(); + } if (it->Valid() && kd->covers_key(...)) return record; - == HA_READ_BEFORE_KEY, backward CF == This is finding max(key) such that key < lookup_tuple. @@ -269,7 +212,6 @@ Next() to get the next key. RocksDB calls: it->Seek(kv-bbb); - if (it->Valid() && using_full_key && kd->value_matches_prefix(...)) { @@ -292,19 +234,16 @@ Suppose, lookup_tuple='kv-bbb' ( kv )-bbb-pk4 ( kv )-bbb-pk5 ( kv )-bbb-pk6 - ( kv )-bbb-pk7 <--- Need to be here. + ( kv )-bbb-pk7 <--- SeekForPrev(Successor(kv-bbb)) will get us here # ( kv )-ccc - ( kv )-ccc-pk8 <-- Seek(Successor(kv-bbb)) will get us here. will need - ( kv )-ccc-pk9 to call Prev(). 
+ ( kv )-ccc-pk8 + ( kv )-ccc-pk9 RocksDB calls: - Seek(Successor(kv-bbb)); - if (!it->Valid()) - it->SeekToLast(); - else + SeekForPrev(Successor(kv-bbb)); + if (using_full_key && it->Valid() && !cmp_full_keys(Sucessor(lookup_key))) it->Prev(); - if (it->Valid() && kd->covers_key(...)) { if (!cmp_full_keys(lookup_tuple)) // not needed in _OR_PREV From a89d01fb60dd9feec94262eb62f345080122a51a Mon Sep 17 00:00:00 2001 From: Sergei Petrunia Date: Sat, 29 Jul 2017 11:06:22 +0000 Subject: [PATCH 02/34] Trivial updates to get a few rocksdb.* testcases to pass --- .../include/restart_mysqld_with_option.inc | 31 +++++++++++++++++++ .../rocksdb/r/allow_no_primary_key.result | 2 +- .../r/corrupted_data_reads_debug.result | 10 +++--- .../mysql-test/rocksdb/r/foreign_key.result | 8 ++--- .../rocksdb/r/index_merge_rocksdb.result | 2 +- .../mysql-test/rocksdb/r/rocksdb_parts.result | 2 +- .../r/ttl_primary_read_filtering.result | 16 ++++++++++ .../r/ttl_primary_with_partitions.result | 12 +++---- .../rocksdb/t/bulk_load_rev_cf.test | 1 + .../rocksdb/t/bulk_load_rev_cf_and_data.test | 1 + .../rocksdb/t/bulk_load_rev_data.test | 1 + .../rocksdb/mysql-test/rocksdb/t/disabled.def | 3 ++ .../t/ttl_primary_with_partitions.test | 1 + .../rocksdb/t/varbinary_format.test | 8 ++--- .../mysql-test/rocksdb/t/write_sync.test | 1 - 15 files changed, 76 insertions(+), 23 deletions(-) create mode 100644 storage/rocksdb/mysql-test/rocksdb/include/restart_mysqld_with_option.inc diff --git a/storage/rocksdb/mysql-test/rocksdb/include/restart_mysqld_with_option.inc b/storage/rocksdb/mysql-test/rocksdb/include/restart_mysqld_with_option.inc new file mode 100644 index 0000000000000..4250b368b1a8b --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/include/restart_mysqld_with_option.inc @@ -0,0 +1,31 @@ + +if ($rpl_inited) +{ + if (!$allow_rpl_inited) + { + --die ERROR IN TEST: This script does not support replication + } +} + +# Write file to make mysql-test-run.pl expect the "crash", but don't 
start +# it until it's told to +--let $_server_id= `SELECT @@server_id` +--let $_expect_file_name= $MYSQLTEST_VARDIR/tmp/mysqld.$_server_id.expect +--exec echo "wait" > $_expect_file_name + +# Send shutdown to the connected server and give +# it 10 seconds to die before zapping it +shutdown_server 10; + +# Write file to make mysql-test-run.pl start up the server again +--exec echo "restart:$_mysqld_option" > $_expect_file_name + +# Turn on reconnect +--enable_reconnect + +# Call script that will poll the server waiting for it to be back online again +--source include/wait_until_connected_again.inc + +# Turn off reconnect again +--disable_reconnect + diff --git a/storage/rocksdb/mysql-test/rocksdb/r/allow_no_primary_key.result b/storage/rocksdb/mysql-test/rocksdb/r/allow_no_primary_key.result index d86792a64692d..5bffab7491702 100644 --- a/storage/rocksdb/mysql-test/rocksdb/r/allow_no_primary_key.result +++ b/storage/rocksdb/mysql-test/rocksdb/r/allow_no_primary_key.result @@ -241,7 +241,7 @@ a b 2 b EXPLAIN SELECT * FROM t1 WHERE a = 2; id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 const a a 5 const 1 NULL +1 SIMPLE t1 const a a 5 const 1 DROP TABLE t1; CREATE TABLE t1 (a INT, b CHAR(8)) ENGINE=rocksdb; SHOW CREATE TABLE t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/corrupted_data_reads_debug.result b/storage/rocksdb/mysql-test/rocksdb/r/corrupted_data_reads_debug.result index cd6bde2b3813a..5cf9c620341f7 100644 --- a/storage/rocksdb/mysql-test/rocksdb/r/corrupted_data_reads_debug.result +++ b/storage/rocksdb/mysql-test/rocksdb/r/corrupted_data_reads_debug.result @@ -20,7 +20,7 @@ set @tmp1=@@rocksdb_verify_row_debug_checksums; set rocksdb_verify_row_debug_checksums=1; set session debug_dbug= "+d,myrocks_simulate_bad_row_read1"; select * from t1 where pk=1; -ERROR HY000: Got error 199 'Found data corruption.' from ROCKSDB +ERROR HY000: Got error 200 'Found data corruption.' 
from ROCKSDB set session debug_dbug= "-d,myrocks_simulate_bad_row_read1"; set rocksdb_verify_row_debug_checksums=@tmp1; select * from t1 where pk=1; @@ -28,11 +28,11 @@ pk col1 1 1 set session debug_dbug= "+d,myrocks_simulate_bad_row_read2"; select * from t1 where pk=1; -ERROR HY000: Got error 199 'Found data corruption.' from ROCKSDB +ERROR HY000: Got error 200 'Found data corruption.' from ROCKSDB set session debug_dbug= "-d,myrocks_simulate_bad_row_read2"; set session debug_dbug= "+d,myrocks_simulate_bad_row_read3"; select * from t1 where pk=1; -ERROR HY000: Got error 199 'Found data corruption.' from ROCKSDB +ERROR HY000: Got error 200 'Found data corruption.' from ROCKSDB set session debug_dbug= "-d,myrocks_simulate_bad_row_read3"; insert into t1 values(4,'0123456789'); select * from t1; @@ -56,7 +56,7 @@ pk col1 ABCD 1 set session debug_dbug= "+d,myrocks_simulate_bad_pk_read1"; select * from t2; -ERROR HY000: Got error 199 'Found data corruption.' from ROCKSDB +ERROR HY000: Got error 200 'Found data corruption.' from ROCKSDB set session debug_dbug= "-d,myrocks_simulate_bad_pk_read1"; drop table t2; create table t2 ( @@ -69,6 +69,6 @@ pk col1 ABCD 1 set session debug_dbug= "+d,myrocks_simulate_bad_pk_read1"; select * from t2; -ERROR HY000: Got error 199 'Found data corruption.' from ROCKSDB +ERROR HY000: Got error 200 'Found data corruption.' 
from ROCKSDB set session debug_dbug= "-d,myrocks_simulate_bad_pk_read1"; drop table t2; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/foreign_key.result b/storage/rocksdb/mysql-test/rocksdb/r/foreign_key.result index 5ffd2774ca240..fa3809e97583f 100644 --- a/storage/rocksdb/mysql-test/rocksdb/r/foreign_key.result +++ b/storage/rocksdb/mysql-test/rocksdb/r/foreign_key.result @@ -1,16 +1,16 @@ DROP TABLE IF EXISTS t1, t2; CREATE TABLE t1 (b INT PRIMARY KEY); CREATE TABLE t2 (a INT NOT NULL, b INT NOT NULL, FOREIGN KEY (b) REFERENCES t1(b)); -ERROR 42000: This version of MySQL doesn't yet support 'FOREIGN KEY for the RocksDB storage engine' +ERROR 42000: This version of MariaDB doesn't yet support 'FOREIGN KEY for the RocksDB storage engine' CREATE TABLE t2 (a INT NOT NULL, bforeign INT NOT NULL); DROP TABLE t2; CREATE TABLE t2 (a INT NOT NULL, foreignkey INT NOT NULL); DROP TABLE t2; CREATE TABLE t2 (a INT NOT NULL, bforeign INT not null, FOREIGN KEY (bforeign) REFERENCES t1(b)); -ERROR 42000: This version of MySQL doesn't yet support 'FOREIGN KEY for the RocksDB storage engine' +ERROR 42000: This version of MariaDB doesn't yet support 'FOREIGN KEY for the RocksDB storage engine' CREATE TABLE t2 (a INT NOT NULL, b INT NOT NULL); ALTER TABLE t2 ADD FOREIGN KEY (b) REFERENCES t1(b); -ERROR 42000: This version of MySQL doesn't yet support 'FOREIGN KEY for the RocksDB storage engine' +ERROR 42000: This version of MariaDB doesn't yet support 'FOREIGN KEY for the RocksDB storage engine' DROP TABLE t2; CREATE TABLE t2 (a INT NOT NULL); ALTER TABLE t2 ADD bforeign INT NOT NULL; @@ -20,6 +20,6 @@ ALTER TABLE t2 ADD foreignkey INT NOT NULL; DROP TABLE t2; CREATE TABLE t2 (a INT NOT NULL); ALTER TABLE t2 ADD bforeign INT NOT NULL, ADD FOREIGN KEY (bforeign) REFERENCES t1(b); -ERROR 42000: This version of MySQL doesn't yet support 'FOREIGN KEY for the RocksDB storage engine' +ERROR 42000: This version of MariaDB doesn't yet support 'FOREIGN KEY for the RocksDB storage 
engine' DROP TABLE t2; DROP TABLE t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/index_merge_rocksdb.result b/storage/rocksdb/mysql-test/rocksdb/r/index_merge_rocksdb.result index 22c8592ff282d..f63a271cdce90 100644 --- a/storage/rocksdb/mysql-test/rocksdb/r/index_merge_rocksdb.result +++ b/storage/rocksdb/mysql-test/rocksdb/r/index_merge_rocksdb.result @@ -31,7 +31,7 @@ test.t1 analyze status OK set global rocksdb_force_flush_memtable_now=1; explain select * from t1 where key1 = 1; id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 ref key1 key1 5 const # NULL +1 SIMPLE t1 ref key1 key1 5 const # explain select key1,key2 from t1 where key1 = 1 or key2 = 1; id select_type table type possible_keys key key_len ref rows Extra 1 SIMPLE t1 index_merge key1,key2 key1,key2 5,5 NULL # Using union(key1,key2); Using where diff --git a/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_parts.result b/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_parts.result index b90706e613ef7..9b7ee0de3889d 100644 --- a/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_parts.result +++ b/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_parts.result @@ -94,7 +94,7 @@ drop table t1, t2; CREATE TABLE t1 (c1 INT NOT NULL, c2 CHAR(5)) PARTITION BY HASH(c1) PARTITIONS 4; INSERT INTO t1 VALUES(1,'a'); RENAME TABLE t1 TO db3.t3; -ERROR HY000: Error on rename of './test/t1' to './db3/t3' (errno: -1 - Unknown error -1) +ERROR HY000: Error on rename of './test/t1' to './db3/t3' (errno: -1 "Internal error < 0 (Not system error)") SELECT * FROM t1; c1 c2 1 a diff --git a/storage/rocksdb/mysql-test/rocksdb/r/ttl_primary_read_filtering.result b/storage/rocksdb/mysql-test/rocksdb/r/ttl_primary_read_filtering.result index 0a91fe3fcbd85..1df6e838bcdb2 100644 --- a/storage/rocksdb/mysql-test/rocksdb/r/ttl_primary_read_filtering.result +++ b/storage/rocksdb/mysql-test/rocksdb/r/ttl_primary_read_filtering.result @@ -171,11 +171,14 @@ SELECT * FROM t1; a b 998 5 DROP TABLE t1; +connect 
con1,localhost,root,,; +connect con2,localhost,root,,; CREATE TABLE t1 ( a int PRIMARY KEY ) ENGINE=rocksdb COMMENT='ttl_duration=5;'; INSERT INTO t1 values (1); +connection con1; # Creating Snapshot (start transaction) BEGIN; SELECT * FROM t1; @@ -185,11 +188,13 @@ SELECT * FROM t1; a 1 # Switching to connection 2 +connection con2; set global rocksdb_force_flush_memtable_now=1; set global rocksdb_compact_cf='default'; SELECT * FROM t1; a # Switching to connection 1 +connection con1; SELECT * FROM t1; a 1 @@ -201,6 +206,10 @@ COMMIT; SELECT * FROM t1; a DROP TABLE t1; +disconnect con1; +disconnect con2; +connect con1,localhost,root,,; +connect con2,localhost,root,,; set global rocksdb_force_flush_memtable_now=1; set global rocksdb_compact_cf='default'; CREATE TABLE t1 ( @@ -208,11 +217,13 @@ a int PRIMARY KEY ) ENGINE=rocksdb COMMENT='ttl_duration=1;'; # On Connection 1 +connection con1; # Creating Snapshot (start transaction) BEGIN; SELECT * FROM t1; a # On Connection 2 +connection con2; set global rocksdb_debug_ttl_rec_ts = -2; INSERT INTO t1 values (1); INSERT INTO t1 values (3); @@ -222,9 +233,11 @@ set global rocksdb_debug_ttl_rec_ts = 0; set global rocksdb_force_flush_memtable_now=1; set global rocksdb_compact_cf='default'; # On Connection 1 +connection con1; SELECT * FROM t1; a # On Connection 2 +connection con2; SELECT * FROM t1; a set global rocksdb_enable_ttl_read_filtering=0; @@ -235,4 +248,7 @@ a 5 7 set global rocksdb_enable_ttl_read_filtering=1; +disconnect con2; +disconnect con1; +connection default; DROP TABLE t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/ttl_primary_with_partitions.result b/storage/rocksdb/mysql-test/rocksdb/r/ttl_primary_with_partitions.result index 3816accad8cb1..d6d9e290e9fac 100644 --- a/storage/rocksdb/mysql-test/rocksdb/r/ttl_primary_with_partitions.result +++ b/storage/rocksdb/mysql-test/rocksdb/r/ttl_primary_with_partitions.result @@ -144,16 +144,16 @@ set global rocksdb_debug_ttl_rec_ts = 0; SHOW CREATE TABLE t1; 
Table Create Table t1 CREATE TABLE `t1` ( - `c1` int(11) NOT NULL DEFAULT '0', - `c2` int(11) NOT NULL DEFAULT '0', + `c1` int(11) NOT NULL, + `c2` int(11) NOT NULL, `name` varchar(25) NOT NULL, `event` date DEFAULT NULL, PRIMARY KEY (`c2`,`c1`) COMMENT 'custom_p0_cfname=foo;custom_p1_cfname=bar;custom_p2_cfname=baz;' ) ENGINE=ROCKSDB DEFAULT CHARSET=latin1 COMMENT='custom_p0_ttl_duration=9999;custom_p2_ttl_duration=5;' -/*!50100 PARTITION BY LIST (c1) -(PARTITION custom_p0 VALUES IN (1,2,3) ENGINE = ROCKSDB, - PARTITION custom_p1 VALUES IN (4,5,6) ENGINE = ROCKSDB, - PARTITION custom_p2 VALUES IN (7,8,9) ENGINE = ROCKSDB) */ + PARTITION BY LIST (`c1`) +(PARTITION `custom_p0` VALUES IN (1,2,3) ENGINE = ROCKSDB, + PARTITION `custom_p1` VALUES IN (4,5,6) ENGINE = ROCKSDB, + PARTITION `custom_p2` VALUES IN (7,8,9) ENGINE = ROCKSDB) set global rocksdb_debug_ttl_snapshot_ts = 100; set global rocksdb_force_flush_memtable_now=1; set @@global.rocksdb_compact_cf = 'baz'; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/bulk_load_rev_cf.test b/storage/rocksdb/mysql-test/rocksdb/t/bulk_load_rev_cf.test index 5aec6ff5e99ac..fdacb22a73cb7 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/bulk_load_rev_cf.test +++ b/storage/rocksdb/mysql-test/rocksdb/t/bulk_load_rev_cf.test @@ -1,4 +1,5 @@ --source include/have_rocksdb.inc +--source include/have_partition.inc --let pk_cf=rev:cf1 --let data_order_desc=0 diff --git a/storage/rocksdb/mysql-test/rocksdb/t/bulk_load_rev_cf_and_data.test b/storage/rocksdb/mysql-test/rocksdb/t/bulk_load_rev_cf_and_data.test index 83006f9e446bd..11709ac18be50 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/bulk_load_rev_cf_and_data.test +++ b/storage/rocksdb/mysql-test/rocksdb/t/bulk_load_rev_cf_and_data.test @@ -1,4 +1,5 @@ --source include/have_rocksdb.inc +--source include/have_partition.inc --let pk_cf=rev:cf1 --let data_order_desc=1 diff --git a/storage/rocksdb/mysql-test/rocksdb/t/bulk_load_rev_data.test 
b/storage/rocksdb/mysql-test/rocksdb/t/bulk_load_rev_data.test index df7b6a7c821be..a3bcd1b060c43 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/bulk_load_rev_data.test +++ b/storage/rocksdb/mysql-test/rocksdb/t/bulk_load_rev_data.test @@ -1,4 +1,5 @@ --source include/have_rocksdb.inc +--source include/have_partition.inc --let pk_cf=cf1 --let data_order_desc=1 diff --git a/storage/rocksdb/mysql-test/rocksdb/t/disabled.def b/storage/rocksdb/mysql-test/rocksdb/t/disabled.def index 883f4475952f7..fbec7eec6a19a 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/disabled.def +++ b/storage/rocksdb/mysql-test/rocksdb/t/disabled.def @@ -70,3 +70,6 @@ col_opt_null : : MDEV-12474 - Fails in fulltest col_opt_unsigned : MDEV-12474 - Fails in fulltest col_opt_zerofill : MDEV-12474 - Fails in fulltest type_float : MDEV-12474 - Fails in fulltest + +native_procedure : Not supported in MariaDB + diff --git a/storage/rocksdb/mysql-test/rocksdb/t/ttl_primary_with_partitions.test b/storage/rocksdb/mysql-test/rocksdb/t/ttl_primary_with_partitions.test index dd1a97b32df02..aba2b594db543 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/ttl_primary_with_partitions.test +++ b/storage/rocksdb/mysql-test/rocksdb/t/ttl_primary_with_partitions.test @@ -1,5 +1,6 @@ --source include/have_debug.inc --source include/have_rocksdb.inc +--source include/have_partition.inc # # Create a table with multiple partitions, but in the comment don't specify diff --git a/storage/rocksdb/mysql-test/rocksdb/t/varbinary_format.test b/storage/rocksdb/mysql-test/rocksdb/t/varbinary_format.test index d00a8b7afbe1a..d10082bb95c27 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/varbinary_format.test +++ b/storage/rocksdb/mysql-test/rocksdb/t/varbinary_format.test @@ -34,11 +34,11 @@ DROP TABLE t1; # Now create the same table in the old format to show that they can be read # and handled correctly -set session debug= '+d,MYROCKS_LEGACY_VARBINARY_FORMAT'; +set session debug_dbug= 
'+d,MYROCKS_LEGACY_VARBINARY_FORMAT'; CREATE TABLE t1( vb VARBINARY(64) primary key ) ENGINE=rocksdb; -set session debug= '-d,MYROCKS_LEGACY_VARBINARY_FORMAT'; +set session debug_dbug= '-d,MYROCKS_LEGACY_VARBINARY_FORMAT'; INSERT INTO t1 values(0x00); INSERT INTO t1 values(0x0000); @@ -98,11 +98,11 @@ DROP TABLE t1; # Now create the same table in the old format to show that they can be read # and handled correctly -set session debug= '+d,MYROCKS_LEGACY_VARBINARY_FORMAT'; +set session debug_dbug= '+d,MYROCKS_LEGACY_VARBINARY_FORMAT'; CREATE TABLE t1( vc VARCHAR(64) collate 'binary' primary key ) ENGINE=rocksdb; -set session debug= '-d,MYROCKS_LEGACY_VARBINARY_FORMAT'; +set session debug_dbug= '-d,MYROCKS_LEGACY_VARBINARY_FORMAT'; INSERT INTO t1 values('a'); INSERT INTO t1 values('aa'); diff --git a/storage/rocksdb/mysql-test/rocksdb/t/write_sync.test b/storage/rocksdb/mysql-test/rocksdb/t/write_sync.test index 0b96b5977971c..ca953afd65d7b 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/write_sync.test +++ b/storage/rocksdb/mysql-test/rocksdb/t/write_sync.test @@ -5,7 +5,6 @@ SET GLOBAL rocksdb_write_disable_wal=false; SET GLOBAL rocksdb_write_ignore_missing_column_families=true; create table aaa (id int primary key, i int) engine rocksdb; -set @save_rocksdb_flush_log_at_trx_commit= @@global.rocksdb_flush_log_at_trx_commit; set @save_rocksdb_flush_log_at_trx_commit=@@global.rocksdb_flush_log_at_trx_commit; SET GLOBAL rocksdb_flush_log_at_trx_commit=0; sleep 30; From be6c4f5d1589f9c7b8ed9e5042a1a4ad12bd3444 Mon Sep 17 00:00:00 2001 From: Sergei Petrunia Date: Sat, 29 Jul 2017 13:24:55 +0000 Subject: [PATCH 03/34] More post-merge updates to get the tests pass --- .../rocksdb/include/deadlock_stats.inc | 46 +++++++++++++++++++ .../rocksdb/include/simple_deadlock.inc | 29 ++++++++++++ .../rocksdb/r/varbinary_format.result | 8 ++-- 3 files changed, 79 insertions(+), 4 deletions(-) create mode 100644 storage/rocksdb/mysql-test/rocksdb/include/deadlock_stats.inc create 
mode 100644 storage/rocksdb/mysql-test/rocksdb/include/simple_deadlock.inc diff --git a/storage/rocksdb/mysql-test/rocksdb/include/deadlock_stats.inc b/storage/rocksdb/mysql-test/rocksdb/include/deadlock_stats.inc new file mode 100644 index 0000000000000..40df82e1314b7 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/include/deadlock_stats.inc @@ -0,0 +1,46 @@ +let $prior_set_lwt = `select concat('set @prior_lock_wait_timeout = @@', + '$engine', '_lock_wait_timeout;')`; +let $prior_set_dld = `select concat('set @prior_deadlock_detect = @@', + '$engine', '_deadlock_detect;')`; +let $global_dld = `select concat('set global ', '$engine', + '_deadlock_detect = on;')`; +let $global_lwt = `select concat('set global ', '$engine', + '_lock_wait_timeout = 100000;')`; +eval $prior_set_lwt $prior_set_dld $global_dld $global_lwt; + +--source include/count_sessions.inc +connect (con1,localhost,root,,); +let $con1= `SELECT CONNECTION_ID()`; + +connect (con2,localhost,root,,); +let $con2= `SELECT CONNECTION_ID()`; + +connection default; +eval create table t (i int primary key) engine=$engine; +insert into t values (1), (2), (3); + +--source include/simple_deadlock.inc +connection default; +select row_lock_deadlocks from information_schema.table_statistics where +table_name = "t"; + +select row_lock_deadlocks from information_schema.table_statistics where +table_name = "t"; +--source include/simple_deadlock.inc +connection default; +select row_lock_deadlocks from information_schema.table_statistics where +table_name = "t"; + +select row_lock_deadlocks from information_schema.table_statistics where +table_name = "t"; + +disconnect con1; +disconnect con2; + +let $restore_lwt = `select concat('set global ', '$engine', + '_lock_wait_timeout = @prior_lock_wait_timeout;')`; +let $restore_dld = `select concat('set global ', '$engine', + '_deadlock_detect = @prior_deadlock_detect;')`; +eval $restore_lwt $restore_dld; +drop table t; +--source include/wait_until_count_sessions.inc 
diff --git a/storage/rocksdb/mysql-test/rocksdb/include/simple_deadlock.inc b/storage/rocksdb/mysql-test/rocksdb/include/simple_deadlock.inc new file mode 100644 index 0000000000000..0afdfea76dbf7 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/include/simple_deadlock.inc @@ -0,0 +1,29 @@ +connection con1; +begin; +select * from t where i=1 for update; + +connection con2; +begin; +select * from t where i=2 for update; + +connection con1; +--send select * from t where i=2 for update + +connection con2; +if ($engine == "rocksdb"){ + let $wait_condition = select count(*) = 1 from information_schema.rocksdb_trx + where thread_id = $con1 and waiting_key != ""; +} +if ($engine == "innodb"){ + let $wait_condition = select count(*) = 1 from information_schema.innodb_trx + where trx_mysql_thread_id = $con1 and trx_state="LOCK WAIT"; +} +--source include/wait_condition.inc + +--error ER_LOCK_DEADLOCK +select * from t where i=1 for update; +rollback; + +connection con1; +--reap +rollback; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/varbinary_format.result b/storage/rocksdb/mysql-test/rocksdb/r/varbinary_format.result index 9362d42515cf6..8b4634bb6e20b 100644 --- a/storage/rocksdb/mysql-test/rocksdb/r/varbinary_format.result +++ b/storage/rocksdb/mysql-test/rocksdb/r/varbinary_format.result @@ -62,11 +62,11 @@ SUBSTRING(a.key,9) 000000000000000009000000000000000009000000000000000009000000000000000009000000000000000001 ROLLBACK; DROP TABLE t1; -set session debug= '+d,MYROCKS_LEGACY_VARBINARY_FORMAT'; +set session debug_dbug= '+d,MYROCKS_LEGACY_VARBINARY_FORMAT'; CREATE TABLE t1( vb VARBINARY(64) primary key ) ENGINE=rocksdb; -set session debug= '-d,MYROCKS_LEGACY_VARBINARY_FORMAT'; +set session debug_dbug= '-d,MYROCKS_LEGACY_VARBINARY_FORMAT'; INSERT INTO t1 values(0x00); INSERT INTO t1 values(0x0000); INSERT INTO t1 values(0x0000000000000000); @@ -192,11 +192,11 @@ SUBSTRING(a.key,9) 
616161616161616109616161616161616109616161616161616109616161616161616109610000000000000001 ROLLBACK; DROP TABLE t1; -set session debug= '+d,MYROCKS_LEGACY_VARBINARY_FORMAT'; +set session debug_dbug= '+d,MYROCKS_LEGACY_VARBINARY_FORMAT'; CREATE TABLE t1( vc VARCHAR(64) collate 'binary' primary key ) ENGINE=rocksdb; -set session debug= '-d,MYROCKS_LEGACY_VARBINARY_FORMAT'; +set session debug_dbug= '-d,MYROCKS_LEGACY_VARBINARY_FORMAT'; INSERT INTO t1 values('a'); INSERT INTO t1 values('aa'); INSERT INTO t1 values('aaaaaaaa'); From b2617cae3fa15c940f4b6c671d8f445ac67d3237 Mon Sep 17 00:00:00 2001 From: Sergei Petrunia Date: Sat, 29 Jul 2017 17:24:10 +0300 Subject: [PATCH 04/34] Post-merge fix: fix compilation --- storage/rocksdb/rdb_mariadb_port.h | 3 +++ 1 file changed, 3 insertions(+) diff --git a/storage/rocksdb/rdb_mariadb_port.h b/storage/rocksdb/rdb_mariadb_port.h index aa15ff8e02ff8..1661338030f4c 100644 --- a/storage/rocksdb/rdb_mariadb_port.h +++ b/storage/rocksdb/rdb_mariadb_port.h @@ -7,7 +7,10 @@ #include "my_global.h" /* ulonglong */ #include "atomic_stat.h" + +// These are for split_into_vector: #include +#include /* The following is copied from storage/innobase/univ.i: */ #ifndef MY_ATTRIBUTE From a95ace491b49323a537d568f3124b6ca76aa2564 Mon Sep 17 00:00:00 2001 From: Sergei Petrunia Date: Sat, 29 Jul 2017 15:26:55 +0000 Subject: [PATCH 05/34] Test result updates: don't hardcode error numbers in the tests. 
--- .../rocksdb/mysql-test/rocksdb/t/ttl_primary.test | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/storage/rocksdb/mysql-test/rocksdb/t/ttl_primary.test b/storage/rocksdb/mysql-test/rocksdb/t/ttl_primary.test index 92200e859ed0b..833b6fdd91aa0 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/ttl_primary.test +++ b/storage/rocksdb/mysql-test/rocksdb/t/ttl_primary.test @@ -279,7 +279,7 @@ SELECT a FROM t1; DROP TABLE t1; # TTL field with nullable ttl column (should fail) ---error 1948 +--error ER_RDB_TTL_COL_FORMAT CREATE TABLE t1 ( a bigint(20) UNSIGNED NOT NULL, b int NOT NULL, @@ -290,7 +290,7 @@ CREATE TABLE t1 ( COMMENT='ttl_duration=1;ttl_col=ts;'; # TTL field with non 8-bit integer column (should fail) ---error 1948 +--error ER_RDB_TTL_COL_FORMAT CREATE TABLE t1 ( a bigint(20) UNSIGNED NOT NULL, b int NOT NULL, @@ -301,7 +301,7 @@ CREATE TABLE t1 ( COMMENT='ttl_duration=1;ttl_col=ts;'; # TTL duration as some random garbage value ---error 1949 +--error ER_RDB_TTL_DURATION_FORMAT CREATE TABLE t1 ( a bigint(20) UNSIGNED NOT NULL, b int NOT NULL, @@ -311,7 +311,7 @@ CREATE TABLE t1 ( COMMENT='ttl_duration=abc;'; # TTL col is some column outside of the table ---error 1948 +--error ER_RDB_TTL_COL_FORMAT CREATE TABLE t1 ( a bigint(20) UNSIGNED NOT NULL, b int NOT NULL, @@ -321,7 +321,7 @@ CREATE TABLE t1 ( COMMENT='ttl_duration=1;ttl_col=abc;'; # TTL col must have accompanying duration ---error 1948 +--error ER_RDB_TTL_COL_FORMAT CREATE TABLE t1 ( a bigint(20) UNSIGNED NOT NULL, b int NOT NULL, @@ -372,9 +372,9 @@ CREATE TABLE t1 ( ) ENGINE=rocksdb COMMENT='ttl_duration=100;'; ---error 1947 +--error ER_RDB_TTL_UNSUPPORTED ALTER TABLE t1 DROP PRIMARY KEY; ---error 1947 +--error ER_RDB_TTL_UNSUPPORTED ALTER TABLE t1 ADD INDEX kb(b), ALGORITHM=INPLACE; DROP TABLE t1; From d083c2490ab4a5df57947167372e00b7ff3967b1 Mon Sep 17 00:00:00 2001 From: Sergei Petrunia Date: Sat, 29 Jul 2017 17:05:14 +0000 Subject: [PATCH 06/34] Make 
rocksdb.bloomfilter_skip pass. --- .../rocksdb/r/bloomfilter_skip.result | 20 +++++++++++-------- .../mysql-test/rocksdb/t/bloomfilter.inc | 12 +++++------ 2 files changed, 18 insertions(+), 14 deletions(-) diff --git a/storage/rocksdb/mysql-test/rocksdb/r/bloomfilter_skip.result b/storage/rocksdb/mysql-test/rocksdb/r/bloomfilter_skip.result index b3336aec6ead3..2496f349427df 100644 --- a/storage/rocksdb/mysql-test/rocksdb/r/bloomfilter_skip.result +++ b/storage/rocksdb/mysql-test/rocksdb/r/bloomfilter_skip.result @@ -823,7 +823,7 @@ index id2_id4 (id2, id4) COMMENT 'rev:cf_short_prefix', index id2_id3_id1_id4 (id2, id3, id1, id4) COMMENT 'rev:cf_short_prefix', index id3_id2 (id3, id2) COMMENT 'rev:cf_short_prefix' ) engine=ROCKSDB; -create table t2 ( +create or replace table t2 ( id1 bigint not null, id2 bigint not null, id3 varchar(100) not null, @@ -839,6 +839,10 @@ index id2_id4_id5 (id2, id4, id5) COMMENT 'rev:cf_short_prefix', index id3_id4 (id3, id4) COMMENT 'rev:cf_short_prefix', index id3_id5 (id3, id5) COMMENT 'rev:cf_short_prefix' ) engine=ROCKSDB; +insert t1 +select (seq+9) div 10, (seq+4) div 5, (seq+4) div 5, seq, seq, 1000, "aaabbbccc" + from seq_1_to_10000; +insert t2 select * from t1; call bloom_start(); select count(*) from t1; count(*) @@ -1203,9 +1207,7 @@ count(*) call bloom_end(); checked false -drop table if exists t1; -drop table if exists t2; -create table t1 ( +create or replace table t1 ( id1 bigint not null, id2 bigint not null, id3 varchar(100) not null, @@ -1237,6 +1239,10 @@ index id2_id4_id5 (id2, id4, id5) COMMENT 'cf_long_prefix', index id3_id4 (id3, id4) COMMENT 'cf_long_prefix', index id3_id5 (id3, id5) COMMENT 'cf_long_prefix' ) engine=ROCKSDB; +insert t1 +select (seq+9) div 10, (seq+4) div 5, (seq+4) div 5, seq, seq, 1000, "aaabbbccc" + from seq_1_to_10000; +insert t2 select * from t1; call bloom_start(); select count(*) from t1; count(*) @@ -1601,9 +1607,7 @@ count(*) call bloom_end(); checked false -drop table if exists t1; 
-drop table if exists t2; -create table t1 ( +create or replace table t1 ( id1 bigint not null, id2 bigint not null, id3 varchar(100) not null, @@ -1619,7 +1623,7 @@ index id2_id4 (id2, id4) COMMENT 'rev:cf_long_prefix', index id2_id3_id1_id4 (id2, id3, id1, id4) COMMENT 'rev:cf_long_prefix', index id3_id2 (id3, id2) COMMENT 'rev:cf_long_prefix' ) engine=ROCKSDB; -create table t2 ( +create or replace table t2 ( id1 bigint not null, id2 bigint not null, id3 varchar(100) not null, diff --git a/storage/rocksdb/mysql-test/rocksdb/t/bloomfilter.inc b/storage/rocksdb/mysql-test/rocksdb/t/bloomfilter.inc index 9d62f590c04b3..6644d93cbc58d 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/bloomfilter.inc +++ b/storage/rocksdb/mysql-test/rocksdb/t/bloomfilter.inc @@ -23,18 +23,18 @@ DELIMITER ;// --source bloomfilter_table_def.inc --source bloomfilter_load_select.inc ---exec sed s/##CF##/" COMMENT 'rev:cf_short_prefix'"/g $tmpl_ddl > $ddl ---source $ddl ---source suite/rocksdb/t/bloomfilter_load_select.inc +--let $CF=COMMENT 'rev:cf_short_prefix' +--source bloomfilter_table_def.inc +--source bloomfilter_load_select.inc #BF is most of the time invoked and useful --let $CF=COMMENT 'cf_long_prefix' --source bloomfilter_table_def.inc --source bloomfilter_load_select.inc ---exec sed s/##CF##/" COMMENT 'rev:cf_long_prefix'"/g $tmpl_ddl > $ddl ---source $ddl ---source suite/rocksdb/t/bloomfilter_load_select.inc +--let $CF=COMMENT 'rev:cf_long_prefix' +--source bloomfilter_table_def.inc +--source bloomfilter_load_select.inc # BUG: Prev() with prefix lookup should not use prefix bloom filter From 123187dfed9b50e89c2970006fef9eebb2ec99e7 Mon Sep 17 00:00:00 2001 From: Sergei Petrunia Date: Sat, 29 Jul 2017 18:06:46 +0000 Subject: [PATCH 07/34] Fix the merge of rocksdb_sys_vars test suite --- .../rocksdb_sys_vars/include/rocksdb_sys_var.inc | 7 ------- .../t/rocksdb_update_cf_options_basic.test | 3 ++- 2 files changed, 2 insertions(+), 8 deletions(-) diff --git 
a/storage/rocksdb/mysql-test/rocksdb_sys_vars/include/rocksdb_sys_var.inc b/storage/rocksdb/mysql-test/rocksdb_sys_vars/include/rocksdb_sys_var.inc index eeb49e077a4e0..6ba9302667482 100644 --- a/storage/rocksdb/mysql-test/rocksdb_sys_vars/include/rocksdb_sys_var.inc +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/include/rocksdb_sys_var.inc @@ -17,13 +17,6 @@ if (!$suppress_default_value) SELECT @start_session_value; } } -SELECT @start_global_value; -if ($session) -{ - --eval SET @start_session_value = @@session.$sys_var - SELECT @start_session_value; -} -} if (!$read_only) { diff --git a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_update_cf_options_basic.test b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_update_cf_options_basic.test index 15d5d870ae648..0e675dafed395 100644 --- a/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_update_cf_options_basic.test +++ b/storage/rocksdb/mysql-test/rocksdb_sys_vars/t/rocksdb_update_cf_options_basic.test @@ -1,4 +1,5 @@ --source include/have_rocksdb.inc +--source include/have_partition.inc call mtr.add_suppression("MyRocks: NULL is not a valid option for updates to column family settings."); call mtr.add_suppression("Invalid cf options, '=' expected *"); @@ -91,4 +92,4 @@ SELECT @@global.rocksdb_update_cf_options; USE test; -DROP TABLE t1; \ No newline at end of file +DROP TABLE t1; From c90753e671e961269f8d052c35445144abbe1b00 Mon Sep 17 00:00:00 2001 From: Sergei Petrunia Date: Sun, 30 Jul 2017 09:03:42 +0000 Subject: [PATCH 08/34] Follow the upstream MyRocks: provide details in the ER_LOCK_DEADLOCK message This fixes result mismatches in rocksdb.issue111, rocksdb.hermitage, rocksdb.rocksdb_locks --- sql/handler.cc | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/sql/handler.cc b/sql/handler.cc index a4a2297bd2f2a..a336d9916f171 100644 --- a/sql/handler.cc +++ b/sql/handler.cc @@ -3490,10 +3490,17 @@ void handler::print_error(int error, myf errflag) 
textno=ER_LOCK_TABLE_FULL; break; case HA_ERR_LOCK_DEADLOCK: - textno=ER_LOCK_DEADLOCK; + { + String str, full_err_msg(ER_DEFAULT(ER_LOCK_DEADLOCK), system_charset_info); + /* cannot continue. the statement was already aborted in the engine */ SET_FATAL_ERROR; - break; + + get_error_message(error, &str); + full_err_msg.append(str); + my_printf_error(ER_LOCK_DEADLOCK, "%s", errflag, full_err_msg.c_ptr_safe()); + DBUG_VOID_RETURN; + } case HA_ERR_READ_ONLY_TRANSACTION: textno=ER_READ_ONLY_TRANSACTION; break; From 1e6b02e688827dc434a6d7b076f81cb646630d74 Mon Sep 17 00:00:00 2001 From: Sergei Petrunia Date: Sun, 30 Jul 2017 10:42:39 +0000 Subject: [PATCH 09/34] MDEV-13404: MyRocks upstream uses I_S.table_statistics.row_lock_deadlocks Comment out a part of testcase that uses it. --- .../rocksdb/include/deadlock_stats.inc | 6 ++ .../rocksdb/r/deadlock_stats.result | 55 +++---------------- 2 files changed, 15 insertions(+), 46 deletions(-) diff --git a/storage/rocksdb/mysql-test/rocksdb/include/deadlock_stats.inc b/storage/rocksdb/mysql-test/rocksdb/include/deadlock_stats.inc index 40df82e1314b7..48ef6f816bdce 100644 --- a/storage/rocksdb/mysql-test/rocksdb/include/deadlock_stats.inc +++ b/storage/rocksdb/mysql-test/rocksdb/include/deadlock_stats.inc @@ -19,6 +19,11 @@ connection default; eval create table t (i int primary key) engine=$engine; insert into t values (1), (2), (3); +--echo # +--echo # The following is disabled due: +--echo # MDEV-13404: MyRocks upstream uses I_S.table_statistics.row_lock_deadlocks, should we import? 
+--echo # +--disable_parsing --source include/simple_deadlock.inc connection default; select row_lock_deadlocks from information_schema.table_statistics where @@ -33,6 +38,7 @@ table_name = "t"; select row_lock_deadlocks from information_schema.table_statistics where table_name = "t"; +--enable_parsing disconnect con1; disconnect con2; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/deadlock_stats.result b/storage/rocksdb/mysql-test/rocksdb/r/deadlock_stats.result index 9b62cade9ca23..79cb6bb0f6115 100644 --- a/storage/rocksdb/mysql-test/rocksdb/r/deadlock_stats.result +++ b/storage/rocksdb/mysql-test/rocksdb/r/deadlock_stats.result @@ -1,51 +1,14 @@ set @prior_lock_wait_timeout = @@rocksdb_lock_wait_timeout; set @prior_deadlock_detect = @@rocksdb_deadlock_detect; set global rocksdb_deadlock_detect = on; set global rocksdb_lock_wait_timeout = 100000;; +connect con1,localhost,root,,; +connect con2,localhost,root,,; +connection default; create table t (i int primary key) engine=rocksdb; insert into t values (1), (2), (3); -begin; -select * from t where i=1 for update; -i -1 -begin; -select * from t where i=2 for update; -i -2 -select * from t where i=2 for update; -select * from t where i=1 for update; -ERROR 40001: Deadlock found when trying to get lock; try restarting transaction -rollback; -i -2 -rollback; -select row_lock_deadlocks from information_schema.table_statistics where -table_name = "t"; -row_lock_deadlocks -1 -select row_lock_deadlocks from information_schema.table_statistics where -table_name = "t"; -row_lock_deadlocks -1 -begin; -select * from t where i=1 for update; -i -1 -begin; -select * from t where i=2 for update; -i -2 -select * from t where i=2 for update; -select * from t where i=1 for update; -ERROR 40001: Deadlock found when trying to get lock; try restarting transaction -rollback; -i -2 -rollback; -select row_lock_deadlocks from information_schema.table_statistics where -table_name = "t"; -row_lock_deadlocks -2 -select row_lock_deadlocks 
from information_schema.table_statistics where -table_name = "t"; -row_lock_deadlocks -2 +# +# The following is disabled due: +# MDEV-13404: MyRocks upstream uses I_S.table_statistics.row_lock_deadlocks, should we import? +# +disconnect con1; +disconnect con2; set global rocksdb_lock_wait_timeout = @prior_lock_wait_timeout; set global rocksdb_deadlock_detect = @prior_deadlock_detect;; drop table t; From 894c797eaf41cbc387447bc6e7cdd5f9e9ca922f Mon Sep 17 00:00:00 2001 From: Sergei Petrunia Date: Sun, 30 Jul 2017 10:51:27 +0000 Subject: [PATCH 10/34] Mark all tests that are derived from rocksdb.bulk_load as "big". (Could we just put the mark into bulk_load.inc ?) --- storage/rocksdb/mysql-test/rocksdb/t/bulk_load_rev_cf.test | 2 ++ .../rocksdb/mysql-test/rocksdb/t/bulk_load_rev_cf_and_data.test | 2 ++ storage/rocksdb/mysql-test/rocksdb/t/bulk_load_rev_data.test | 2 ++ 3 files changed, 6 insertions(+) diff --git a/storage/rocksdb/mysql-test/rocksdb/t/bulk_load_rev_cf.test b/storage/rocksdb/mysql-test/rocksdb/t/bulk_load_rev_cf.test index fdacb22a73cb7..7c4d7aef0e5d0 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/bulk_load_rev_cf.test +++ b/storage/rocksdb/mysql-test/rocksdb/t/bulk_load_rev_cf.test @@ -1,6 +1,8 @@ --source include/have_rocksdb.inc --source include/have_partition.inc +--source include/big_test.inc + --let pk_cf=rev:cf1 --let data_order_desc=0 diff --git a/storage/rocksdb/mysql-test/rocksdb/t/bulk_load_rev_cf_and_data.test b/storage/rocksdb/mysql-test/rocksdb/t/bulk_load_rev_cf_and_data.test index 11709ac18be50..a31e86753f35e 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/bulk_load_rev_cf_and_data.test +++ b/storage/rocksdb/mysql-test/rocksdb/t/bulk_load_rev_cf_and_data.test @@ -1,6 +1,8 @@ --source include/have_rocksdb.inc --source include/have_partition.inc +--source include/big_test.inc + --let pk_cf=rev:cf1 --let data_order_desc=1 diff --git a/storage/rocksdb/mysql-test/rocksdb/t/bulk_load_rev_data.test 
b/storage/rocksdb/mysql-test/rocksdb/t/bulk_load_rev_data.test index a3bcd1b060c43..f36990ed5671f 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/bulk_load_rev_data.test +++ b/storage/rocksdb/mysql-test/rocksdb/t/bulk_load_rev_data.test @@ -1,6 +1,8 @@ --source include/have_rocksdb.inc --source include/have_partition.inc +--source include/big_test.inc + --let pk_cf=cf1 --let data_order_desc=1 From d74e43e7bc626faac90e1b0a273b118a87de1cce Mon Sep 17 00:00:00 2001 From: Sergei Petrunia Date: Sun, 30 Jul 2017 11:12:54 +0000 Subject: [PATCH 11/34] Fix rocksdb.duplicate_table test --- storage/rocksdb/mysql-test/rocksdb/t/duplicate_table.test | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/storage/rocksdb/mysql-test/rocksdb/t/duplicate_table.test b/storage/rocksdb/mysql-test/rocksdb/t/duplicate_table.test index 9ac89a128c90d..875f8514d8f59 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/duplicate_table.test +++ b/storage/rocksdb/mysql-test/rocksdb/t/duplicate_table.test @@ -8,7 +8,7 @@ INSERT INTO t values (1), (2), (3); CREATE TABLE t(id int primary key) engine=rocksdb; FLUSH TABLES; move_file $MYSQLTEST_VARDIR/mysqld.1/data/test/t.frm $MYSQLTEST_VARDIR/mysqld.1/data/test/t.frm.tmp; ---error ER_UNKNOWN_ERROR +--error ER_METADATA_INCONSISTENCY CREATE TABLE t(id int primary key) engine=rocksdb; move_file $MYSQLTEST_VARDIR/mysqld.1/data/test/t.frm.tmp $MYSQLTEST_VARDIR/mysqld.1/data/test/t.frm; FLUSH TABLES; From da9c6692109b7944323b2719cb9e427caeadd896 Mon Sep 17 00:00:00 2001 From: Sergei Petrunia Date: Sun, 30 Jul 2017 12:09:51 +0000 Subject: [PATCH 12/34] Fixes for a few more post-merge test failures --- .../mysql-test/rocksdb/r/bloomfilter.result | 20 +++++++++++-------- .../r/cons_snapshot_read_committed.result | 12 +++++------ .../r/cons_snapshot_repeatable_read.result | 2 +- .../mysql-test/rocksdb/r/i_s_ddl.result | 4 ++-- .../rocksdb/mysql-test/rocksdb/t/disabled.def | 1 + 5 files changed, 22 insertions(+), 17 deletions(-) diff --git 
a/storage/rocksdb/mysql-test/rocksdb/r/bloomfilter.result b/storage/rocksdb/mysql-test/rocksdb/r/bloomfilter.result index 22034d0fbba84..5f1083a1bb28f 100644 --- a/storage/rocksdb/mysql-test/rocksdb/r/bloomfilter.result +++ b/storage/rocksdb/mysql-test/rocksdb/r/bloomfilter.result @@ -823,7 +823,7 @@ index id2_id4 (id2, id4) COMMENT 'rev:cf_short_prefix', index id2_id3_id1_id4 (id2, id3, id1, id4) COMMENT 'rev:cf_short_prefix', index id3_id2 (id3, id2) COMMENT 'rev:cf_short_prefix' ) engine=ROCKSDB; -create table t2 ( +create or replace table t2 ( id1 bigint not null, id2 bigint not null, id3 varchar(100) not null, @@ -839,6 +839,10 @@ index id2_id4_id5 (id2, id4, id5) COMMENT 'rev:cf_short_prefix', index id3_id4 (id3, id4) COMMENT 'rev:cf_short_prefix', index id3_id5 (id3, id5) COMMENT 'rev:cf_short_prefix' ) engine=ROCKSDB; +insert t1 +select (seq+9) div 10, (seq+4) div 5, (seq+4) div 5, seq, seq, 1000, "aaabbbccc" + from seq_1_to_10000; +insert t2 select * from t1; call bloom_start(); select count(*) from t1; count(*) @@ -1203,9 +1207,7 @@ count(*) call bloom_end(); checked true -drop table if exists t1; -drop table if exists t2; -create table t1 ( +create or replace table t1 ( id1 bigint not null, id2 bigint not null, id3 varchar(100) not null, @@ -1237,6 +1239,10 @@ index id2_id4_id5 (id2, id4, id5) COMMENT 'cf_long_prefix', index id3_id4 (id3, id4) COMMENT 'cf_long_prefix', index id3_id5 (id3, id5) COMMENT 'cf_long_prefix' ) engine=ROCKSDB; +insert t1 +select (seq+9) div 10, (seq+4) div 5, (seq+4) div 5, seq, seq, 1000, "aaabbbccc" + from seq_1_to_10000; +insert t2 select * from t1; call bloom_start(); select count(*) from t1; count(*) @@ -1601,9 +1607,7 @@ count(*) call bloom_end(); checked false -drop table if exists t1; -drop table if exists t2; -create table t1 ( +create or replace table t1 ( id1 bigint not null, id2 bigint not null, id3 varchar(100) not null, @@ -1619,7 +1623,7 @@ index id2_id4 (id2, id4) COMMENT 'rev:cf_long_prefix', index 
id2_id3_id1_id4 (id2, id3, id1, id4) COMMENT 'rev:cf_long_prefix', index id3_id2 (id3, id2) COMMENT 'rev:cf_long_prefix' ) engine=ROCKSDB; -create table t2 ( +create or replace table t2 ( id1 bigint not null, id2 bigint not null, id3 varchar(100) not null, diff --git a/storage/rocksdb/mysql-test/rocksdb/r/cons_snapshot_read_committed.result b/storage/rocksdb/mysql-test/rocksdb/r/cons_snapshot_read_committed.result index 521edec0c83ff..637354e013ef8 100644 --- a/storage/rocksdb/mysql-test/rocksdb/r/cons_snapshot_read_committed.result +++ b/storage/rocksdb/mysql-test/rocksdb/r/cons_snapshot_read_committed.result @@ -5,7 +5,7 @@ connection con1; CREATE TABLE t1 (a INT, pk INT AUTO_INCREMENT PRIMARY KEY) ENGINE=ROCKSDB; SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED; START TRANSACTION WITH CONSISTENT SNAPSHOT; -ERROR: 1938 +ERROR: 4062 connection con2; select * from information_schema.rocksdb_dbstats where stat_type='DB_NUM_SNAPSHOTS'; STAT_TYPE VALUE @@ -18,7 +18,7 @@ STAT_TYPE VALUE DB_NUM_SNAPSHOTS 0 connection con1; START TRANSACTION WITH CONSISTENT SNAPSHOT; -ERROR: 1938 +ERROR: 4062 connection con2; INSERT INTO t1 (a) VALUES (1); connection con1; @@ -69,7 +69,7 @@ id value value2 5 5 5 6 6 6 START TRANSACTION WITH CONSISTENT SNAPSHOT; -ERROR: 1938 +ERROR: 4062 connection con2; INSERT INTO r1 values (7,7,7); connection con1; @@ -107,12 +107,12 @@ id value value2 7 7 7 8 8 8 START TRANSACTION WITH CONSISTENT SNAPSHOT; -ERROR: 1938 +ERROR: 4062 connection con2; INSERT INTO r1 values (9,9,9); connection con1; START TRANSACTION WITH CONSISTENT SNAPSHOT; -ERROR: 1938 +ERROR: 4062 connection con2; INSERT INTO r1 values (10,10,10); connection con1; @@ -129,7 +129,7 @@ id value value2 9 9 9 10 10 10 START TRANSACTION WITH CONSISTENT SNAPSHOT; -ERROR: 1938 +ERROR: 4062 INSERT INTO r1 values (11,11,11); ERROR: 0 SELECT * FROM r1; diff --git a/storage/rocksdb/mysql-test/rocksdb/r/cons_snapshot_repeatable_read.result 
b/storage/rocksdb/mysql-test/rocksdb/r/cons_snapshot_repeatable_read.result index 805a0aaa0fdd1..d9be37ee18dfc 100644 --- a/storage/rocksdb/mysql-test/rocksdb/r/cons_snapshot_repeatable_read.result +++ b/storage/rocksdb/mysql-test/rocksdb/r/cons_snapshot_repeatable_read.result @@ -125,7 +125,7 @@ id value value2 START TRANSACTION WITH CONSISTENT SNAPSHOT; ERROR: 0 INSERT INTO r1 values (11,11,11); -ERROR: 1935 +ERROR: 4059 SELECT * FROM r1; id value value2 1 1 1 diff --git a/storage/rocksdb/mysql-test/rocksdb/r/i_s_ddl.result b/storage/rocksdb/mysql-test/rocksdb/r/i_s_ddl.result index 1e2b75c0fda8c..2a1fcd781268c 100644 --- a/storage/rocksdb/mysql-test/rocksdb/r/i_s_ddl.result +++ b/storage/rocksdb/mysql-test/rocksdb/r/i_s_ddl.result @@ -11,7 +11,7 @@ TABLE_SCHEMA TABLE_NAME PARTITION_NAME INDEX_NAME INDEX_TYPE KV_FORMAT_VERSION C test is_ddl_t1 NULL PRIMARY 1 13 default test is_ddl_t1 NULL j 2 12 default test is_ddl_t1 NULL k 2 12 kl_cf -test is_ddl_t2 NULL PRIMARY 1 11 zy_cf -test is_ddl_t2 NULL x 2 11 default +test is_ddl_t2 NULL PRIMARY 1 13 zy_cf +test is_ddl_t2 NULL x 2 12 default DROP TABLE is_ddl_t1; DROP TABLE is_ddl_t2; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/disabled.def b/storage/rocksdb/mysql-test/rocksdb/t/disabled.def index fbec7eec6a19a..b7a61ba3b307f 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/disabled.def +++ b/storage/rocksdb/mysql-test/rocksdb/t/disabled.def @@ -7,6 +7,7 @@ level_serializable: Not supported slow_query_log: MDEV-11480 select_for_update_skip_locked_nowait: MDEV-11481 rpl_read_free: MDEV-10976 +lock_wait_timeout_stats: MDEV-13404 optimizer_loose_index_scans: MariaDB doesnt support Skip Scan From 4eec6d4ccacbaeec04a6522111624cf7af56be80 Mon Sep 17 00:00:00 2001 From: Sergei Petrunia Date: Sun, 30 Jul 2017 13:36:11 +0000 Subject: [PATCH 13/34] Update test result for rocksdb.tbl_opt_data_index_dir See MDEV-12279, MariaDB is still not able to produce nice error messages in this case. 
--- .../rocksdb/r/tbl_opt_data_index_dir.result | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/storage/rocksdb/mysql-test/rocksdb/r/tbl_opt_data_index_dir.result b/storage/rocksdb/mysql-test/rocksdb/r/tbl_opt_data_index_dir.result index 351f2289235db..7f31b4434f547 100644 --- a/storage/rocksdb/mysql-test/rocksdb/r/tbl_opt_data_index_dir.result +++ b/storage/rocksdb/mysql-test/rocksdb/r/tbl_opt_data_index_dir.result @@ -1,16 +1,16 @@ DROP TABLE IF EXISTS t1; CREATE TABLE t1 (a INT PRIMARY KEY, b CHAR(8)) ENGINE=rocksdb DATA DIRECTORY = '/foo/bar/data'; -ERROR HY000: Got error 195 'Specifying DATA DIRECTORY for an individual table is not supported.' from ROCKSDB +ERROR HY000: Can't create table `test`.`t1` (errno: 140 "Wrong create options") show warnings; Level Code Message -Warning 1296 Got error 198 'Specifying DATA DIRECTORY for an individual table is not supported.' from ROCKSDB +Warning 1296 Got error 196 'Specifying DATA DIRECTORY for an individual table is not supported.' from ROCKSDB Error 1005 Can't create table `test`.`t1` (errno: 140 "Wrong create options") Warning 1030 Got error 140 "Wrong create options" from storage engine ROCKSDB CREATE TABLE t1 (a INT PRIMARY KEY, b CHAR(8)) ENGINE=rocksdb INDEX DIRECTORY = '/foo/bar/index'; -ERROR HY000: Got error 196 'Specifying INDEX DIRECTORY for an individual table is not supported.' from ROCKSDB +ERROR HY000: Can't create table `test`.`t1` (errno: 140 "Wrong create options") show warnings; Level Code Message -Warning 1296 Got error 199 'Specifying INDEX DIRECTORY for an individual table is not supported.' from ROCKSDB +Warning 1296 Got error 197 'Specifying INDEX DIRECTORY for an individual table is not supported.' 
from ROCKSDB Error 1005 Can't create table `test`.`t1` (errno: 140 "Wrong create options") Warning 1030 Got error 140 "Wrong create options" from storage engine ROCKSDB CREATE TABLE t1 (id INT NOT NULL PRIMARY KEY) ENGINE=rocksdb PARTITION BY RANGE (id) @@ -21,7 +21,7 @@ PARTITION P1 VALUES LESS THAN (2000) DATA DIRECTORY = '/foo/bar/data/', PARTITION P2 VALUES LESS THAN (MAXVALUE) ); -ERROR HY000: Got error 195 'Specifying DATA DIRECTORY for an individual table is not supported.' from ROCKSDB +ERROR HY000: Can't create table `test`.`t1` (errno: 140 "Wrong create options") CREATE TABLE t1 (id int not null primary key) ENGINE=rocksdb PARTITION BY RANGE (id) ( PARTITION P0 VALUES LESS THAN (1000) @@ -30,4 +30,4 @@ PARTITION P1 VALUES LESS THAN (2000) INDEX DIRECTORY = '/foo/bar/data/', PARTITION P2 VALUES LESS THAN (MAXVALUE) ); -ERROR HY000: Got error 196 'Specifying INDEX DIRECTORY for an individual table is not supported.' from ROCKSDB +ERROR HY000: Can't create table `test`.`t1` (errno: 140 "Wrong create options") From 6bf757a2a934fc2033297c8b699ea9b69948e81f Mon Sep 17 00:00:00 2001 From: Sergei Petrunia Date: Sun, 30 Jul 2017 13:37:04 +0000 Subject: [PATCH 14/34] Disable rocksdb.issue243_transactionStatus It uses SHOW ENGINE TRANSACTION STATUS, which is not supported in MariaDB --- storage/rocksdb/mysql-test/rocksdb/t/disabled.def | 1 + 1 file changed, 1 insertion(+) diff --git a/storage/rocksdb/mysql-test/rocksdb/t/disabled.def b/storage/rocksdb/mysql-test/rocksdb/t/disabled.def index b7a61ba3b307f..842b85f87c991 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/disabled.def +++ b/storage/rocksdb/mysql-test/rocksdb/t/disabled.def @@ -41,6 +41,7 @@ mysqldump : MariaRocks: MariaDB's mysqldump doesn't support --print-ordering-key mysqldump2 : MariaRocks: MariaDB's mysqldump doesn't support --print-ordering-key show_engine : MariaRocks: MariaDB doesnt support SHOW ENGINE rocksdb TRANSACTION STATUS +issue243_transactionStatus: MariaDB doesnt support SHOW ENGINE 
rocksdb TRANSACTION STATUS rpl_row_not_found : MariaDB doesnt support slave_exec_mode='SEMI_STRICT' From 1388afcd840024d8e5d2b8f6e176a12f36933016 Mon Sep 17 00:00:00 2001 From: Sergei Petrunia Date: Mon, 31 Jul 2017 13:44:15 +0000 Subject: [PATCH 15/34] MDEV-13413: After the merge rocksdb.drop_table fails with warnings - Fix the bad merge in drop_table.test - Remove the obsolete rocksdb_info_log_level=info_level option which caused warnings to be found in the error log. --- .../rocksdb/t/drop_table-master.opt | 1 - .../mysql-test/rocksdb/t/drop_table.test | 39 ------------------- 2 files changed, 40 deletions(-) diff --git a/storage/rocksdb/mysql-test/rocksdb/t/drop_table-master.opt b/storage/rocksdb/mysql-test/rocksdb/t/drop_table-master.opt index 4afcf2caa5d94..f40e9db55b29d 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/drop_table-master.opt +++ b/storage/rocksdb/mysql-test/rocksdb/t/drop_table-master.opt @@ -1,4 +1,3 @@ --rocksdb_max_subcompactions=1 ---rocksdb_info_log_level=info_level --rocksdb_default_cf_options=write_buffer_size=512k;target_file_size_base=512k;level0_file_num_compaction_trigger=2;level0_slowdown_writes_trigger=-1;level0_stop_writes_trigger=1000;max_bytes_for_level_base=1m diff --git a/storage/rocksdb/mysql-test/rocksdb/t/drop_table.test b/storage/rocksdb/mysql-test/rocksdb/t/drop_table.test index 5543f3cb10e5d..09725adc55813 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/drop_table.test +++ b/storage/rocksdb/mysql-test/rocksdb/t/drop_table.test @@ -107,44 +107,5 @@ let $wait_condition = select count(*) = 0 where TYPE = 'DDL_DROP_INDEX_ONGOING'; --source include/wait_condition.inc -perl; - -sub print_array { - $str = shift; - $prev= $_[0]; - foreach (@_) { - $dummy_idx = $_ - $prev; - $prev= $_; - print "$str $dummy_idx\n"; - } -} - -$filename= "$ENV{MYSQLTEST_VARDIR}/log/mysqld.1.err"; -open(F, '<', $filename) || die("Can't open file $filename: $!"); -while () { - %a = @b = @c = () if /CURRENT_TEST/; - if (/Compacting away elements 
from dropped index \(\d+,(\d+)\): (\d+)/) { - $a{$1} += $2; - } - if (/Begin filtering dropped index \(\d+,(\d+)\)/) { - push @b, $1; - } - if (/Finished filtering dropped index \(\d+,(\d+)\)/) { - push @c, $1; - } -} - -$prev= 0; -foreach (sort {$a <=> $b} keys %a){ - if ($prev) { - $dummy_idx= $_ - $prev; - }else { - $dummy_idx= 0; - } - $prev= $_; -} -print_array("Begin filtering dropped index+", sort {$a <=> $b} @b); -print_array("Finished filtering dropped index+", sort {$a <=> $b} @c); -EOF # Cleanup drop table t1; From 61ca3cf524ba3ca956fd0b2a5c8105a13a091519 Mon Sep 17 00:00:00 2001 From: Sergei Petrunia Date: Mon, 31 Jul 2017 17:34:47 +0000 Subject: [PATCH 16/34] Post-merge fix: Rdb_io_watchdog doesn't support windows So disable it there for now. --- storage/rocksdb/CMakeLists.txt | 12 +++++++++++- storage/rocksdb/ha_rocksdb.cc | 13 +++++++++++-- storage/rocksdb/rdb_io_watchdog.h | 4 ++++ 3 files changed, 26 insertions(+), 3 deletions(-) diff --git a/storage/rocksdb/CMakeLists.txt b/storage/rocksdb/CMakeLists.txt index 5db6d888bb6ba..ed43144eb330b 100644 --- a/storage/rocksdb/CMakeLists.txt +++ b/storage/rocksdb/CMakeLists.txt @@ -126,7 +126,7 @@ endif() INCLUDE(build_rocksdb.cmake) -ADD_CONVENIENCE_LIBRARY(rocksdb_aux_lib +set(rocksdb_aux_lib_sources ha_rocksdb_proto.h logger.h rdb_comparator.h @@ -143,6 +143,16 @@ ADD_CONVENIENCE_LIBRARY(rocksdb_aux_lib rdb_mariadb_port.h ) +if(WIN32) +else() + list(APPEND rocksdb_aux_lib_sources + rdb_io_watchdog.cc + rdb_io_watchdog.h + ) +endif() + +ADD_CONVENIENCE_LIBRARY(rocksdb_aux_lib ${rocksdb_aux_lib_sources}) + ADD_DEPENDENCIES(rocksdb_aux_lib GenError) # MARIAROCKS-TODO: how to properly depend on -lrt ? 
diff --git a/storage/rocksdb/ha_rocksdb.cc b/storage/rocksdb/ha_rocksdb.cc index ab21a9c6bcc1b..a0b8cca0791d1 100644 --- a/storage/rocksdb/ha_rocksdb.cc +++ b/storage/rocksdb/ha_rocksdb.cc @@ -164,8 +164,10 @@ Rdb_cf_manager cf_manager; Rdb_ddl_manager ddl_manager; const char *m_mysql_gtid; Rdb_binlog_manager binlog_manager; -Rdb_io_watchdog *io_watchdog = nullptr; +#ifndef _WIN32 +Rdb_io_watchdog *io_watchdog = nullptr; +#endif /** MyRocks background thread control N.B. This is besides RocksDB's own background threads @@ -544,15 +546,18 @@ static void rocksdb_set_io_write_timeout( void *const var_ptr MY_ATTRIBUTE((__unused__)), const void *const save) { DBUG_ASSERT(save != nullptr); DBUG_ASSERT(rdb != nullptr); +#ifndef _WIN32 DBUG_ASSERT(io_watchdog != nullptr); +#endif RDB_MUTEX_LOCK_CHECK(rdb_sysvars_mutex); const uint32_t new_val = *static_cast(save); rocksdb_io_write_timeout_secs = new_val; +#ifndef _WIN32 io_watchdog->reset_timeout(rocksdb_io_write_timeout_secs); - +#endif RDB_MUTEX_UNLOCK_CHECK(rdb_sysvars_mutex); } @@ -3971,8 +3976,10 @@ static int rocksdb_init_func(void *const p) { directories.push_back(myrocks::rocksdb_wal_dir); } +#ifndef _WIN32 io_watchdog = new Rdb_io_watchdog(directories); io_watchdog->reset_timeout(rocksdb_io_write_timeout_secs); +#endif // NO_LINT_DEBUG sql_print_information("MyRocks storage engine plugin has been successfully " @@ -4061,8 +4068,10 @@ static int rocksdb_done_func(void *const p) { delete commit_latency_stats; commit_latency_stats = nullptr; +#ifndef _WIN32 delete io_watchdog; io_watchdog = nullptr; +#endif // Disown the cache data since we're shutting down. // This results in memory leaks but it improved the shutdown time. diff --git a/storage/rocksdb/rdb_io_watchdog.h b/storage/rocksdb/rdb_io_watchdog.h index 0fb77536fb03e..de8c1b9500e68 100644 --- a/storage/rocksdb/rdb_io_watchdog.h +++ b/storage/rocksdb/rdb_io_watchdog.h @@ -34,6 +34,9 @@ namespace myrocks { +// Rdb_io_watchdog does not support Windows ATM. 
+#ifndef _WIN32 + class Rdb_io_watchdog { const int RDB_IO_WRITE_BUFFER_SIZE = 4096; const char *const RDB_IO_DUMMY_FILE_NAME = "myrocks_io_watchdog_write_file"; @@ -110,4 +113,5 @@ class Rdb_io_watchdog { Rdb_io_watchdog &operator=(const Rdb_io_watchdog &) = delete; }; +#endif } // namespace myrocks From 2963a49f728f08f25cedd664e109c09925225afd Mon Sep 17 00:00:00 2001 From: Sergei Petrunia Date: Tue, 1 Aug 2017 08:50:25 +0000 Subject: [PATCH 17/34] Post-merge fix: Rdb_io_watchdog doesn't support windows Part #2. --- storage/rocksdb/CMakeLists.txt | 12 +----------- storage/rocksdb/rdb_io_watchdog.cc | 5 +++++ 2 files changed, 6 insertions(+), 11 deletions(-) diff --git a/storage/rocksdb/CMakeLists.txt b/storage/rocksdb/CMakeLists.txt index ed43144eb330b..5db6d888bb6ba 100644 --- a/storage/rocksdb/CMakeLists.txt +++ b/storage/rocksdb/CMakeLists.txt @@ -126,7 +126,7 @@ endif() INCLUDE(build_rocksdb.cmake) -set(rocksdb_aux_lib_sources +ADD_CONVENIENCE_LIBRARY(rocksdb_aux_lib ha_rocksdb_proto.h logger.h rdb_comparator.h @@ -143,16 +143,6 @@ set(rocksdb_aux_lib_sources rdb_mariadb_port.h ) -if(WIN32) -else() - list(APPEND rocksdb_aux_lib_sources - rdb_io_watchdog.cc - rdb_io_watchdog.h - ) -endif() - -ADD_CONVENIENCE_LIBRARY(rocksdb_aux_lib ${rocksdb_aux_lib_sources}) - ADD_DEPENDENCIES(rocksdb_aux_lib GenError) # MARIAROCKS-TODO: how to properly depend on -lrt ? 
diff --git a/storage/rocksdb/rdb_io_watchdog.cc b/storage/rocksdb/rdb_io_watchdog.cc index 7b229eee47deb..cb58cc997fada 100644 --- a/storage/rocksdb/rdb_io_watchdog.cc +++ b/storage/rocksdb/rdb_io_watchdog.cc @@ -21,6 +21,8 @@ #include #include +#ifndef _WIN32 + namespace myrocks { void Rdb_io_watchdog::expire_io_callback(union sigval timer_data) { @@ -231,3 +233,6 @@ int Rdb_io_watchdog::reset_timeout(const uint32_t &write_timeout) { } } // namespace myrocks + +#endif + From 08997242577dad41738733829ea979af88c431db Mon Sep 17 00:00:00 2001 From: Sergei Petrunia Date: Tue, 1 Aug 2017 08:50:48 +0000 Subject: [PATCH 18/34] Use proper #include's --- storage/rocksdb/rdb_compact_filter.h | 1 + 1 file changed, 1 insertion(+) diff --git a/storage/rocksdb/rdb_compact_filter.h b/storage/rocksdb/rdb_compact_filter.h index 4696a1985b67d..ea08ac458a68a 100644 --- a/storage/rocksdb/rdb_compact_filter.h +++ b/storage/rocksdb/rdb_compact_filter.h @@ -23,6 +23,7 @@ /* C++ system header files */ #include #include +#include /* RocksDB includes */ #include "rocksdb/compaction_filter.h" From fcb8d8e598dad66c8e2ecc4a576b3b250abcd623 Mon Sep 17 00:00:00 2001 From: Sergei Petrunia Date: Tue, 1 Aug 2017 19:19:54 +0000 Subject: [PATCH 19/34] Make rocksdb.prefix_extractor_override work on Windows --- .../mysql-test/rocksdb/t/prefix_extractor_override.test | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/storage/rocksdb/mysql-test/rocksdb/t/prefix_extractor_override.test b/storage/rocksdb/mysql-test/rocksdb/t/prefix_extractor_override.test index 13d76bb5a3f8a..c6b910711719a 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/prefix_extractor_override.test +++ b/storage/rocksdb/mysql-test/rocksdb/t/prefix_extractor_override.test @@ -26,7 +26,9 @@ select variable_value into @u from information_schema.global_status where variab SELECT COUNT(*) FROM t1 WHERE id1=1 AND id2=1 AND id3=1; select variable_value-@u from information_schema.global_status where 
variable_name='rocksdb_bloom_filter_prefix_checked'; ---exec echo "" > $MYSQLTEST_VARDIR/log/mysqld.1.err +# MariaDB: the following doesn't work on Windows and doesn't seem to be needed +# on Linux: +#--exec echo "" > $MYSQLTEST_VARDIR/log/mysqld.1.err --let $_mysqld_option=--rocksdb_override_cf_options=cf1={prefix_extractor=capped:26}; --echo From bf256392e6d4c480c953444a42b9dffd6216d90c Mon Sep 17 00:00:00 2001 From: Nirbhay Choubey Date: Sat, 25 Feb 2017 16:13:32 -0500 Subject: [PATCH 20/34] Support for server error messages in Hindi. --- sql/share/CMakeLists.txt | 1 + sql/share/errmsg-utf8.txt | 221 +++++++++++++++++++++++++++++++++++++- 2 files changed, 221 insertions(+), 1 deletion(-) diff --git a/sql/share/CMakeLists.txt b/sql/share/CMakeLists.txt index e0d5fb6c1a791..1461c57c5c3fa 100644 --- a/sql/share/CMakeLists.txt +++ b/sql/share/CMakeLists.txt @@ -38,6 +38,7 @@ russian czech french serbian +hindi ) SET(files diff --git a/sql/share/errmsg-utf8.txt b/sql/share/errmsg-utf8.txt index 1f4cf315f1c14..d96107cb45829 100644 --- a/sql/share/errmsg-utf8.txt +++ b/sql/share/errmsg-utf8.txt @@ -1,4 +1,4 @@ -languages czech=cze latin2, danish=dan latin1, dutch=nla latin1, english=eng latin1, estonian=est latin7, french=fre latin1, german=ger latin1, greek=greek greek, hungarian=hun latin2, italian=ita latin1, japanese=jpn ujis, korean=kor euckr, norwegian-ny=norwegian-ny latin1, norwegian=nor latin1, polish=pol latin2, portuguese=por latin1, romanian=rum latin2, russian=rus koi8r, serbian=serbian cp1250, slovak=slo latin2, spanish=spa latin1, swedish=swe latin1, ukrainian=ukr koi8u, bulgarian=bgn cp1251; +languages czech=cze latin2, danish=dan latin1, dutch=nla latin1, english=eng latin1, estonian=est latin7, french=fre latin1, german=ger latin1, greek=greek greek, hungarian=hun latin2, italian=ita latin1, japanese=jpn ujis, korean=kor euckr, norwegian-ny=norwegian-ny latin1, norwegian=nor latin1, polish=pol latin2, portuguese=por latin1, romanian=rum latin2, 
russian=rus koi8r, serbian=serbian cp1250, slovak=slo latin2, spanish=spa latin1, swedish=swe latin1, ukrainian=ukr koi8u, bulgarian=bgn cp1251, hindi=hindi utf8; default-language eng @@ -17,6 +17,7 @@ ER_NO fre "NON" ger "Nein" greek "ΟΧΙ" + hindi "नहीं" hun "NEM" kor "아니오" nor "NEI" @@ -37,6 +38,7 @@ ER_YES fre "OUI" ger "Ja" greek "ΝΑΙ" + hindi "हाँ" hun "IGEN" ita "SI" kor "예" @@ -59,6 +61,7 @@ ER_CANT_CREATE_FILE fre "Ne peut créer le fichier '%-.200s' (Errcode: %M)" ger "Kann Datei '%-.200s' nicht erzeugen (Fehler: %M)" greek "Αδύνατη η δημιουργία του αρχείου '%-.200s' (κωδικός λάθους: %M)" + hindi "फ़ाइल '%-.200s' नहीं बन सका (errno: %M)" hun "A '%-.200s' file nem hozhato letre (hibakod: %M)" ita "Impossibile creare il file '%-.200s' (errno: %M)" jpn "ファイル '%-.200s' を作成できません。(エラー番号: %M)" @@ -84,6 +87,7 @@ ER_CANT_CREATE_TABLE fre "Ne peut créer la table %`s.%`s (Errcode: %M)" ger "Kann Tabelle %`s.%`s nicht erzeugen (Fehler: %M)" greek "Αδύνατη η δημιουργία του πίνακα %`s.%`s (κωδικός λάθους: %M)" + hindi "टेबल '%`s.%`s' नहीं बन सका (errno: %M)" hun "A %`s.%`s tabla nem hozhato letre (hibakod: %M)" ita "Impossibile creare la tabella %`s.%`s (errno: %M)" jpn "%`s.%`s テーブルが作れません.(errno: %M)" @@ -108,6 +112,7 @@ ER_CANT_CREATE_DB fre "Ne peut créer la base '%-.192s' (Erreur %M)" ger "Kann Datenbank '%-.192s' nicht erzeugen (Fehler: %M)" greek "Αδύνατη η δημιουργία της βάσης δεδομένων '%-.192s' (κωδικός λάθους: %M)" + hindi "डेटाबेस '%-.192s' नहीं बन सका (errno: %M)" hun "Az '%-.192s' adatbazis nem hozhato letre (hibakod: %M)" ita "Impossibile creare il database '%-.192s' (errno: %M)" jpn "データベース '%-.192s' を作成できません。(エラー番号: %M)" @@ -132,6 +137,7 @@ ER_DB_CREATE_EXISTS fre "Ne peut créer la base '%-.192s'; elle existe déjà" ger "Kann Datenbank '%-.192s' nicht erzeugen. 
Datenbank existiert bereits" greek "Αδύνατη η δημιουργία της βάσης δεδομένων '%-.192s'; Η βάση δεδομένων υπάρχει ήδη" + hindi "डेटाबेस '%-.192s' नहीं बन सकता है; यह डेटाबेस पहले से ही मौजूद है" hun "Az '%-.192s' adatbazis nem hozhato letre Az adatbazis mar letezik" ita "Impossibile creare il database '%-.192s'; il database esiste" jpn "データベース '%-.192s' を作成できません。データベースはすでに存在します。" @@ -156,6 +162,7 @@ ER_DB_DROP_EXISTS fre "Ne peut effacer la base '%-.192s'; elle n'existe pas" ger "Kann Datenbank '%-.192s' nicht löschen; Datenbank nicht vorhanden" greek "Αδύνατη η διαγραφή της βάσης δεδομένων '%-.192s'. Η βάση δεδομένων δεν υπάρχει" + hindi "डेटाबेस '%-.192s' ड्रॉप नहीं कर सकते हैं; यह डेटाबेस मौजूद नहीं है" hun "A(z) '%-.192s' adatbazis nem szuntetheto meg. Az adatbazis nem letezik" ita "Impossibile cancellare '%-.192s'; il database non esiste" jpn "データベース '%-.192s' を削除できません。データベースは存在しません。" @@ -180,6 +187,7 @@ ER_DB_DROP_DELETE fre "Ne peut effacer la base '%-.192s' (erreur %M)" ger "Fehler beim Löschen der Datenbank ('%-.192s' kann nicht gelöscht werden, Fehler: %M)" greek "Παρουσιάστηκε πρόβλημα κατά τη διαγραφή της βάσης δεδομένων (αδύνατη η διαγραφή '%-.192s', κωδικός λάθους: %M)" + hindi "डेटाबेस ड्रॉप में त्रुटि हुई ('%-.192s' हटा नहीं सकते, errno: %M)" hun "Adatbazis megszuntetesi hiba ('%-.192s' nem torolheto, hibakod: %M)" ita "Errore durante la cancellazione del database (impossibile cancellare '%-.192s', errno: %M)" jpn "データベース削除エラー ('%-.192s' を削除できません。エラー番号: %M)" @@ -204,6 +212,7 @@ ER_DB_DROP_RMDIR fre "Erreur en effaçant la base (rmdir '%-.192s', erreur %M)" ger "Fehler beim Löschen der Datenbank (Verzeichnis '%-.192s' kann nicht gelöscht werden, Fehler: %M)" greek "Παρουσιάστηκε πρόβλημα κατά τη διαγραφή της βάσης δεδομένων (αδύνατη η διαγραφή του φακέλλου '%-.192s', κωδικός λάθους: %M)" + hindi "डेटाबेस ड्रॉप में त्रुटि हुई ('%-.192s' rmdir नहीं कर सकते, errno: %M)" hun "Adatbazis megszuntetesi hiba ('%-.192s' nem szuntetheto meg, hibakod: %M)" ita 
"Errore durante la cancellazione del database (impossibile rmdir '%-.192s', errno: %M)" jpn "データベース削除エラー (ディレクトリ '%-.192s' を削除できません。エラー番号: %M)" @@ -228,6 +237,7 @@ ER_CANT_DELETE_FILE fre "Erreur en effaçant '%-.192s' (Errcode: %M)" ger "Fehler beim Löschen von '%-.192s' (Fehler: %M)" greek "Παρουσιάστηκε πρόβλημα κατά τη διαγραφή '%-.192s' (κωδικός λάθους: %M)" + hindi "'%-.192s' के हटाने पर त्रुटि हुई (errno: %M)" hun "Torlesi hiba: '%-.192s' (hibakod: %M)" ita "Errore durante la cancellazione di '%-.192s' (errno: %M)" jpn "ファイル '%-.192s' の削除エラー (エラー番号: %M)" @@ -252,6 +262,7 @@ ER_CANT_FIND_SYSTEM_REC fre "Ne peut lire un enregistrement de la table 'system'" ger "Datensatz in der Systemtabelle nicht lesbar" greek "Αδύνατη η ανάγνωση εγγραφής από πίνακα του συστήματος" + hindi "सिस्टम टेबल से रिकॉर्ड नहीं पढ़ सके" hun "Nem olvashato rekord a rendszertablaban" ita "Impossibile leggere il record dalla tabella di sistema" jpn "システム表のレコードを読み込めません。" @@ -276,6 +287,7 @@ ER_CANT_GET_STAT fre "Ne peut obtenir le status de '%-.200s' (Errcode: %M)" ger "Kann Status von '%-.200s' nicht ermitteln (Fehler: %M)" greek "Αδύνατη η λήψη πληροφοριών για την κατάσταση του '%-.200s' (κωδικός λάθους: %M)" + hindi "'%-.200s' की अवस्था प्राप्त नहीं कर सके (errno: %M)" hun "A(z) '%-.200s' statusza nem allapithato meg (hibakod: %M)" ita "Impossibile leggere lo stato di '%-.200s' (errno: %M)" jpn "'%-.200s' の状態を取得できません。(エラー番号: %M)" @@ -300,6 +312,7 @@ ER_CANT_GET_WD fre "Ne peut obtenir le répertoire de travail (Errcode: %M)" ger "Kann Arbeitsverzeichnis nicht ermitteln (Fehler: %M)" greek "Ο φάκελλος εργασίας δεν βρέθηκε (κωδικός λάθους: %M)" + hindi "Working डाइरेक्टरी प्राप्त नहीं कर सके (errno: %M)" hun "A munkakonyvtar nem allapithato meg (hibakod: %M)" ita "Impossibile leggere la directory di lavoro (errno: %M)" jpn "作業ディレクトリを取得できません。(エラー番号: %M)" @@ -324,6 +337,7 @@ ER_CANT_LOCK fre "Ne peut verrouiller le fichier (Errcode: %M)" ger "Datei kann nicht gesperrt werden (Fehler: %M)" 
greek "Το αρχείο δεν μπορεί να κλειδωθεί (κωδικός λάθους: %M)" + hindi "फ़ाइल लॉक नहीं कर सके (errno: %M)" hun "A file nem zarolhato. (hibakod: %M)" ita "Impossibile il locking il file (errno: %M)" jpn "ファイルをロックできません。(エラー番号: %M)" @@ -348,6 +362,7 @@ ER_CANT_OPEN_FILE fre "Ne peut ouvrir le fichier: '%-.200s' (Errcode: %M)" ger "Kann Datei '%-.200s' nicht öffnen (Fehler: %M)" greek "Δεν είναι δυνατό να ανοιχτεί το αρχείο: '%-.200s' (κωδικός λάθους: %M)" + hindi "फ़ाइल '%-.200s' नहीं खोल सकते (errno: %M)" hun "A '%-.200s' file nem nyithato meg (hibakod: %M)" ita "Impossibile aprire il file: '%-.200s' (errno: %M)" jpn "ファイル '%-.200s' をオープンできません。(エラー番号: %M)" @@ -372,6 +387,7 @@ ER_FILE_NOT_FOUND fre "Ne peut trouver le fichier: '%-.200s' (Errcode: %M)" ger "Kann Datei '%-.200s' nicht finden (Fehler: %M)" greek "Δεν βρέθηκε το αρχείο: '%-.200s' (κωδικός λάθους: %M)" + hindi "फ़ाइल '%-.200s' नहीं मिला (errno: %M)" hun "A(z) '%-.200s' file nem talalhato (hibakod: %M)" ita "Impossibile trovare il file: '%-.200s' (errno: %M)" jpn "ファイル '%-.200s' が見つかりません。(エラー番号: %M)" @@ -396,6 +412,7 @@ ER_CANT_READ_DIR fre "Ne peut lire le répertoire de '%-.192s' (Errcode: %M)" ger "Verzeichnis von '%-.192s' nicht lesbar (Fehler: %M)" greek "Δεν είναι δυνατό να διαβαστεί ο φάκελλος του '%-.192s' (κωδικός λάθους: %M)" + hindi "'%-.192s' की डायरेक्टरी नहीं पढ़ सके (errno: %M)" hun "A(z) '%-.192s' konyvtar nem olvashato. (hibakod: %M)" ita "Impossibile leggere la directory di '%-.192s' (errno: %M)" jpn "ディレクトリ '%-.192s' を読み込めません。(エラー番号: %M)" @@ -420,6 +437,7 @@ ER_CANT_SET_WD fre "Ne peut changer le répertoire pour '%-.192s' (Errcode: %M)" ger "Kann nicht in das Verzeichnis '%-.192s' wechseln (Fehler: %M)" greek "Αδύνατη η αλλαγή του τρέχοντος καταλόγου σε '%-.192s' (κωδικός λάθους: %M)" + hindi "'%-.192s' डायरेक्टरी में नहीं बदल सके (errno: %M)" hun "Konyvtarvaltas nem lehetseges a(z) '%-.192s'-ba. 
(hibakod: %M)" ita "Impossibile cambiare la directory in '%-.192s' (errno: %M)" jpn "ディレクトリ '%-.192s' に移動できません。(エラー番号: %M)" @@ -444,6 +462,7 @@ ER_CHECKREAD fre "Enregistrement modifié depuis sa dernière lecture dans la table '%-.192s'" ger "Datensatz hat sich seit dem letzten Zugriff auf Tabelle '%-.192s' geändert" greek "Η εγγραφή έχει αλλάξει από την τελευταία φορά που ανασύρθηκε από τον πίνακα '%-.192s'" + hindi "रिकॉर्ड टेबल '%-.192s' पिछली बार पढ़े जाने के बाद से बदल गया है" hun "A(z) '%-.192s' tablaban talalhato rekord megvaltozott az utolso olvasas ota" ita "Il record e` cambiato dall'ultima lettura della tabella '%-.192s'" jpn "表 '%-.192s' の最後の読み込み時点から、レコードが変化しました。" @@ -468,6 +487,7 @@ ER_DISK_FULL fre "Disque plein (%s). J'attend que quelqu'un libère de l'espace... (Errcode: %M)" ger "Festplatte voll (%s). Warte, bis jemand Platz schafft ... (Fehler: %M)" greek "Δεν υπάρχει χώρος στο δίσκο (%s). Παρακαλώ, περιμένετε να ελευθερωθεί χώρος... (κωδικός λάθους: %M)" + hindi "डिस्क पूरी तरह से भरा हुआ है (%s); कुछ स्थान खाली करें (errno: %M)" hun "A lemez megtelt (%s). (hibakod: %M)" ita "Disco pieno (%s). In attesa che qualcuno liberi un po' di spazio... 
(errno: %M)" jpn "ディスク領域不足です(%s)。(エラー番号: %M)" @@ -492,6 +512,7 @@ ER_DUP_KEY 23000 fre "Ecriture impossible, doublon dans une clé de la table '%-.192s'" ger "Kann nicht speichern, Grund: doppelter Schlüssel in Tabelle '%-.192s'" greek "Δεν είναι δυνατή η καταχώρηση, η τιμή υπάρχει ήδη στον πίνακα '%-.192s'" + hindi "टेबल '%-.192s' में DUPLICATE KEY मौजूद होने के कारण नहीं लिख सके" hun "Irasi hiba, duplikalt kulcs a '%-.192s' tablaban" ita "Scrittura impossibile: chiave duplicata nella tabella '%-.192s'" jpn "書き込めません。表 '%-.192s' に重複するキーがあります。" @@ -516,6 +537,7 @@ ER_ERROR_ON_CLOSE fre "Erreur a la fermeture de '%-.192s' (Errcode: %M)" ger "Fehler beim Schließen von '%-.192s' (Fehler: %M)" greek "Παρουσιάστηκε πρόβλημα κλείνοντας το '%-.192s' (κωδικός λάθους: %M)" + hindi "'%-.192s' के बंद पर त्रुटि हुई (errno: %M)" hun "Hiba a(z) '%-.192s' zarasakor. (hibakod: %M)" ita "Errore durante la chiusura di '%-.192s' (errno: %M)" jpn "'%-.192s' のクローズ時エラー (エラー番号: %M)" @@ -540,6 +562,7 @@ ER_ERROR_ON_READ fre "Erreur en lecture du fichier '%-.200s' (Errcode: %M)" ger "Fehler beim Lesen der Datei '%-.200s' (Fehler: %M)" greek "Πρόβλημα κατά την ανάγνωση του αρχείου '%-.200s' (κωδικός λάθους: %M)" + hindi "फ़ाइल '%-.200s' पढ़ने में त्रुटि हुई (errno: %M)" hun "Hiba a '%-.200s'file olvasasakor. (hibakod: %M)" ita "Errore durante la lettura del file '%-.200s' (errno: %M)" jpn "ファイル '%-.200s' の読み込みエラー (エラー番号: %M)" @@ -564,6 +587,7 @@ ER_ERROR_ON_RENAME fre "Erreur en renommant '%-.210s' en '%-.210s' (Errcode: %M)" ger "Fehler beim Umbenennen von '%-.210s' in '%-.210s' (Fehler: %M)" greek "Πρόβλημα κατά την μετονομασία του αρχείου '%-.210s' to '%-.210s' (κωδικός λάθους: %M)" + hindi "'%-.210s' का नाम '%-.210s' बदलने पर त्रुटि हुई (errno: %M)" hun "Hiba a '%-.210s' file atnevezesekor '%-.210s'. 
(hibakod: %M)" ita "Errore durante la rinominazione da '%-.210s' a '%-.210s' (errno: %M)" jpn "'%-.210s' の名前を '%-.210s' に変更できません (エラー番号: %M)" @@ -588,6 +612,7 @@ ER_ERROR_ON_WRITE fre "Erreur d'écriture du fichier '%-.200s' (Errcode: %M)" ger "Fehler beim Speichern der Datei '%-.200s' (Fehler: %M)" greek "Πρόβλημα κατά την αποθήκευση του αρχείου '%-.200s' (κωδικός λάθους: %M)" + hindi "फ़ाइल '%-.200s' लिखने में त्रुटि हुई (errno: %M)" hun "Hiba a '%-.200s' file irasakor. (hibakod: %M)" ita "Errore durante la scrittura del file '%-.200s' (errno: %M)" jpn "ファイル '%-.200s' の書き込みエラー (エラー番号: %M)" @@ -612,6 +637,7 @@ ER_FILE_USED fre "'%-.192s' est verrouillé contre les modifications" ger "'%-.192s' ist für Änderungen gesperrt" greek "'%-.192s' δεν επιτρέπονται αλλαγές" + hindi "फ़ाइल '%-.192s' में कोई बदलाव नहीं कर सकते" hun "'%-.192s' a valtoztatas ellen zarolva" ita "'%-.192s' e` soggetto a lock contro i cambiamenti" jpn "'%-.192s' はロックされています。" @@ -636,6 +662,7 @@ ER_FILSORT_ABORT fre "Tri alphabétique abandonné" ger "Sortiervorgang abgebrochen" greek "Η διαδικασία ταξινόμισης ακυρώθηκε" + hindi "SORT निरस्त" hun "Sikertelen rendezes" ita "Operazione di ordinamento abbandonata" jpn "ソート処理を中断しました。" @@ -660,6 +687,7 @@ ER_FORM_NOT_FOUND fre "La vue (View) '%-.192s' n'existe pas pour '%-.192s'" ger "View '%-.192s' existiert für '%-.192s' nicht" greek "Το View '%-.192s' δεν υπάρχει για '%-.192s'" + hindi "VIEW '%-.192s', '%-.192s' के लिए मौजूद नहीं है" hun "A(z) '%-.192s' nezet nem letezik a(z) '%-.192s'-hoz" ita "La view '%-.192s' non esiste per '%-.192s'" jpn "ビュー '%-.192s' は '%-.192s' に存在しません。" @@ -681,6 +709,7 @@ ER_GET_ERRNO fre "Reçu l'erreur %M du handler de la table %s" ger "Fehler %M von Speicher-Engine %s" greek "Ελήφθη μήνυμα λάθους %M από τον χειριστή πίνακα (table handler) %s" + hindi "%M त्रुटि %s स्टोरेज इंजन से" ita "Rilevato l'errore %M dal gestore delle tabelle %s" nor "Mottok feil %M fra tabell håndterer %s" norwegian-ny "Mottok feil %M fra tabell 
handterar %s" @@ -694,6 +723,7 @@ ER_GET_ERRNO ER_ILLEGAL_HA eng "Storage engine %s of the table %`s.%`s doesn't have this option" ger "Diese Option gibt es nicht in Speicher-Engine %s für %`s.%`s" + hindi "स्टोरेज इंजन %s में यह विकल्प उपलब्ध नहीं है (टेबल: %`s.%`s)" rus "Обработчик %s таблицы %`s.%`s не поддерживает эту возможность" ukr "Дескриптор %s таблиці %`s.%`s не має цієї властивості" ER_KEY_NOT_FOUND @@ -705,6 +735,7 @@ ER_KEY_NOT_FOUND fre "Ne peut trouver l'enregistrement dans '%-.192s'" ger "Kann Datensatz in '%-.192s' nicht finden" greek "Αδύνατη η ανεύρεση εγγραφής στο '%-.192s'" + hindi "'%-.192s' में रिकॉर्ड नहीं मिला" hun "Nem talalhato a rekord '%-.192s'-ben" ita "Impossibile trovare il record in '%-.192s'" jpn "'%-.192s' にレコードが見つかりません。" @@ -729,6 +760,7 @@ ER_NOT_FORM_FILE fre "Information erronnée dans le fichier: '%-.200s'" ger "Falsche Information in Datei '%-.200s'" greek "Λάθος πληροφορίες στο αρχείο: '%-.200s'" + hindi "फ़ाइल '%-.200s' में गलत जानकारी है" hun "Ervenytelen info a file-ban: '%-.200s'" ita "Informazione errata nel file: '%-.200s'" jpn "ファイル '%-.200s' 内の情報が不正です。" @@ -753,6 +785,7 @@ ER_NOT_KEYFILE fre "Index corrompu dans la table: '%-.200s'; essayez de le réparer" ger "Fehlerhafte Index-Datei für Tabelle '%-.200s'; versuche zu reparieren" greek "Λάθος αρχείο ταξινόμισης (key file) για τον πίνακα: '%-.200s'; Παρακαλώ, διορθώστε το!" + hindi "टेबल '%-.200s' का इंडेक्स CORRUPT हो गया है; इसे REPAIR करने की कोशिश करें" hun "Ervenytelen kulcsfile a tablahoz: '%-.200s'; probalja kijavitani!" ita "File chiave errato per la tabella : '%-.200s'; prova a riparalo" jpn "表 '%-.200s' の索引ファイル(key file)の内容が不正です。修復を試行してください。" @@ -777,6 +810,7 @@ ER_OLD_KEYFILE fre "Vieux fichier d'index pour la table '%-.192s'; réparez le!" ger "Alte Index-Datei für Tabelle '%-.192s'. Bitte reparieren" greek "Παλαιό αρχείο ταξινόμισης (key file) για τον πίνακα '%-.192s'; Παρακαλώ, διορθώστε το!" 
+ hindi "टेबल '%-.192s' के लिए पुरानी KEY फ़ाइल; इसे REPAIR करने की कोशिश करें" hun "Regi kulcsfile a '%-.192s'tablahoz; probalja kijavitani!" ita "File chiave vecchio per la tabella '%-.192s'; riparalo!" jpn "表 '%-.192s' の索引ファイル(key file)は古い形式です。修復してください。" @@ -801,6 +835,7 @@ ER_OPEN_AS_READONLY fre "'%-.192s' est en lecture seulement" ger "Tabelle '%-.192s' ist nur lesbar" greek "'%-.192s' επιτρέπεται μόνο η ανάγνωση" + hindi "टेबल '%-.192s' READ-ONLY है" hun "'%-.192s' irasvedett" ita "'%-.192s' e` di sola lettura" jpn "表 '%-.192s' は読み込み専用です。" @@ -897,6 +932,7 @@ ER_CON_COUNT_ERROR 08004 fre "Trop de connexions" ger "Zu viele Verbindungen" greek "Υπάρχουν πολλές συνδέσεις..." + hindi "अत्यधिक कनेक्शन" hun "Tul sok kapcsolat" ita "Troppe connessioni" jpn "接続が多すぎます。" @@ -945,6 +981,7 @@ ER_BAD_HOST_ERROR 08S01 fre "Ne peut obtenir de hostname pour votre adresse" ger "Kann Hostnamen für diese Adresse nicht erhalten" greek "Δεν έγινε γνωστό το hostname για την address σας" + hindi "आपके I.P. 
ऐड्रेस के लिए होस्टनेम प्राप्त करने में विफल रहे" hun "A gepnev nem allapithato meg a cimbol" ita "Impossibile risalire al nome dell'host dall'indirizzo (risoluzione inversa)" jpn "IPアドレスからホスト名を解決できません。" @@ -969,6 +1006,7 @@ ER_HANDSHAKE_ERROR 08S01 fre "Mauvais 'handshake'" ger "Ungültiger Handshake" greek "Η αναγνώριση (handshake) δεν έγινε σωστά" + hindi "संपर्क स्थापित करते समय त्रुटि हुई (BAD HANDSHAKE)" hun "A kapcsolatfelvetel nem sikerult (Bad handshake)" ita "Negoziazione impossibile" jpn "ハンドシェイクエラー" @@ -994,6 +1032,7 @@ ER_DBACCESS_DENIED_ERROR 42000 ger "Benutzer '%s'@'%s' hat keine Zugriffsberechtigung für Datenbank '%-.192s'" greek "Δεν επιτέρεται η πρόσβαση στο χρήστη: '%s'@'%s' στη βάση δεδομένων '%-.192s'" hun "A(z) '%s'@'%s' felhasznalo szamara tiltott eleres az '%-.192s' adabazishoz" + hindi "यूज़र '%s'@'%s' को डेटाबेस '%-.192s' की अनुमति नहीं है" ita "Accesso non consentito per l'utente: '%s'@'%s' al database '%-.192s'" jpn "ユーザー '%s'@'%s' の '%-.192s' データベースへのアクセスを拒否します" kor "'%s'@'%s' 사용자는 '%-.192s' 데이타베이스에 접근이 거부 되었습니다." @@ -1017,6 +1056,7 @@ ER_ACCESS_DENIED_ERROR 28000 fre "Accès refusé pour l'utilisateur: '%s'@'%s' (mot de passe: %s)" ger "Benutzer '%s'@'%s' hat keine Zugriffsberechtigung (verwendetes Passwort: %s)" greek "Δεν επιτέρεται η πρόσβαση στο χρήστη: '%s'@'%s' (χρήση password: %s)" + hindi "यूज़र '%s'@'%s' को अनुमति नहीं है (पासवर्ड का उपयोग: %s)" hun "A(z) '%s'@'%s' felhasznalo szamara tiltott eleres. 
(Hasznalja a jelszot: %s)" ita "Accesso non consentito per l'utente: '%s'@'%s' (Password: %s)" jpn "ユーザー '%s'@'%s' を拒否します.uUsing password: %s)" @@ -1040,6 +1080,7 @@ ER_NO_DB_ERROR 3D000 fre "Aucune base n'a été sélectionnée" ger "Keine Datenbank ausgewählt" greek "Δεν επιλέχθηκε βάση δεδομένων" + hindi "किसी भी डेटाबेस का चयन नहीं किया गया है" hun "Nincs kivalasztott adatbazis" ita "Nessun database selezionato" jpn "データベースが選択されていません。" @@ -1064,6 +1105,7 @@ ER_UNKNOWN_COM_ERROR 08S01 fre "Commande inconnue" ger "Unbekannter Befehl" greek "Αγνωστη εντολή" + hindi "अज्ञात आदेश" hun "Ervenytelen parancs" ita "Comando sconosciuto" jpn "不明なコマンドです。" @@ -1088,6 +1130,7 @@ ER_BAD_NULL_ERROR 23000 fre "Le champ '%-.192s' ne peut être vide (null)" ger "Feld '%-.192s' darf nicht NULL sein" greek "Το πεδίο '%-.192s' δεν μπορεί να είναι κενό (null)" + hindi "काँलम '%-.192s' NULL नहीं हो सकता" hun "A(z) '%-.192s' oszlop erteke nem lehet nulla" ita "La colonna '%-.192s' non puo` essere nulla" jpn "列 '%-.192s' は null にできません。" @@ -1112,6 +1155,7 @@ ER_BAD_DB_ERROR 42000 fre "Base '%-.192s' inconnue" ger "Unbekannte Datenbank '%-.192s'" greek "Αγνωστη βάση δεδομένων '%-.192s'" + hindi "अज्ञात डाटाबेस '%-.192s'" hun "Ervenytelen adatbazis: '%-.192s'" ita "Database '%-.192s' sconosciuto" jpn "'%-.192s' は不明なデータベースです。" @@ -1136,6 +1180,7 @@ ER_TABLE_EXISTS_ERROR 42S01 fre "La table '%-.192s' existe déjà" ger "Tabelle '%-.192s' bereits vorhanden" greek "Ο πίνακας '%-.192s' υπάρχει ήδη" + hindi "टेबल '%-.192s' पहले से ही मौजूद है" hun "A(z) '%-.192s' tabla mar letezik" ita "La tabella '%-.192s' esiste gia`" jpn "表 '%-.192s' はすでに存在します。" @@ -1160,6 +1205,7 @@ ER_BAD_TABLE_ERROR 42S02 fre "Table '%-.100s' inconnue" ger "Unbekannte Tabelle '%-.100s'" greek "Αγνωστος πίνακας '%-.100s'" + hindi "अज्ञात टेबल '%-.100s'" hun "Ervenytelen tabla: '%-.100s'" ita "Tabella '%-.100s' sconosciuta" jpn "'%-.100s' は不明な表です。" @@ -1184,6 +1230,7 @@ ER_NON_UNIQ_ERROR 23000 fre "Champ: '%-.192s' dans %-.192s 
est ambigu" ger "Feld '%-.192s' in %-.192s ist nicht eindeutig" greek "Το πεδίο: '%-.192s' σε %-.192s δεν έχει καθοριστεί" + hindi "काँलम '%-.192s' अस्पष्ट है (टेबल: %-.192s)" hun "A(z) '%-.192s' oszlop %-.192s-ben ketertelmu" ita "Colonna: '%-.192s' di %-.192s e` ambigua" jpn "列 '%-.192s' は %-.192s 内で曖昧です。" @@ -1208,6 +1255,7 @@ ER_SERVER_SHUTDOWN 08S01 fre "Arrêt du serveur en cours" ger "Der Server wird heruntergefahren" greek "Εναρξη διαδικασίας αποσύνδεσης του εξυπηρετητή (server shutdown)" + hindi "सर्वर बंद हो रहा है" hun "A szerver leallitasa folyamatban" ita "Shutdown del server in corso" jpn "サーバーをシャットダウン中です。" @@ -1232,6 +1280,7 @@ ER_BAD_FIELD_ERROR 42S22 S0022 fre "Champ '%-.192s' inconnu dans %-.192s" ger "Unbekanntes Tabellenfeld '%-.192s' in %-.192s" greek "Αγνωστο πεδίο '%-.192s' σε '%-.192s'" + hindi "अज्ञात काँलम '%-.192s'(टेबल: '%-.192s')" hun "A(z) '%-.192s' oszlop ervenytelen '%-.192s'-ben" ita "Colonna sconosciuta '%-.192s' in '%-.192s'" jpn "列 '%-.192s' は '%-.192s' にはありません。" @@ -1256,6 +1305,7 @@ ER_WRONG_FIELD_WITH_GROUP 42000 S1009 fre "'%-.192s' n'est pas dans 'group by'" ger "'%-.192s' ist nicht in GROUP BY vorhanden" greek "Χρησιμοποιήθηκε '%-.192s' που δεν υπήρχε στο group by" + hindi "'%-.192s' GROUP BY में नहीं है" hun "Used '%-.192s' with wasn't in group by" ita "Usato '%-.192s' che non e` nel GROUP BY" jpn "'%-.192s' はGROUP BY句で指定されていません。" @@ -1280,6 +1330,7 @@ ER_WRONG_GROUP_FIELD 42000 S1009 fre "Ne peut regrouper '%-.192s'" ger "Gruppierung über '%-.192s' nicht möglich" greek "Αδύνατη η ομαδοποίηση (group on) '%-.192s'" + hindi "'%-.192s' पर GROUP नहीं कर सकते" hun "A group nem hasznalhato: '%-.192s'" ita "Impossibile raggruppare per '%-.192s'" jpn "'%-.192s' でのグループ化はできません。" @@ -1326,6 +1377,7 @@ ER_WRONG_VALUE_COUNT 21S01 est "Tulpade arv erineb väärtuste arvust" ger "Die Anzahl der Spalten entspricht nicht der Anzahl der Werte" greek "Το Column count δεν ταιριάζει με το value count" + hindi "कॉलम की गिनती मूल्य की गिनती के समान 
नही है" hun "Az oszlopban levo ertek nem egyezik meg a szamitott ertekkel" ita "Il numero delle colonne non e` uguale al numero dei valori" jpn "列数が値の個数と一致しません。" @@ -1350,6 +1402,7 @@ ER_TOO_LONG_IDENT 42000 S1009 fre "Le nom de l'identificateur '%-.100s' est trop long" ger "Name des Bezeichners '%-.100s' ist zu lang" greek "Το identifier name '%-.100s' είναι πολύ μεγάλο" + hindi "पहचानकर्ता का नाम '%-.100s' बहुत लंबा है" hun "A(z) '%-.100s' azonositonev tul hosszu" ita "Il nome dell'identificatore '%-.100s' e` troppo lungo" jpn "識別子名 '%-.100s' は長すぎます。" @@ -1374,6 +1427,7 @@ ER_DUP_FIELDNAME 42S21 S1009 fre "Nom du champ '%-.192s' déjà utilisé" ger "Doppelter Spaltenname: '%-.192s'" greek "Επανάληψη column name '%-.192s'" + hindi "समान कॉलम '%-.192s' मौजूद है" hun "Duplikalt oszlopazonosito: '%-.192s'" ita "Nome colonna duplicato '%-.192s'" jpn "列名 '%-.192s' は重複してます。" @@ -1398,6 +1452,7 @@ ER_DUP_KEYNAME 42000 S1009 fre "Nom de clef '%-.192s' déjà utilisé" ger "Doppelter Name für Schlüssel vorhanden: '%-.192s'" greek "Επανάληψη key name '%-.192s'" + hindi "समान KEY '%-.192s' मौजूद है" hun "Duplikalt kulcsazonosito: '%-.192s'" ita "Nome chiave duplicato '%-.192s'" jpn "索引名 '%-.192s' は重複しています。" @@ -1424,6 +1479,7 @@ ER_DUP_ENTRY 23000 S1009 fre "Duplicata du champ '%-.192s' pour la clef %d" ger "Doppelter Eintrag '%-.192s' für Schlüssel %d" greek "Διπλή εγγραφή '%-.192s' για το κλειδί %d" + hindi "सामान प्रवेश '%-.192s' KEY %d के लिए" hun "Duplikalt bejegyzes '%-.192s' a %d kulcs szerint" ita "Valore duplicato '%-.192s' per la chiave %d" jpn "'%-.192s' は索引 %d で重複しています。" @@ -1448,6 +1504,7 @@ ER_WRONG_FIELD_SPEC 42000 S1009 fre "Mauvais paramètre de champ pour le champ '%-.192s'" ger "Falsche Spezifikation für Feld '%-.192s'" greek "Εσφαλμένο column specifier για το πεδίο '%-.192s'" + hindi "कॉलम '%-.192s' के लिए गलत कॉलम विनिर्देशक" hun "Rossz oszlopazonosito: '%-.192s'" ita "Specifica errata per la colonna '%-.192s'" jpn "列 '%-.192s' の定義が不正です。" @@ -1472,6 +1529,7 @@ 
ER_PARSE_ERROR 42000 s1009 fre "%s près de '%-.80s' à la ligne %d" ger "%s bei '%-.80s' in Zeile %d" greek "%s πλησίον '%-.80s' στη γραμμή %d" + hindi "%s के पास '%-.80s' लाइन %d में" hun "A %s a '%-.80s'-hez kozeli a %d sorban" ita "%s vicino a '%-.80s' linea %d" jpn "%s : '%-.80s' 付近 %d 行目" @@ -1496,6 +1554,7 @@ ER_EMPTY_QUERY 42000 fre "Query est vide" ger "Leere Abfrage" greek "Το ερώτημα (query) που θέσατε ήταν κενό" + hindi "क्वेरी खली थी" hun "Ures lekerdezes" ita "La query e` vuota" jpn "クエリが空です。" @@ -1520,6 +1579,7 @@ ER_NONUNIQ_TABLE 42000 S1009 fre "Table/alias: '%-.192s' non unique" ger "Tabellenname/Alias '%-.192s' nicht eindeutig" greek "Αδύνατη η ανεύρεση unique table/alias: '%-.192s'" + hindi "टेबल या उसका उपनाम '%-.192s' अद्वितीय नहीं है" hun "Nem egyedi tabla/alias: '%-.192s'" ita "Tabella/alias non unico: '%-.192s'" jpn "表名/別名 '%-.192s' は一意ではありません。" @@ -1544,6 +1604,7 @@ ER_INVALID_DEFAULT 42000 S1009 fre "Valeur par défaut invalide pour '%-.192s'" ger "Fehlerhafter Vorgabewert (DEFAULT) für '%-.192s'" greek "Εσφαλμένη προκαθορισμένη τιμή (default value) για '%-.192s'" + hindi "'%-.192s' के लिए अवैध डिफ़ॉल्ट मान" hun "Ervenytelen ertek: '%-.192s'" ita "Valore di default non valido per '%-.192s'" jpn "'%-.192s' へのデフォルト値が無効です。" @@ -1568,6 +1629,7 @@ ER_MULTIPLE_PRI_KEY 42000 S1009 fre "Plusieurs clefs primaires définies" ger "Mehrere Primärschlüssel (PRIMARY KEY) definiert" greek "Περισσότερα από ένα primary key ορίστηκαν" + hindi "कई PRIMARY KEY परिभाषित" hun "Tobbszoros elsodleges kulcs definialas" ita "Definite piu` chiave primarie" jpn "PRIMARY KEY が複数定義されています。" @@ -1593,6 +1655,7 @@ ER_TOO_MANY_KEYS 42000 S1009 ger "Zu viele Schlüssel definiert. Maximal %d Schlüssel erlaubt" greek "Πάρα πολλά key ορίσθηκαν. Το πολύ %d επιτρέπονται" hun "Tul sok kulcs. Maximum %d kulcs engedelyezett" + hindi "बहुत सारी KEYS निर्दिष्ट हैं; अधिकतम %d KEYS की अनुमति है" ita "Troppe chiavi. 
Sono ammesse max %d chiavi" jpn "索引の数が多すぎます。最大 %d 個までです。" kor "너무 많은 키가 정의되어 있읍니다.. 최대 %d의 키가 가능함" @@ -1616,6 +1679,7 @@ ER_TOO_MANY_KEY_PARTS 42000 S1009 fre "Trop de parties specifiées dans la clef. Maximum de %d parties" ger "Zu viele Teilschlüssel definiert. Maximal %d Teilschlüssel erlaubt" greek "Πάρα πολλά key parts ορίσθηκαν. Το πολύ %d επιτρέπονται" + hindi "बहुत सारे KEY के भाग निर्दिष्ट हैं; अधिकतम %d भागों की अनुमति है" hun "Tul sok kulcsdarabot definialt. Maximum %d resz engedelyezett" ita "Troppe parti di chiave specificate. Sono ammesse max %d parti" jpn "索引のキー列指定が多すぎます。最大 %d 個までです。" @@ -1640,6 +1704,7 @@ ER_TOO_LONG_KEY 42000 S1009 fre "La clé est trop longue. Longueur maximale: %d" ger "Schlüssel ist zu lang. Die maximale Schlüssellänge beträgt %d" greek "Το κλειδί που ορίσθηκε είναι πολύ μεγάλο. Το μέγιστο μήκος είναι %d" + hindi "निर्दिष्ट KEY बहुत लंबी थी; KEY की अधिकतम लंबाई %d बाइट है" hun "A megadott kulcs tul hosszu. Maximalis kulcshosszusag: %d" ita "La chiave specificata e` troppo lunga. 
La max lunghezza della chiave e` %d" jpn "索引のキーが長すぎます。最大 %d バイトまでです。" @@ -1664,6 +1729,7 @@ ER_KEY_COLUMN_DOES_NOT_EXITS 42000 S1009 fre "La clé '%-.192s' n'existe pas dans la table" ger "In der Tabelle gibt es kein Schlüsselfeld '%-.192s'" greek "Το πεδίο κλειδί '%-.192s' δεν υπάρχει στον πίνακα" + hindi "KEY कॉलम '%-.192s' टेबल में मौजूद नहीं है" hun "A(z) '%-.192s'kulcsoszlop nem letezik a tablaban" ita "La colonna chiave '%-.192s' non esiste nella tabella" jpn "キー列 '%-.192s' は表にありません。" @@ -1682,6 +1748,7 @@ ER_KEY_COLUMN_DOES_NOT_EXITS 42000 S1009 ER_BLOB_USED_AS_KEY 42000 S1009 eng "BLOB column %`s can't be used in key specification in the %s table" ger "BLOB-Feld %`s kann beim %s Tabellen nicht als Schlüssel verwendet werden" + hindi "BLOB कॉलम %`s टेबल %s में KEY विनिर्देश में इस्तेमाल नहीं किया जा सकता" rus "Столбец типа BLOB %`s не может быть использован как значение ключа в %s таблице" ukr "BLOB стовбець %`s не може бути використаний у визначенні ключа в %s таблиці" ER_TOO_BIG_FIELDLENGTH 42000 S1009 @@ -1693,6 +1760,7 @@ ER_TOO_BIG_FIELDLENGTH 42000 S1009 fre "Champ '%-.192s' trop long (max = %lu). Utilisez un BLOB" ger "Feldlänge für Feld '%-.192s' zu groß (maximal %lu). BLOB- oder TEXT-Spaltentyp verwenden!" greek "Πολύ μεγάλο μήκος για το πεδίο '%-.192s' (max = %lu). Παρακαλώ χρησιμοποιείστε τον τύπο BLOB" + hindi "कॉलम की लंबाई कॉलम '%-.192s' के लिए बड़ी है (अधिकतम = %lu); BLOB या TEXT का उपयोग करें" hun "A(z) '%-.192s' oszlop tul hosszu. (maximum = %lu). Hasznaljon BLOB tipust inkabb" ita "La colonna '%-.192s' e` troppo grande (max=%lu). Utilizza un BLOB" jpn "列 '%-.192s' のサイズ定義が大きすぎます (最大 %lu まで)。代わりに BLOB または TEXT を使用してください。" @@ -1717,6 +1785,7 @@ ER_WRONG_AUTO_KEY 42000 S1009 fre "Un seul champ automatique est permis et il doit être indexé" ger "Falsche Tabellendefinition. 
Es darf nur eine AUTO_INCREMENT-Spalte geben, und diese muss als Schlüssel definiert werden" greek "Μπορεί να υπάρχει μόνο ένα auto field και πρέπει να έχει ορισθεί σαν key" + hindi "गलत टेबल परिभाषा; टेबल में केवल एक AUTO_INCREMENT कॉलम हो सकता है और इसे एक KEY के रूप में परिभाषित किया जाना चाहिए" hun "Csak egy auto mezo lehetseges, es azt kulcskent kell definialni" ita "Puo` esserci solo un campo AUTO e deve essere definito come chiave" jpn "不正な表定義です。AUTO_INCREMENT列は1個までで、索引を定義する必要があります。" @@ -1743,6 +1812,7 @@ ER_NORMAL_SHUTDOWN fre "%s (%s): Arrêt normal du serveur\n" ger "%s (%s): Normal heruntergefahren\n" greek "%s (%s): Φυσιολογική διαδικασία shutdown\n" + hindi "%s (%s): सामान्य शटडाउन\n" hun "%s (%s): Normal leallitas\n" ita "%s (%s): Shutdown normale\n" jpn "%s (%s): 通常シャットダウン\n" @@ -1767,6 +1837,7 @@ ER_GOT_SIGNAL fre "%s: Reçu le signal %d. Abandonne!\n" ger "%s: Signal %d erhalten. Abbruch!\n" greek "%s: Ελήφθη το μήνυμα %d. Η διαδικασία εγκαταλείπεται!\n" + hindi "%s: सिग्नल %d मिलने के कारण सिस्टम बंद किया जा रहा है!\n" hun "%s: %d jelzes. Megszakitva!\n" ita "%s: Ricevuto segnale %d. Interruzione!\n" jpn "%s: シグナル %d を受信しました。強制終了します!\n" @@ -1791,6 +1862,7 @@ ER_SHUTDOWN_COMPLETE fre "%s: Arrêt du serveur terminé\n" ger "%s: Herunterfahren beendet\n" greek "%s: Η διαδικασία Shutdown ολοκληρώθηκε\n" + hindi "%s: शटडाउन पूर्ण\n" hun "%s: A leallitas kesz\n" ita "%s: Shutdown completato\n" jpn "%s: シャットダウン完了\n" @@ -1815,6 +1887,7 @@ ER_FORCING_CLOSE 08S01 fre "%s: Arrêt forcé de la tâche (thread) %ld utilisateur: '%-.48s'\n" ger "%s: Thread %ld zwangsweise beendet. Benutzer: '%-.48s'\n" greek "%s: Το thread θα κλείσει %ld user: '%-.48s'\n" + hindi "%s: %ld थ्रेड बंद किया जा रहा है (यूज़र: '%-.48s')\n" hun "%s: A(z) %ld thread kenyszeritett zarasa. 
Felhasznalo: '%-.48s'\n" ita "%s: Forzata la chiusura del thread %ld utente: '%-.48s'\n" jpn "%s: スレッド %ld を強制終了します (ユーザー: '%-.48s')\n" @@ -1839,6 +1912,7 @@ ER_IPSOCK_ERROR 08S01 fre "Ne peut créer la connexion IP (socket)" ger "Kann IP-Socket nicht erzeugen" greek "Δεν είναι δυνατή η δημιουργία IP socket" + hindi "IP SOCKET नहीं बना सकते" hun "Az IP socket nem hozhato letre" ita "Impossibile creare il socket IP" jpn "IPソケットを作成できません。" @@ -1863,6 +1937,7 @@ ER_NO_SUCH_INDEX 42S12 S1009 fre "La table '%-.192s' n'a pas d'index comme celle utilisée dans CREATE INDEX. Recréez la table" ger "Tabelle '%-.192s' besitzt keinen wie den in CREATE INDEX verwendeten Index. Tabelle neu anlegen" greek "Ο πίνακας '%-.192s' δεν έχει ευρετήριο (index) σαν αυτό που χρησιμοποιείτε στην CREATE INDEX. Παρακαλώ, ξαναδημιουργήστε τον πίνακα" + hindi "CREATE INDEX में इस्तेमाल की गयी सूचि टेबल '%-.192s' में उपलब्ध नहीं है; टेबल को पुनः बनायें" hun "A(z) '%-.192s' tablahoz nincs meg a CREATE INDEX altal hasznalt index. Alakitsa at a tablat" ita "La tabella '%-.192s' non ha nessun indice come quello specificatato dalla CREATE INDEX. Ricrea la tabella" jpn "表 '%-.192s' に以前CREATE INDEXで作成された索引がありません。表を作り直してください。" @@ -1887,6 +1962,7 @@ ER_WRONG_FIELD_TERMINATORS 42000 S1009 fre "Séparateur de champs inconnu. Vérifiez dans le manuel" ger "Feldbegrenzer-Argument ist nicht in der erwarteten Form. Bitte im Handbuch nachlesen" greek "Ο διαχωριστής πεδίων δεν είναι αυτός που αναμενόταν. Παρακαλώ ανατρέξτε στο manual" + hindi "फील्ड विभाजक आर्गुमेंट गलत है; मैनुअल की जाँच करें" hun "A mezoelvalaszto argumentumok nem egyeznek meg a varttal. Nezze meg a kezikonyvben!" ita "L'argomento 'Field separator' non e` quello atteso. Controlla il manuale" jpn "フィールド区切り文字が予期せぬ使われ方をしています。マニュアルを確認して下さい。" @@ -1911,6 +1987,7 @@ ER_BLOBS_AND_NO_TERMINATED 42000 S1009 fre "Vous ne pouvez utiliser des lignes de longueur fixe avec des BLOBs. 
Utiliser 'fields terminated by'" ger "Eine feste Zeilenlänge kann für BLOB-Felder nicht verwendet werden. Bitte 'fields terminated by' verwenden" greek "Δεν μπορείτε να χρησιμοποιήσετε fixed rowlength σε BLOBs. Παρακαλώ χρησιμοποιείστε 'fields terminated by'" + hindi "BLOBs को निश्चित लंबाई की पंक्ति के साथ प्रयोग नहीं किया जा सकता है; 'FIELDS TERMINATED BY' का इस्तेमाल करें" hun "Fix hosszusagu BLOB-ok nem hasznalhatok. Hasznalja a 'mezoelvalaszto jelet' " ita "Non possono essere usate righe a lunghezza fissa con i BLOB. Usa 'FIELDS TERMINATED BY'" jpn "BLOBには固定長レコードが使用できません。'FIELDS TERMINATED BY'句を使用して下さい。" @@ -1935,6 +2012,7 @@ ER_TEXTFILE_NOT_READABLE fre "Le fichier '%-.128s' doit être dans le répertoire de la base et lisible par tous" ger "Datei '%-.128s' muss im Datenbank-Verzeichnis vorhanden oder lesbar für alle sein" greek "Το αρχείο '%-.128s' πρέπει να υπάρχει στο database directory ή να μπορεί να διαβαστεί από όλους" + hindi "फ़ाइल '%-.128s' डेटाबेस डायरेक्टरी में या सभी के द्वारा पठनीय होना चाहिए" hun "A(z) '%-.128s'-nak az adatbazis konyvtarban kell lennie, vagy mindenki szamara olvashatonak" ita "Il file '%-.128s' deve essere nella directory del database e deve essere leggibile da tutti" jpn "ファイル '%-.128s' はデータベースディレクトリにあるか、全てのユーザーから読める必要があります。" @@ -1959,6 +2037,7 @@ ER_FILE_EXISTS_ERROR fre "Le fichier '%-.200s' existe déjà" ger "Datei '%-.200s' bereits vorhanden" greek "Το αρχείο '%-.200s' υπάρχει ήδη" + hindi "फ़ाइल '%-.200s' पहले से मौजूद है" hun "A '%-.200s' file mar letezik" ita "Il file '%-.200s' esiste gia`" jpn "ファイル '%-.200s' はすでに存在します。" @@ -1983,6 +2062,7 @@ ER_LOAD_INFO fre "Enregistrements: %ld Effacés: %ld Non traités: %ld Avertissements: %ld" ger "Datensätze: %ld Gelöscht: %ld Ausgelassen: %ld Warnungen: %ld" greek "Εγγραφές: %ld Διαγραφές: %ld Παρεκάμφθησαν: %ld Προειδοποιήσεις: %ld" + hindi "रिकॉर्ड: %ld हटाए गए: %ld छोड़ दिए गए: %ld चेतावनी: %ld" hun "Rekordok: %ld Torolve: %ld Skipped: %ld Warnings: %ld" ita "Records: %ld 
Cancellati: %ld Saltati: %ld Avvertimenti: %ld" jpn "レコード数: %ld 削除: %ld スキップ: %ld 警告: %ld" @@ -2007,6 +2087,7 @@ ER_ALTER_INFO fre "Enregistrements: %ld Doublons: %ld" ger "Datensätze: %ld Duplikate: %ld" greek "Εγγραφές: %ld Επαναλήψεις: %ld" + hindi "रिकॉर्ड: %ld डुप्लिकेट: %ld" hun "Rekordok: %ld Duplikalva: %ld" ita "Records: %ld Duplicati: %ld" jpn "レコード数: %ld 重複: %ld" @@ -2055,6 +2136,7 @@ ER_CANT_REMOVE_ALL_FIELDS 42000 fre "Vous ne pouvez effacer tous les champs avec ALTER TABLE. Utilisez DROP TABLE" ger "Mit ALTER TABLE können nicht alle Felder auf einmal gelöscht werden. Dafür DROP TABLE verwenden" greek "Δεν είναι δυνατή η διαγραφή όλων των πεδίων με ALTER TABLE. Παρακαλώ χρησιμοποιείστε DROP TABLE" + hindi "ALTER TABLE का इस्तेमाल कर सभी कॉलम्स को हटाया नहीं जा सकता; DROP TABLE का इस्तेमाल करें" hun "Az osszes mezo nem torolheto az ALTER TABLE-lel. Hasznalja a DROP TABLE-t helyette" ita "Non si possono cancellare tutti i campi con una ALTER TABLE. Utilizzare DROP TABLE" jpn "ALTER TABLE では全ての列の削除はできません。DROP TABLE を使用してください。" @@ -2079,6 +2161,7 @@ ER_CANT_DROP_FIELD_OR_KEY 42000 fre "Ne peut effacer (DROP %s) %`-.192s. Vérifiez s'il existe" ger "DROP %s: Kann %`-.192s nicht löschen. Existiert es?" greek "Αδύνατη η διαγραφή (DROP %s) %`-.192s. Παρακαλώ ελέγξτε αν το πεδίο/κλειδί υπάρχει" + hindi "%s %`-.192s को ड्रॉप नहीं कर सकते हैं; कृपया जाँच करें कि यह मौजूद है" hun "A DROP %s %`-.192s nem lehetseges. Ellenorizze, hogy a mezo/kulcs letezik-e" ita "Impossibile cancellare (DROP %s) %`-.192s. Controllare che il campo chiave esista" nor "Kan ikke DROP %s %`-.192s. 
Undersøk om felt/nøkkel eksisterer" @@ -2101,6 +2184,7 @@ ER_INSERT_INFO fre "Enregistrements: %ld Doublons: %ld Avertissements: %ld" ger "Datensätze: %ld Duplikate: %ld Warnungen: %ld" greek "Εγγραφές: %ld Επαναλήψεις: %ld Προειδοποιήσεις: %ld" + hindi "रिकॉर्ड: %ld डुप्लिकेट: %ld चेतावनी: %ld" hun "Rekordok: %ld Duplikalva: %ld Warnings: %ld" ita "Records: %ld Duplicati: %ld Avvertimenti: %ld" jpn "レコード数: %ld 重複数: %ld 警告: %ld" @@ -2128,6 +2212,7 @@ ER_NO_SUCH_THREAD fre "Numéro de tâche inconnu: %lu" ger "Unbekannte Thread-ID: %lu" greek "Αγνωστο thread id: %lu" + hindi "अज्ञात थ्रेड ID: %lu" hun "Ervenytelen szal (thread) id: %lu" ita "Thread id: %lu sconosciuto" jpn "不明なスレッドIDです: %lu" @@ -2152,6 +2237,7 @@ ER_KILL_DENIED_ERROR fre "Vous n'êtes pas propriétaire de la tâche no: %lu" ger "Sie sind nicht Eigentümer von Thread %lu" greek "Δεν είσθε owner του thread %lu" + hindi "आप थ्रेड %lu के OWNER नहीं हैं" hun "A %lu thread-nek mas a tulajdonosa" ita "Utente non proprietario del thread %lu" jpn "スレッド %lu のオーナーではありません。" @@ -2176,6 +2262,7 @@ ER_NO_TABLES_USED fre "Aucune table utilisée" ger "Keine Tabellen verwendet" greek "Δεν χρησιμοποιήθηκαν πίνακες" + hindi "कोई टेबल का इस्तेमाल नहीं हुआ" hun "Nincs hasznalt tabla" ita "Nessuna tabella usata" jpn "表が指定されていません。" @@ -2224,6 +2311,7 @@ ER_NO_UNIQUE_LOGFILE fre "Ne peut générer un unique nom de journal %-.200s.(1-999)\n" ger "Kann keinen eindeutigen Dateinamen für die Logdatei %-.200s(1-999) erzeugen\n" greek "Αδύνατη η δημιουργία unique log-filename %-.200s.(1-999)\n" + hindi "एक अनूठा लॉग-फ़ाइल नाम %-.200s.(1-999) उत्पन्न नहीं कर सके\n" hun "Egyedi log-filenev nem generalhato: %-.200s.(1-999)\n" ita "Impossibile generare un nome del file log unico %-.200s.(1-999)\n" jpn "一意なログファイル名 %-.200s.(1-999) を生成できません。\n" @@ -2248,6 +2336,7 @@ ER_TABLE_NOT_LOCKED_FOR_WRITE fre "Table '%-.192s' verrouillée lecture (READ): modification impossible" ger "Tabelle '%-.192s' ist mit Lesesperre versehen und kann nicht aktualisiert 
werden" greek "Ο πίνακας '%-.192s' έχει κλειδωθεί με READ lock και δεν επιτρέπονται αλλαγές" + hindi "टेबल '%-.192s' READ लॉक से बंद है और उसे बदल नहीं सकते" hun "A(z) '%-.192s' tabla zarolva lett (READ lock) es nem lehet frissiteni" ita "La tabella '%-.192s' e` soggetta a lock in lettura e non puo` essere aggiornata" jpn "表 '%-.192s' はREADロックされていて、更新できません。" @@ -2272,6 +2361,7 @@ ER_TABLE_NOT_LOCKED fre "Table '%-.192s' non verrouillée: utilisez LOCK TABLES" ger "Tabelle '%-.192s' wurde nicht mit LOCK TABLES gesperrt" greek "Ο πίνακας '%-.192s' δεν έχει κλειδωθεί με LOCK TABLES" + hindi "टेबल '%-.192s' LOCK TABLES से बंद नहीं है" hun "A(z) '%-.192s' tabla nincs zarolva a LOCK TABLES-szel" ita "Non e` stato impostato il lock per la tabella '%-.192s' con LOCK TABLES" jpn "表 '%-.192s' は LOCK TABLES でロックされていません。" @@ -2296,6 +2386,7 @@ ER_BLOB_CANT_HAVE_DEFAULT 42000 fre "BLOB '%-.192s' ne peut avoir de valeur par défaut" ger "BLOB/TEXT-Feld '%-.192s' darf keinen Vorgabewert (DEFAULT) haben" greek "Τα Blob πεδία '%-.192s' δεν μπορούν να έχουν προκαθορισμένες τιμές (default value)" + hindi "BLOB/TEXT कॉलम '%-.192s' का डिफ़ॉल्ट मान नहीं हो सकता" hun "A(z) '%-.192s' blob objektumnak nem lehet alapertelmezett erteke" ita "Il campo BLOB '%-.192s' non puo` avere un valore di default" jpn "BLOB/TEXT 列 '%-.192s' にはデフォルト値を指定できません。" @@ -2320,6 +2411,7 @@ ER_WRONG_DB_NAME 42000 fre "Nom de base de donnée illégal: '%-.100s'" ger "Unerlaubter Datenbankname '%-.100s'" greek "Λάθος όνομα βάσης δεδομένων '%-.100s'" + hindi "डेटाबेस नाम '%-.100s' गलत है" hun "Hibas adatbazisnev: '%-.100s'" ita "Nome database errato '%-.100s'" jpn "データベース名 '%-.100s' は不正です。" @@ -2344,6 +2436,7 @@ ER_WRONG_TABLE_NAME 42000 fre "Nom de table illégal: '%-.100s'" ger "Unerlaubter Tabellenname '%-.100s'" greek "Λάθος όνομα πίνακα '%-.100s'" + hindi "टेबल नाम '%-.100s' गलत है" hun "Hibas tablanev: '%-.100s'" ita "Nome tabella errato '%-.100s'" jpn "表名 '%-.100s' は不正です。" @@ -2369,6 +2462,7 @@ ER_TOO_BIG_SELECT 
42000 ger "Die Ausführung des SELECT würde zu viele Datensätze untersuchen und wahrscheinlich sehr lange dauern. Bitte WHERE-Klausel überprüfen und gegebenenfalls SET SQL_BIG_SELECTS=1 oder SET MAX_JOIN_SIZE=# verwenden" greek "Το SELECT θα εξετάσει μεγάλο αριθμό εγγραφών και πιθανώς θα καθυστερήσει. Παρακαλώ εξετάστε τις παραμέτρους του WHERE και χρησιμοποιείστε SET SQL_BIG_SELECTS=1 αν το SELECT είναι σωστό" hun "A SELECT tul sok rekordot fog megvizsgalni es nagyon sokaig fog tartani. Ellenorizze a WHERE-t es hasznalja a SET SQL_BIG_SELECTS=1 beallitast, ha a SELECT okay" + hindi "SELECT कमांड MAX_JOIN_SIZE पंक्तियों से भी ज्यादा की जांच करेगा; कृपया WHERE क्लॉज़ को जाचें अथवा SET SQL_BIG_SELECTS=1 या SET MAX_JOIN_SIZE=# का इस्तेमाल करें" ita "La SELECT dovrebbe esaminare troppi record e usare troppo tempo. Controllare la WHERE e usa SET SQL_BIG_SELECTS=1 se e` tutto a posto" jpn "SELECTがMAX_JOIN_SIZEを超える行数を処理しました。WHERE句を確認し、SELECT文に問題がなければ、 SET SQL_BIG_SELECTS=1 または SET MAX_JOIN_SIZE=# を使用して下さい。" kor "SELECT 명령에서 너무 많은 레코드를 찾기 때문에 많은 시간이 소요됩니다. 따라서 WHERE 문을 점검하거나, 만약 SELECT가 ok되면 SET SQL_BIG_SELECTS=1 옵션을 사용하세요." 
@@ -2392,6 +2486,7 @@ ER_UNKNOWN_ERROR fre "Erreur inconnue" ger "Unbekannter Fehler" greek "Προέκυψε άγνωστο λάθος" + hindi "अज्ञात त्रुटि हुई" hun "Ismeretlen hiba" ita "Errore sconosciuto" jpn "不明なエラー" @@ -2415,6 +2510,7 @@ ER_UNKNOWN_PROCEDURE 42000 fre "Procédure %-.192s inconnue" ger "Unbekannte Prozedur '%-.192s'" greek "Αγνωστη διαδικασία '%-.192s'" + hindi "अज्ञात प्रोसीजर '%-.192s'" hun "Ismeretlen eljaras: '%-.192s'" ita "Procedura '%-.192s' sconosciuta" jpn "'%-.192s' は不明なプロシージャです。" @@ -2439,6 +2535,7 @@ ER_WRONG_PARAMCOUNT_TO_PROCEDURE 42000 fre "Mauvais nombre de paramètres pour la procedure %-.192s" ger "Falsche Parameterzahl für Prozedur '%-.192s'" greek "Λάθος αριθμός παραμέτρων στη διαδικασία '%-.192s'" + hindi "प्रोसीजर '%-.192s' के लिए पैरामीटर की संख्या गलत है" hun "Rossz parameter a(z) '%-.192s'eljaras szamitasanal" ita "Numero di parametri errato per la procedura '%-.192s'" jpn "プロシージャ '%-.192s' へのパラメータ数が不正です。" @@ -2463,6 +2560,7 @@ ER_WRONG_PARAMETERS_TO_PROCEDURE fre "Paramètre erroné pour la procedure %-.192s" ger "Falsche Parameter für Prozedur '%-.192s'" greek "Λάθος παράμετροι στην διαδικασία '%-.192s'" + hindi "प्रोसीजर '%-.192s' के लिए पैरामीटर्स गलत हैं" hun "Rossz parameter a(z) '%-.192s' eljarasban" ita "Parametri errati per la procedura '%-.192s'" jpn "プロシージャ '%-.192s' へのパラメータが不正です。" @@ -2487,6 +2585,7 @@ ER_UNKNOWN_TABLE 42S02 fre "Table inconnue '%-.192s' dans %-.32s" ger "Unbekannte Tabelle '%-.192s' in '%-.32s'" greek "Αγνωστος πίνακας '%-.192s' σε %-.32s" + hindi "टेबल '%-.192s', %-.32s में नहीं मिला" hun "Ismeretlen tabla: '%-.192s' %-.32s-ban" ita "Tabella '%-.192s' sconosciuta in %-.32s" jpn "'%-.192s' は %-.32s では不明な表です。" @@ -2511,6 +2610,7 @@ ER_FIELD_SPECIFIED_TWICE 42000 fre "Champ '%-.192s' spécifié deux fois" ger "Feld '%-.192s' wurde zweimal angegeben" greek "Το πεδίο '%-.192s' έχει ορισθεί δύο φορές" + hindi "कॉलम '%-.192s' दो बार निर्दिष्ट किया गया है" hun "A(z) '%-.192s' mezot ketszer definialta" ita "Campo 
'%-.192s' specificato 2 volte" jpn "列 '%-.192s' は2回指定されています。" @@ -2535,6 +2635,7 @@ ER_INVALID_GROUP_FUNC_USE fre "Utilisation invalide de la clause GROUP" ger "Falsche Verwendung einer Gruppierungsfunktion" greek "Εσφαλμένη χρήση της group function" + hindi "ग्रुप फंक्शन का अवैध उपयोग" hun "A group funkcio ervenytelen hasznalata" ita "Uso non valido di una funzione di raggruppamento" jpn "集計関数の使用方法が不正です。" @@ -2556,6 +2657,7 @@ ER_UNSUPPORTED_EXTENSION 42000 fre "Table '%-.192s' : utilise une extension invalide pour cette version de MariaDB" ger "Tabelle '%-.192s' verwendet eine Erweiterung, die in dieser MariaDB-Version nicht verfügbar ist" greek "Ο πίνακς '%-.192s' χρησιμοποιεί κάποιο extension που δεν υπάρχει στην έκδοση αυτή της MariaDB" + hindi "टेबल '%-.192s' जिस इक्स्टेन्शन का उपयोग कर रहा है, वह इस MariaDB संस्करण में उपलब्ध नहीं है" hun "A(z) '%-.192s' tabla olyan bovitest hasznal, amely nem letezik ebben a MariaDB versioban" ita "La tabella '%-.192s' usa un'estensione che non esiste in questa versione di MariaDB" jpn "表 '%-.192s' は、このMySQLバージョンには無い機能を使用しています。" @@ -2580,6 +2682,7 @@ ER_TABLE_MUST_HAVE_COLUMNS 42000 fre "Une table doit comporter au moins une colonne" ger "Eine Tabelle muss mindestens eine Spalte besitzen" greek "Ενας πίνακας πρέπει να έχει τουλάχιστον ένα πεδίο" + hindi "एक टेबल में कम से कम एक कॉलम होना चाहिए" hun "A tablanak legalabb egy oszlopot tartalmazni kell" ita "Una tabella deve avere almeno 1 colonna" jpn "表には最低でも1個の列が必要です。" @@ -2601,6 +2704,7 @@ ER_RECORD_FILE_FULL fre "La table '%-.192s' est pleine" ger "Tabelle '%-.192s' ist voll" greek "Ο πίνακας '%-.192s' είναι γεμάτος" + hindi "टेबल '%-.192s' पूरा भरा है" hun "A '%-.192s' tabla megtelt" ita "La tabella '%-.192s' e` piena" jpn "表 '%-.192s' は満杯です。" @@ -2622,6 +2726,7 @@ ER_UNKNOWN_CHARACTER_SET 42000 fre "Jeu de caractères inconnu: '%-.64s'" ger "Unbekannter Zeichensatz: '%-.64s'" greek "Αγνωστο character set: '%-.64s'" + hindi "अज्ञात CHARACTER SET: '%-.64s'" hun "Ervenytelen 
karakterkeszlet: '%-.64s'" ita "Set di caratteri '%-.64s' sconosciuto" jpn "不明な文字コードセット: '%-.64s'" @@ -2643,6 +2748,7 @@ ER_TOO_MANY_TABLES fre "Trop de tables. MariaDB ne peut utiliser que %d tables dans un JOIN" ger "Zu viele Tabellen. MariaDB kann in einem Join maximal %d Tabellen verwenden" greek "Πολύ μεγάλος αριθμός πινάκων. Η MariaDB μπορεί να χρησιμοποιήσει %d πίνακες σε διαδικασία join" + hindi "बहुत अधिक टेबल्स, MariaDB एक JOIN में केवल %d टेबल्स का उपयोग कर सकता है" hun "Tul sok tabla. A MariaDB csak %d tablat tud kezelni osszefuzeskor" ita "Troppe tabelle. MariaDB puo` usare solo %d tabelle in una join" jpn "表が多すぎます。MySQLがJOINできる表は %d 個までです。" @@ -2664,6 +2770,7 @@ ER_TOO_MANY_FIELDS fre "Trop de champs" ger "Zu viele Felder" greek "Πολύ μεγάλος αριθμός πεδίων" + hindi "बहुत अधिक कॉलम्स" hun "Tul sok mezo" ita "Troppi campi" jpn "列が多すぎます。" @@ -2749,6 +2856,7 @@ ER_CANT_FIND_UDF fre "Imposible de charger la fonction '%-.192s'" ger "Kann Funktion '%-.192s' nicht laden" greek "Δεν είναι δυνατή η διαδικασία load για τη συνάρτηση '%-.192s'" + hindi "फंक्शन '%-.192s' लोड नहीं किया जा सका" hun "A(z) '%-.192s' fuggveny nem toltheto be" ita "Impossibile caricare la funzione '%-.192s'" jpn "関数 '%-.192s' をロードできません。" @@ -2770,6 +2878,7 @@ ER_CANT_INITIALIZE_UDF fre "Impossible d'initialiser la fonction '%-.192s'; %-.80s" ger "Kann Funktion '%-.192s' nicht initialisieren: %-.80s" greek "Δεν είναι δυνατή η έναρξη της συνάρτησης '%-.192s'; %-.80s" + hindi "फंक्शन '%-.192s' को प्रारंभ नहीं किया जा सका; %-.80s" hun "A(z) '%-.192s' fuggveny nem inicializalhato; %-.80s" ita "Impossibile inizializzare la funzione '%-.192s'; %-.80s" jpn "関数 '%-.192s' を初期化できません。; %-.80s" @@ -2812,6 +2921,7 @@ ER_UDF_EXISTS fre "La fonction '%-.192s' existe déjà" ger "Funktion '%-.192s' existiert schon" greek "Η συνάρτηση '%-.192s' υπάρχει ήδη" + hindi "फंक्शन '%-.192s' पहले से मौजूद है" hun "A '%-.192s' fuggveny mar letezik" ita "La funzione '%-.192s' esiste gia`" jpn "関数 '%-.192s' 
はすでに定義されています。" @@ -2878,6 +2988,7 @@ ER_FUNCTION_NOT_DEFINED fre "La fonction '%-.192s' n'est pas définie" ger "Funktion '%-.192s' ist nicht definiert" greek "Η συνάρτηση '%-.192s' δεν έχει ορισθεί" + hindi "फंक्शन '%-.192s' की परिभाषा नहीं मिली" hun "A '%-.192s' fuggveny nem definialt" ita "La funzione '%-.192s' non e` definita" jpn "関数 '%-.192s' は定義されていません。" @@ -2899,6 +3010,7 @@ ER_HOST_IS_BLOCKED fre "L'hôte '%-.64s' est bloqué à cause d'un trop grand nombre d'erreur de connexion. Débloquer le par 'mysqladmin flush-hosts'" ger "Host '%-.64s' blockiert wegen zu vieler Verbindungsfehler. Aufheben der Blockierung mit 'mysqladmin flush-hosts'" greek "Ο υπολογιστής '%-.64s' έχει αποκλεισθεί λόγω πολλαπλών λαθών σύνδεσης. Προσπαθήστε να διορώσετε με 'mysqladmin flush-hosts'" + hindi "होस्ट '%-.64s' को कई कनेक्शन में त्रुटियों के कारण ब्लॉक कर दिया गया है; 'mysqladmin flush-hosts' का इस्तेमाल कर अनब्लॉक करें" hun "A '%-.64s' host blokkolodott, tul sok kapcsolodasi hiba miatt. Hasznalja a 'mysqladmin flush-hosts' parancsot" ita "Sistema '%-.64s' bloccato a causa di troppi errori di connessione. 
Per sbloccarlo: 'mysqladmin flush-hosts'" jpn "接続エラーが多いため、ホスト '%-.64s' は拒否されました。'mysqladmin flush-hosts' で解除できます。" @@ -2919,6 +3031,7 @@ ER_HOST_NOT_PRIVILEGED fre "Le hôte '%-.64s' n'est pas authorisé à se connecter à ce serveur MariaDB" ger "Host '%-.64s' hat keine Berechtigung, sich mit diesem MariaDB-Server zu verbinden" greek "Ο υπολογιστής '%-.64s' δεν έχει δικαίωμα σύνδεσης με τον MariaDB server" + hindi "होस्ट '%-.64s' को इस MariaDB सर्वर से कनेक्ट करने के लिए अनुमति नहीं है" hun "A '%-.64s' host szamara nem engedelyezett a kapcsolodas ehhez a MariaDB szerverhez" ita "Al sistema '%-.64s' non e` consentita la connessione a questo server MariaDB" jpn "ホスト '%-.64s' からのこの MySQL server への接続は許可されていません。" @@ -2939,6 +3052,7 @@ ER_PASSWORD_ANONYMOUS_USER 42000 fre "Vous utilisez un utilisateur anonyme et les utilisateurs anonymes ne sont pas autorisés à changer les mots de passe" ger "Sie benutzen MariaDB als anonymer Benutzer und dürfen daher keine Passwörter ändern" greek "Χρησιμοποιείτε την MariaDB σαν anonymous user και έτσι δεν μπορείτε να αλλάξετε τα passwords άλλων χρηστών" + hindi "आप MariaDB का उपयोग एक बेनाम यूज़र की तरह कर रहे हैं; बेनाम यूज़र्स को 'यूज़र सेटिंग्स' बदलने की अनुमति नहीं है" hun "Nevtelen (anonymous) felhasznalokent nem negedelyezett a jelszovaltoztatas" ita "Impossibile cambiare la password usando MariaDB come utente anonimo" jpn "MySQL を匿名ユーザーで使用しているので、パスワードの変更はできません。" @@ -2979,6 +3093,7 @@ ER_PASSWORD_NO_MATCH 28000 fre "Impossible de trouver un enregistrement correspondant dans la table user" ger "Kann keinen passenden Datensatz in Tabelle 'user' finden" greek "Δεν είναι δυνατή η ανεύρεση της αντίστοιχης εγγραφής στον πίνακα των χρηστών" + hindi "यूज़र टेबल में रिकॉर्ड नहीं मिला" hun "Nincs megegyezo sor a user tablaban" ita "Impossibile trovare la riga corrispondente nella tabella user" jpn "ユーザーテーブルに該当するレコードが見つかりません。" @@ -3057,6 +3172,7 @@ ER_CANT_REOPEN_TABLE est "Ei suuda taasavada tabelit '%-.192s'" fre "Impossible de réouvrir la 
table: '%-.192s" ger "Kann Tabelle'%-.192s' nicht erneut öffnen" + hindi "टेबल '%-.192s' फिर से खोल नहीं सकते" hun "Nem lehet ujra-megnyitni a tablat: '%-.192s" ita "Impossibile riaprire la tabella: '%-.192s'" jpn "表を再オープンできません。: '%-.192s'" @@ -3080,6 +3196,7 @@ ER_INVALID_USE_OF_NULL 22004 est "NULL väärtuse väärkasutus" fre "Utilisation incorrecte de la valeur NULL" ger "Unerlaubte Verwendung eines NULL-Werts" + hindi "NULL मान का अवैध उपयोग" hun "A NULL ervenytelen hasznalata" ita "Uso scorretto del valore NULL" jpn "NULL 値の使用方法が不適切です。" @@ -3099,6 +3216,7 @@ ER_REGEXP_ERROR 42000 est "regexp tagastas vea '%-.64s'" fre "Erreur '%-.64s' provenant de regexp" ger "regexp lieferte Fehler '%-.64s'" + hindi "regexp में '%-.64s' त्रुटि हुई" hun "'%-.64s' hiba a regularis kifejezes hasznalata soran (regexp)" ita "Errore '%-.64s' da regexp" jpn "regexp がエラー '%-.64s' を返しました。" @@ -3220,6 +3338,7 @@ ER_GRANT_WRONG_HOST_OR_USER 42000 est "Masina või kasutaja nimi GRANT lauses on liiga pikk" fre "L'hôte ou l'utilisateur donné en argument à GRANT est trop long" ger "Das Host- oder User-Argument für GRANT ist zu lang" + hindi "GRANT के लिए होस्ट या यूज़र आर्गुमेंट बहुत लंबा है" hun "A host vagy felhasznalo argumentuma tul hosszu a GRANT parancsban" ita "L'argomento host o utente per la GRANT e` troppo lungo" jpn "GRANTコマンドへの、ホスト名やユーザー名が長すぎます。" @@ -3239,6 +3358,7 @@ ER_NO_SUCH_TABLE 42S02 est "Tabelit '%-.192s.%-.192s' ei eksisteeri" fre "La table '%-.192s.%-.192s' n'existe pas" ger "Tabelle '%-.192s.%-.192s' existiert nicht" + hindi "टेबल '%-.192s.%-.192s' मौजूद नहीं है" hun "A '%-.192s.%-.192s' tabla nem letezik" ita "La tabella '%-.192s.%-.192s' non esiste" jpn "表 '%-.192s.%-.192s' は存在しません。" @@ -3281,6 +3401,7 @@ ER_NOT_ALLOWED_COMMAND 42000 est "Antud käsk ei ole lubatud käesolevas MariaDB versioonis" fre "Cette commande n'existe pas dans cette version de MariaDB" ger "Der verwendete Befehl ist in dieser MariaDB-Version nicht zulässig" + hindi "यह कमांड इस MariaDB संस्करण के 
साथ इस्तेमाल नहीं किया जा सकता है" hun "A hasznalt parancs nem engedelyezett ebben a MariaDB verzioban" ita "Il comando utilizzato non e` supportato in questa versione di MariaDB" jpn "このMySQLバージョンでは利用できないコマンドです。" @@ -3301,6 +3422,7 @@ ER_SYNTAX_ERROR 42000 fre "Erreur de syntaxe" ger "Fehler in der SQL-Syntax. Bitte die korrekte Syntax im Handbuch nachschlagen" greek "You have an error in your SQL syntax" + hindi "आपके SQL सिंटेक्स मैं गलती है; सही सिंटेक्स के लिए अपने MariaDB सर्वर संस्करण के मैन्युअल की सहायता लें" hun "Szintaktikai hiba" ita "Errore di sintassi nella query SQL" jpn "SQL構文エラーです。バージョンに対応するマニュアルを参照して正しい構文を確認してください。" @@ -3343,6 +3465,7 @@ ER_TOO_MANY_DELAYED_THREADS est "Liiga palju DELAYED lõimesid kasutusel" fre "Trop de tâche 'delayed' en cours" ger "Zu viele verzögerte (DELAYED) Threads in Verwendung" + hindi "बहुत से DELAYED थ्रेड्स उपयोग में हैं" hun "Tul sok kesletetett thread (delayed)" ita "Troppi threads ritardati in uso" jpn "'Delayed insert'スレッドが多すぎます。" @@ -3385,6 +3508,7 @@ ER_NET_PACKET_TOO_LARGE 08S01 est "Saabus suurem pakett kui lubatud 'max_allowed_packet' muutujaga" fre "Paquet plus grand que 'max_allowed_packet' reçu" ger "Empfangenes Paket ist größer als 'max_allowed_packet' Bytes" + hindi "'max_allowed_packet' से भी बड़ा एक पैकेट मिला" hun "A kapott csomag nagyobb, mint a maximalisan engedelyezett: 'max_allowed_packet'" ita "Ricevuto un pacchetto piu` grande di 'max_allowed_packet'" jpn "'max_allowed_packet'よりも大きなパケットを受信しました。" @@ -3404,6 +3528,7 @@ ER_NET_READ_ERROR_FROM_PIPE 08S01 est "Viga ühendustoru lugemisel" fre "Erreur de lecture reçue du pipe de connexion" ger "Lese-Fehler bei einer Verbindungs-Pipe" + hindi "कनेक्शन पाइप से एक READ त्रुटि हुई" hun "Olvasasi hiba a kapcsolat soran" ita "Rilevato un errore di lettura dalla pipe di connessione" jpn "接続パイプの読み込みエラーです。" @@ -3423,6 +3548,7 @@ ER_NET_FCNTL_ERROR 08S01 est "fcntl() tagastas vea" fre "Erreur reçue de fcntl() " ger "fcntl() lieferte einen Fehler" + hindi 
"fcntl() से एक त्रुटि हुई" hun "Hiba a fcntl() fuggvenyben" ita "Rilevato un errore da fcntl()" jpn "fcntl()がエラーを返しました。" @@ -3442,6 +3568,7 @@ ER_NET_PACKETS_OUT_OF_ORDER 08S01 est "Paketid saabusid vales järjekorras" fre "Paquets reçus dans le désordre" ger "Pakete nicht in der richtigen Reihenfolge empfangen" + hindi "पैकेट्स क्रम में नहीं प्राप्त हुए" hun "Helytelen sorrendben erkezett adatcsomagok" ita "Ricevuti pacchetti non in ordine" jpn "不正な順序のパケットを受信しました。" @@ -3461,6 +3588,7 @@ ER_NET_UNCOMPRESS_ERROR 08S01 est "Viga andmepaketi lahtipakkimisel" fre "Impossible de décompresser le paquet reçu" ger "Kommunikationspaket lässt sich nicht entpacken" + hindi "संचार पैकेट UNCOMPRESS नहीं कर सके" hun "A kommunikacios adatcsomagok nem tomorithetok ki" ita "Impossibile scompattare i pacchetti di comunicazione" jpn "圧縮パケットの展開ができませんでした。" @@ -3480,6 +3608,7 @@ ER_NET_READ_ERROR 08S01 est "Viga andmepaketi lugemisel" fre "Erreur de lecture des paquets reçus" ger "Fehler beim Lesen eines Kommunikationspakets" + hindi "संचार पैकेट्स पढ़ते समय एक त्रुटि हुई" hun "HIba a kommunikacios adatcsomagok olvasasa soran" ita "Rilevato un errore ricevendo i pacchetti di comunicazione" jpn "パケットの受信でエラーが発生しました。" @@ -3499,6 +3628,7 @@ ER_NET_READ_INTERRUPTED 08S01 est "Kontrollaja ületamine andmepakettide lugemisel" fre "Timeout en lecture des paquets reçus" ger "Zeitüberschreitung beim Lesen eines Kommunikationspakets" + hindi "संचार पैकेट्स पढ़ने के दौरान टाइमआउट" hun "Idotullepes a kommunikacios adatcsomagok olvasasa soran" ita "Rilevato un timeout ricevendo i pacchetti di comunicazione" jpn "パケットの受信でタイムアウトが発生しました。" @@ -3518,6 +3648,7 @@ ER_NET_ERROR_ON_WRITE 08S01 est "Viga andmepaketi kirjutamisel" fre "Erreur d'écriture des paquets envoyés" ger "Fehler beim Schreiben eines Kommunikationspakets" + hindi "संचार पैकेट्स लिखते समय एक त्रुटि हुई" hun "Hiba a kommunikacios csomagok irasa soran" ita "Rilevato un errore inviando i pacchetti di comunicazione" jpn "パケットの送信でエラーが発生しました。" @@ 
-3537,6 +3668,7 @@ ER_NET_WRITE_INTERRUPTED 08S01 est "Kontrollaja ületamine andmepakettide kirjutamisel" fre "Timeout d'écriture des paquets envoyés" ger "Zeitüberschreitung beim Schreiben eines Kommunikationspakets" + hindi "संचार पैकेट्स लिखने के दौरान टाइमआउट" hun "Idotullepes a kommunikacios csomagok irasa soran" ita "Rilevato un timeout inviando i pacchetti di comunicazione" jpn "パケットの送信でタイムアウトが発生しました。" @@ -3556,6 +3688,7 @@ ER_TOO_LONG_STRING 42000 est "Tulemus on pikem kui lubatud 'max_allowed_packet' muutujaga" fre "La chaîne résultat est plus grande que 'max_allowed_packet'" ger "Ergebnis-String ist länger als 'max_allowed_packet' Bytes" + hindi "रिजल्ट स्ट्रिंग 'max_allowed_packet' से लंबा है" hun "Ez eredmeny sztring nagyobb, mint a lehetseges maximum: 'max_allowed_packet'" ita "La stringa di risposta e` piu` lunga di 'max_allowed_packet'" jpn "結果の文字列が 'max_allowed_packet' よりも大きいです。" @@ -3574,6 +3707,7 @@ ER_TABLE_CANT_HANDLE_BLOB 42000 est "Valitud tabelitüüp (%s) ei toeta BLOB/TEXT tüüpi välju" fre "Ce type de table (%s) ne supporte pas les colonnes BLOB/TEXT" ger "Der verwendete Tabellentyp (%s) unterstützt keine BLOB- und TEXT-Felder" + hindi "स्टोरेज इंजन %s BLOB/TEXT कॉलम्स को सपोर्ट नहीं करता" hun "A hasznalt tabla tipus (%s) nem tamogatja a BLOB/TEXT mezoket" ita "Il tipo di tabella usata (%s) non supporta colonne di tipo BLOB/TEXT" por "Tipo de tabela usado (%s) não permite colunas BLOB/TEXT" @@ -3591,6 +3725,7 @@ ER_TABLE_CANT_HANDLE_AUTO_INCREMENT 42000 est "Valitud tabelitüüp (%s) ei toeta AUTO_INCREMENT tüüpi välju" fre "Ce type de table (%s) ne supporte pas les colonnes AUTO_INCREMENT" ger "Der verwendete Tabellentyp (%s) unterstützt keine AUTO_INCREMENT-Felder" + hindi "स्टोरेज इंजन %s AUTO_INCREMENT कॉलम्स को सपोर्ट नहीं करता" hun "A hasznalt tabla tipus (%s) nem tamogatja az AUTO_INCREMENT tipusu mezoket" ita "Il tipo di tabella usata (%s) non supporta colonne di tipo AUTO_INCREMENT" por "Tipo de tabela usado (%s) não permite colunas 
AUTO_INCREMENT" @@ -3632,6 +3767,7 @@ ER_WRONG_COLUMN_NAME 42000 est "Vigane tulba nimi '%-.100s'" fre "Nom de colonne '%-.100s' incorrect" ger "Falscher Spaltenname '%-.100s'" + hindi "कॉलम नाम '%-.100s' गलत है" hun "Ervenytelen mezonev: '%-.100s'" ita "Nome colonna '%-.100s' non corretto" jpn "列名 '%-.100s' は不正です。" @@ -3645,6 +3781,7 @@ ER_WRONG_COLUMN_NAME 42000 ER_WRONG_KEY_COLUMN 42000 eng "The storage engine %s can't index column %`s" ger "Die Speicher-Engine %s kann die Spalte %`s nicht indizieren" + hindi "स्टोरेज इंजन %s, कॉलम %`s को इंडेक्स नहीं कर सकता" rus "Обработчик таблиц %s не может проиндексировать столбец %`s" ukr "Вказівник таблиц %s не може індексувати стовбець %`s" ER_WRONG_MRG_TABLE @@ -3720,6 +3857,7 @@ ER_PRIMARY_CANT_HAVE_NULL 42000 est "Kõik PRIMARY KEY peavad olema määratletud NOT NULL piiranguga; vajadusel kasuta UNIQUE tüüpi võtit" fre "Toutes les parties d'un index PRIMARY KEY doivent être NOT NULL; Si vous avez besoin d'un NULL dans l'index, utilisez un index UNIQUE" ger "Alle Teile eines PRIMARY KEY müssen als NOT NULL definiert sein. 
Wenn NULL in einem Schlüssel benötigt wird, muss ein UNIQUE-Schlüssel verwendet werden" + hindi "PRIMARY KEY के सभी भागों को NOT NULL होना चाहिए; यदि आपको एक KEY में NULL की जरूरत है, तो UNIQUE का उपयोग करें" hun "Az elsodleges kulcs teljes egeszeben csak NOT NULL tipusu lehet; Ha NULL mezot szeretne a kulcskent, hasznalja inkabb a UNIQUE-ot" ita "Tutte le parti di una chiave primaria devono essere dichiarate NOT NULL; se necessitano valori NULL nelle chiavi utilizzare UNIQUE" jpn "PRIMARY KEYの列は全てNOT NULLでなければいけません。UNIQUE索引であればNULLを含むことが可能です。" @@ -3738,6 +3876,7 @@ ER_TOO_MANY_ROWS 42000 est "Tulemis oli rohkem kui üks kirje" fre "Le résultat contient plus d'un enregistrement" ger "Ergebnis besteht aus mehr als einer Zeile" + hindi "परिणाम एक से अधिक पंक्ति का है" hun "Az eredmeny tobb, mint egy sort tartalmaz" ita "Il risultato consiste di piu` di una riga" jpn "結果が2行以上です。" @@ -3756,6 +3895,7 @@ ER_REQUIRES_PRIMARY_KEY 42000 est "Antud tabelitüüp nõuab primaarset võtit" fre "Ce type de table nécessite une clé primaire (PRIMARY KEY)" ger "Dieser Tabellentyp benötigt einen Primärschlüssel (PRIMARY KEY)" + hindi "इस प्रकार के टेबल को एक PRIMARY KEY की आवश्यकता है" hun "Az adott tablatipushoz elsodleges kulcs hasznalata kotelezo" ita "Questo tipo di tabella richiede una chiave primaria" jpn "使用のストレージエンジンでは、PRIMARY KEYが必要です。" @@ -3774,6 +3914,7 @@ ER_NO_RAID_COMPILED est "Antud MariaDB versioon on kompileeritud ilma RAID toeta" fre "Cette version de MariaDB n'est pas compilée avec le support RAID" ger "Diese MariaDB-Version ist nicht mit RAID-Unterstützung kompiliert" + hindi "MariaDB का यह संस्करण RAID सपोर्ट के साथ कॉम्पाईल्ड नहीं है" hun "Ezen leforditott MariaDB verzio nem tartalmaz RAID support-ot" ita "Questa versione di MYSQL non e` compilata con il supporto RAID" jpn "このバージョンのMySQLはRAIDサポートを含めてコンパイルされていません。" @@ -3809,6 +3950,7 @@ ER_KEY_DOES_NOT_EXITS 42000 S1009 est "Võti '%-.192s' ei eksisteeri tabelis '%-.192s'" fre "L'index '%-.192s' n'existe pas sur la 
table '%-.192s'" ger "Schlüssel '%-.192s' existiert in der Tabelle '%-.192s' nicht" + hindi "KEY '%-.192s', टेबल '%-.192s' में मौजूद नहीं है" hun "A '%-.192s' kulcs nem letezik a '%-.192s' tablaban" ita "La chiave '%-.192s' non esiste nella tabella '%-.192s'" jpn "索引 '%-.192s' は表 '%-.192s' には存在しません。" @@ -3826,6 +3968,7 @@ ER_CHECK_NO_SUCH_TABLE 42000 est "Ei suuda avada tabelit" fre "Impossible d'ouvrir la table" ger "Kann Tabelle nicht öffnen" + hindi "टेबल नहीं खुल सकता है" hun "Nem tudom megnyitni a tablat" ita "Impossibile aprire la tabella" jpn "表をオープンできません。" @@ -3844,6 +3987,7 @@ ER_CHECK_NOT_IMPLEMENTED 42000 fre "Ce type de table ne supporte pas les %s" ger "Die Speicher-Engine für diese Tabelle unterstützt kein %s" greek "The handler for the table doesn't support %s" + hindi "इस टेबल का स्टोरेज इंजन '%s' को सपोर्ट नहीं करता" hun "A tabla kezeloje (handler) nem tamogatja az %s" ita "Il gestore per la tabella non supporta il %s" jpn "この表のストレージエンジンは '%s' を利用できません。" @@ -3884,6 +4028,7 @@ ER_ERROR_DURING_COMMIT est "Viga %M käsu COMMIT täitmisel" fre "Erreur %M lors du COMMIT" ger "Fehler %M beim COMMIT" + hindi "COMMIT के दौरान %M त्रुटि हुई" hun "%M hiba a COMMIT vegrehajtasa soran" ita "Rilevato l'errore %M durante il COMMIT" jpn "COMMIT中にエラー %M が発生しました。" @@ -3901,6 +4046,7 @@ ER_ERROR_DURING_ROLLBACK est "Viga %M käsu ROLLBACK täitmisel" fre "Erreur %M lors du ROLLBACK" ger "Fehler %M beim ROLLBACK" + hindi "ROLLBACK के दौरान %M त्रुटि हुई" hun "%M hiba a ROLLBACK vegrehajtasa soran" ita "Rilevato l'errore %M durante il ROLLBACK" jpn "ROLLBACK中にエラー %M が発生しました。" @@ -3918,6 +4064,7 @@ ER_ERROR_DURING_FLUSH_LOGS est "Viga %M käsu FLUSH_LOGS täitmisel" fre "Erreur %M lors du FLUSH_LOGS" ger "Fehler %M bei FLUSH_LOGS" + hindi "FLUSH_LOGS के दौरान %M त्रुटि हुई" hun "%M hiba a FLUSH_LOGS vegrehajtasa soran" ita "Rilevato l'errore %M durante il FLUSH_LOGS" jpn "FLUSH_LOGS中にエラー %M が発生しました。" @@ -3935,6 +4082,7 @@ ER_ERROR_DURING_CHECKPOINT est "Viga %M käsu 
CHECKPOINT täitmisel" fre "Erreur %M lors du CHECKPOINT" ger "Fehler %M bei CHECKPOINT" + hindi "CHECKPOINT के दौरान %M त्रुटि हुई" hun "%M hiba a CHECKPOINT vegrehajtasa soran" ita "Rilevato l'errore %M durante il CHECKPOINT" jpn "CHECKPOINT中にエラー %M が発生しました。" @@ -4071,6 +4219,7 @@ ER_UNKNOWN_SYSTEM_VARIABLE est "Tundmatu süsteemne muutuja '%-.*s'" fre "Variable système '%-.*s' inconnue" ger "Unbekannte Systemvariable '%-.*s'" + hindi "अज्ञात सिस्टम वैरिएबल '%-.*s'" ita "Variabile di sistema '%-.*s' sconosciuta" jpn "'%-.*s' は不明なシステム変数です。" por "Variável de sistema '%-.*s' desconhecida" @@ -4209,6 +4358,7 @@ ER_TOO_MANY_USER_CONNECTIONS 42000 est "Kasutajal %-.64s on juba rohkem ühendusi kui lubatud 'max_user_connections' muutujaga" fre "L'utilisateur %-.64s possède déjà plus de 'max_user_connections' connexions actives" ger "Benutzer '%-.64s' hat mehr als 'max_user_connections' aktive Verbindungen" + hindi "यूज़र %-.64s के पहले से ही 'max_user_connections' से अधिक सक्रिय कनेक्शन्स हैं" ita "L'utente %-.64s ha gia' piu' di 'max_user_connections' connessioni attive" jpn "ユーザー '%-.64s' はすでに 'max_user_connections' 以上のアクティブな接続を行っています。" por "Usuário '%-.64s' já possui mais que o valor máximo de conexões (max_user_connections) ativas" @@ -4224,6 +4374,7 @@ ER_SET_CONSTANTS_ONLY est "Ainult konstantsed suurused on lubatud SET klauslis" fre "Seules les expressions constantes sont autorisées avec SET" ger "Bei diesem Befehl dürfen nur konstante Ausdrücke verwendet werden" + hindi "इस स्टेटमेंट में आप केवल CONSTANT EXPRESSIONS का उपयोग कर सकते हैं" ita "Si possono usare solo espressioni costanti con SET" jpn "SET処理が失敗しました。" por "Você pode usar apenas expressões constantes com SET" @@ -4254,6 +4405,7 @@ ER_LOCK_TABLE_FULL est "Lukkude koguarv ületab lukutabeli suuruse" fre "Le nombre total de verrou dépasse la taille de la table des verrous" ger "Die Gesamtzahl der Sperren überschreitet die Größe der Sperrtabelle" + hindi "लॉक्स की कुल संख्या लॉक टेबल के साइज से अधिक है" ita 
"Il numero totale di lock e' maggiore della grandezza della tabella di lock" jpn "ロックの数が多すぎます。" por "O número total de travamentos excede o tamanho da tabela de travamentos" @@ -4313,6 +4465,7 @@ ER_WRONG_ARGUMENTS est "Vigased parameetrid %s-le" fre "Mauvais arguments à %s" ger "Falsche Argumente für %s" + hindi "%s को गलत आर्ग्यूमेंट्स" ita "Argomenti errati a %s" jpn "%s の引数が不正です" por "Argumentos errados para %s" @@ -4327,6 +4480,7 @@ ER_NO_PERMISSION_TO_CREATE_USER 42000 est "Kasutajal '%s'@'%s' ei ole lubatud luua uusi kasutajaid" fre "'%s'@'%s' n'est pas autorisé à créer de nouveaux utilisateurs" ger "'%s'@'%s' ist nicht berechtigt, neue Benutzer hinzuzufügen" + hindi "'%s'@'%s' को नए यूज़र्स बनाने की अनुमति नहीं है" ita "A '%s'@'%s' non e' permesso creare nuovi utenti" por "Não é permitido a '%s'@'%s' criar novos usuários" rus "'%s'@'%s' не разрешается создавать новых пользователей" @@ -4366,6 +4520,7 @@ ER_TABLE_CANT_HANDLE_FT est "Antud tabelitüüp (%s) ei toeta FULLTEXT indekseid" fre "Le type de table utilisé (%s) ne supporte pas les index FULLTEXT" ger "Der verwendete Tabellentyp (%s) unterstützt keine FULLTEXT-Indizes" + hindi "स्टोरेज इंजन '%s' FULLTEXT इन्डेक्सेस को सपोर्ट नहीं करता" ita "La tabella usata (%s) non supporta gli indici FULLTEXT" por "O tipo de tabela utilizado (%s) não suporta índices de texto completo (fulltext indexes)" rus "Используемый тип таблиц (%s) не поддерживает полнотекстовых индексов" @@ -4840,6 +4995,7 @@ WARN_DATA_TRUNCATED 01000 ER_WARN_USING_OTHER_HANDLER eng "Using storage engine %s for table '%s'" ger "Speicher-Engine %s wird für Tabelle '%s' benutzt" + hindi "स्टोरेज इंजन %s का इस्तेमाल टेबल '%s' के लिए किया जा रहा है" jpn "ストレージエンジン %s が表 '%s' に利用されています。" por "Usando engine de armazenamento %s para tabela '%s'" spa "Usando motor de almacenamiento %s para tabla '%s'" @@ -4967,6 +5123,7 @@ ER_WARN_HOSTNAME_WONT_WORK ER_UNKNOWN_STORAGE_ENGINE 42000 eng "Unknown storage engine '%s'" ger "Unbekannte Speicher-Engine '%s'" + 
hindi "अज्ञात स्टोरेज इंजन '%s'" jpn "'%s' は不明なストレージエンジンです。" por "Motor de tabela desconhecido '%s'" spa "Desconocido motor de tabla '%s'" @@ -5061,15 +5218,19 @@ ER_SP_NO_RECURSIVE_CREATE 2F003 ER_SP_ALREADY_EXISTS 42000 eng "%s %s already exists" ger "%s %s existiert bereits" + hindi "%s %s पहले से ही मौजूद है" ER_SP_DOES_NOT_EXIST 42000 eng "%s %s does not exist" ger "%s %s existiert nicht" + hindi "%s %s मौजूद नहीं है" ER_SP_DROP_FAILED eng "Failed to DROP %s %s" ger "DROP %s %s ist fehlgeschlagen" + hindi "%s %s को ड्रॉप करने में असफल रहे" ER_SP_STORE_FAILED eng "Failed to CREATE %s %s" ger "CREATE %s %s ist fehlgeschlagen" + hindi "%s %s को बनाने में असफल रहे" ER_SP_LILABEL_MISMATCH 42000 eng "%s with no matching label: %s" ger "%s ohne passende Marke: %s" @@ -5088,9 +5249,11 @@ ER_SP_BADSELECT 0A000 ER_SP_BADRETURN 42000 eng "RETURN is only allowed in a FUNCTION" ger "RETURN ist nur innerhalb einer FUNCTION erlaubt" + hindi "RETURN को केवल FUNCTION में इस्तेमाल किया जा सकता है" ER_SP_BADSTATEMENT 0A000 eng "%s is not allowed in stored procedures" ger "%s ist in gespeicherten Prozeduren nicht erlaubt" + hindi "%s को STORED PROCEDURE में इस्तेमाल नहीं किया जा सकता है" ER_UPDATE_LOG_DEPRECATED_IGNORED 42000 eng "The update log is deprecated and replaced by the binary log; SET SQL_LOG_UPDATE has been ignored. This option will be removed in MariaDB 5.6" ger "Das Update-Log ist veraltet und wurde durch das Binär-Log ersetzt. SET SQL_LOG_UPDATE wird ignoriert. 
Diese Option wird in MariaDB 5.6 entfernt" @@ -5109,9 +5272,11 @@ ER_SP_COND_MISMATCH 42000 ER_SP_NORETURN 42000 eng "No RETURN found in FUNCTION %s" ger "Kein RETURN in FUNCTION %s gefunden" + hindi "FUNCTION %s में कोई RETURN नहीं है" ER_SP_NORETURNEND 2F005 eng "FUNCTION %s ended without RETURN" ger "FUNCTION %s endete ohne RETURN" + hindi "FUNCTION %s RETURN के बिना समाप्त हो गया" ER_SP_BAD_CURSOR_QUERY 42000 eng "Cursor statement must be a SELECT" ger "Cursor-Anweisung muss ein SELECT sein" @@ -5121,9 +5286,11 @@ ER_SP_BAD_CURSOR_SELECT 42000 ER_SP_CURSOR_MISMATCH 42000 eng "Undefined CURSOR: %s" ger "Undefinierter CURSOR: %s" + hindi "CURSOR %s अपरिभाषित है" ER_SP_CURSOR_ALREADY_OPEN 24000 eng "Cursor is already open" ger "Cursor ist schon geöffnet" + hindi "CURSOR पहले से ही खुला है" ER_SP_CURSOR_NOT_OPEN 24000 eng "Cursor is not open" ger "Cursor ist nicht geöffnet" @@ -5151,6 +5318,7 @@ ER_SP_DUP_CURS 42000 ER_SP_CANT_ALTER eng "Failed to ALTER %s %s" ger "ALTER %s %s fehlgeschlagen" + hindi "%s %s को ALTER करने में असफल रहे" ER_SP_SUBSELECT_NYI 0A000 eng "Subquery value not supported" ger "Subquery-Wert wird nicht unterstützt" @@ -5259,9 +5427,11 @@ ER_SP_GOTO_IN_HNDLR ER_TRG_ALREADY_EXISTS eng "Trigger already exists" ger "Trigger existiert bereits" + hindi "TRIGGER पहले से मौजूद है" ER_TRG_DOES_NOT_EXIST eng "Trigger does not exist" ger "Trigger existiert nicht" + hindi "TRIGGER मौजूद नहीं है" ER_TRG_ON_VIEW_OR_TEMP_TABLE eng "Trigger's '%-.192s' is view or temporary table" ger "'%-.192s' des Triggers ist View oder temporäre Tabelle" @@ -5277,6 +5447,7 @@ ER_NO_DEFAULT_FOR_FIELD ER_DIVISION_BY_ZERO 22012 eng "Division by 0" ger "Division durch 0" + hindi "0 से विभाजन" ER_TRUNCATED_WRONG_VALUE_FOR_FIELD 22007 eng "Incorrect %-.32s value: '%-.128s' for column '%.192s' at row %lu" ger "Falscher %-.32s-Wert: '%-.128s' für Feld '%.192s' in Zeile %lu" @@ -5314,6 +5485,7 @@ ER_BINLOG_PURGE_PROHIBITED ER_FSEEK_FAIL eng "Failed on fseek()" ger "fseek()
fehlgeschlagen" + hindi "fseek() विफल रहा" ER_BINLOG_PURGE_FATAL_ERR eng "Fatal error during log purge" ger "Schwerwiegender Fehler bei der Log-Bereinigung" @@ -5597,12 +5769,15 @@ ER_REMOVED_SPACES ER_AUTOINC_READ_FAILED eng "Failed to read auto-increment value from storage engine" ger "Lesen des Autoincrement-Werts von der Speicher-Engine fehlgeschlagen" + hindi "स्टोरेज इंजन से auto-increment का मान पढ़ने में असफल रहे" ER_USERNAME eng "user name" ger "Benutzername" + hindi "यूज़र का नाम" ER_HOSTNAME eng "host name" ger "Hostname" + hindi "होस्ट का नाम" ER_WRONG_STRING_LENGTH eng "String '%-.70s' is too long for %s (should be no longer than %d)" ger "String '%-.70s' ist zu lang für %s (sollte nicht länger sein als %d)" @@ -5823,9 +5998,11 @@ ER_FILEGROUP_OPTION_ONLY_ONCE ER_CREATE_FILEGROUP_FAILED eng "Failed to create %s" ger "Anlegen von %s fehlgeschlagen" + hindi "%s को बनाने में असफल रहे" ER_DROP_FILEGROUP_FAILED eng "Failed to drop %s" ger "Löschen von %s fehlgeschlagen" + hindi "%s को हटाने में असफल रहे" ER_TABLESPACE_AUTO_EXTEND_ERROR eng "The handler doesn't support autoextend of tablespaces" ger "Der Handler unterstützt keine automatische Erweiterung (Autoextend) von Tablespaces" @@ -5838,6 +6015,7 @@ ER_SIZE_OVERFLOW_ERROR ER_ALTER_FILEGROUP_FAILED eng "Failed to alter: %s" ger "Änderung von %s fehlgeschlagen" + hindi "%s को ALTER करने में असफल रहे" ER_BINLOG_ROW_LOGGING_FAILED eng "Writing one row to the row-based binary log failed" ger "Schreiben einer Zeilen ins zeilenbasierte Binärlog fehlgeschlagen" @@ -5859,9 +6037,11 @@ ER_EVENT_DOES_NOT_EXIST ER_EVENT_CANT_ALTER eng "Failed to alter event '%-.192s'" ger "Ändern des Events '%-.192s' fehlgeschlagen" + hindi "'%-.192s' EVENT को ALTER करने में असफल रहे" ER_EVENT_DROP_FAILED eng "Failed to drop %s" ger "Löschen von %s fehlgeschlagen" + hindi "%s को हटाने में असफल रहे" ER_EVENT_INTERVAL_NOT_POSITIVE_OR_TOO_BIG eng "INTERVAL is either not positive or too big" ger "INTERVAL ist entweder nicht positiv 
oder zu groß" @@ -5874,6 +6054,7 @@ ER_EVENT_EXEC_TIME_IN_THE_PAST ER_EVENT_OPEN_TABLE_FAILED eng "Failed to open mysql.event" ger "Öffnen von mysql.event fehlgeschlagen" + hindi "mysql.event को खोलने में असफल रहे" ER_EVENT_NEITHER_M_EXPR_NOR_M_AT eng "No datetime expression provided" ger "Kein DATETIME-Ausdruck angegeben" @@ -5885,6 +6066,7 @@ ER_UNUSED_3 ER_EVENT_CANNOT_DELETE eng "Failed to delete the event from mysql.event" ger "Löschen des Events aus mysql.event fehlgeschlagen" + hindi "EVENT को mysql.event से हटाने में असफल रहे" ER_EVENT_COMPILE_ERROR eng "Error during compilation of event's body" ger "Fehler beim Kompilieren des Event-Bodys" @@ -5924,6 +6106,7 @@ ER_UNUSED_13 ER_PARTITION_NO_TEMPORARY eng "Cannot create temporary table with partitions" ger "Anlegen temporärer Tabellen mit Partitionen nicht möglich" + hindi "अस्थाई टेबल को पार्टिशन्स के साथ नहीं बनाया जा सकता" ER_PARTITION_CONST_DOMAIN_ERROR eng "Partition constant is out of partition function domain" ger "Partitionskonstante liegt außerhalb der Partitionsfunktionsdomäne" @@ -5935,6 +6118,7 @@ ER_PARTITION_FUNCTION_IS_NOT_ALLOWED ER_DDL_LOG_ERROR eng "Error in DDL log" ger "Fehler im DDL-Log" + hindi "DDL लॉग में त्रुटि हुई" ER_NULL_IN_VALUES_LESS_THAN eng "Not allowed to use NULL value in VALUES LESS THAN" ger "In VALUES LESS THAN dürfen keine NULL-Werte verwendet werden" @@ -5942,6 +6126,7 @@ ER_NULL_IN_VALUES_LESS_THAN ER_WRONG_PARTITION_NAME eng "Incorrect partition name" ger "Falscher Partitionsname" + hindi "पार्टीशन का नाम गलत है" swe "Felaktigt partitionsnamn" ER_CANT_CHANGE_TX_CHARACTERISTICS 25001 eng "Transaction characteristics can't be changed while a transaction is in progress" @@ -5980,6 +6165,7 @@ ER_ONLY_INTEGERS_ALLOWED ER_UNSUPORTED_LOG_ENGINE eng "Storage engine %s cannot be used for log tables" ger "Speicher-Engine %s kann für Logtabellen nicht verwendet werden" + hindi "स्टोरेज इंजन %s को लॉग टेबल्स के लिए इस्तेमाल नहीं किया जा सकता है" ER_BAD_LOG_STATEMENT eng "You
cannot '%s' a log table if logging is enabled" ger "Sie können eine Logtabelle nicht '%s', wenn Loggen angeschaltet ist" @@ -6055,6 +6241,7 @@ ER_SLAVE_RELAY_LOG_WRITE_FAILURE ER_SLAVE_CREATE_EVENT_FAILURE eng "Failed to create %s" ger "Erzeugen von %s fehlgeschlagen" + hindi "%s को बनाने में असफल रहे" ER_SLAVE_MASTER_COM_FAILURE eng "Master command %s failed: %s" ger "Master-Befehl %s fehlgeschlagen: %s" @@ -6162,22 +6349,27 @@ ER_DATABASE_NAME eng "Database" swe "Databas" ger "Datenbank" + hindi "डेटाबेस" ER_TABLE_NAME eng "Table" swe "Tabell" ger "Tabelle" + hindi "टेबल" ER_PARTITION_NAME eng "Partition" swe "Partition" ger "Partition" + hindi "पार्टीशन" ER_SUBPARTITION_NAME eng "Subpartition" swe "Subpartition" ger "Unterpartition" + hindi "सब-पार्टीशन" ER_TEMPORARY_NAME eng "Temporary" swe "Temporär" ger "Temporär" + hindi "अस्थायी" ER_RENAMED_NAME eng "Renamed" swe "Namnändrad" @@ -6299,6 +6491,7 @@ ER_INSIDE_TRANSACTION_PREVENTS_SWITCH_BINLOG_FORMAT eng "Cannot modify @@session.binlog_format inside a transaction" ER_PATH_LENGTH eng "The path specified for %.64s is too long" + hindi "%.64s के लिए निर्दिष्ट पथ बहुत लंबा है" ER_WARN_DEPRECATED_SYNTAX_NO_REPLACEMENT eng "'%s' is deprecated and will be removed in a future release" ger "'%s' ist veraltet und wird in einer zukünftigen Version entfernt werden" @@ -6308,6 +6501,7 @@ ER_WRONG_NATIVE_TABLE_STRUCTURE ER_WRONG_PERFSCHEMA_USAGE eng "Invalid performance_schema usage" + hindi "performance_schema का अवैध उपयोग" ER_WARN_I_S_SKIPPED_TABLE eng "Table '%s'.'%s' was skipped since its definition is being modified by concurrent DDL statement" @@ -6344,6 +6538,7 @@ ER_STORED_FUNCTION_PREVENTS_SWITCH_SQL_LOG_BIN ER_FAILED_READ_FROM_PAR_FILE eng "Failed to read from the .par file" + hindi ".par फ़ाइल से पढ़ने में असफल रहे" swe "Misslyckades läsa från .par filen" ER_VALUES_IS_NOT_INT_TYPE_ERROR @@ -6359,6 +6554,7 @@ ER_ACCESS_DENIED_NO_PASSWORD_ERROR 28000 fre "Accès refusé pour l'utilisateur: '%s'@'%s'" ger "Benutzer
'%s'@'%s' hat keine Zugriffsberechtigung" greek "Δεν επιτέρεται η πρόσβαση στο χρήστη: '%s'@'%s'" + hindi "यूज़र '%s'@'%s' को अनुमति नहीं है" hun "A(z) '%s'@'%s' felhasznalo szamara tiltott eleres" ita "Accesso non consentito per l'utente: '%s'@'%s'" kor "'%s'@'%s' 사용자는 접근이 거부 되었습니다." @@ -6935,6 +7131,7 @@ ER_ACCESS_DENIED_CHANGE_USER_ERROR 28000 ER_INNODB_READ_ONLY eng "InnoDB is in read only mode" + hindi "InnoDB केवल READ-ONLY मोड में है" ER_STOP_SLAVE_SQL_THREAD_TIMEOUT eng "STOP SLAVE command execution is incomplete: Slave SQL thread got the stop signal, thread is busy, SQL thread will stop once the current task is complete" @@ -6987,10 +7184,13 @@ ER_UNUSED_21 eng "" ER_UNSUPPORTED_ENGINE_FOR_GENERATED_COLUMNS eng "%s storage engine does not support generated columns" + hindi "स्टोरेज इंजन %s COMPUTED कॉलम्स को सपोर्ट नहीं करता" ER_UNKNOWN_OPTION eng "Unknown option '%-.64s'" + hindi "अज्ञात विकल्प '%-.64s'" ER_BAD_OPTION_VALUE eng "Incorrect value '%-.64s' for option '%-.64s'" + hindi "गलत मान '%-.64s' विकल्प '%-.64s' के लिए" ER_UNUSED_6 eng "You should never see it" ER_UNUSED_7 @@ -7013,14 +7213,17 @@ ER_DYN_COL_WRONG_CHARSET eng "Dynamic column contains unknown character set" ER_ILLEGAL_SUBQUERY_OPTIMIZER_SWITCHES eng "At least one of the 'in_to_exists' or 'materialization' optimizer_switch flags must be 'on'" + hindi "कम से कम 'in_to_exists' या 'materialization' optimizer_switch फ्लैग 'ON' होना चाहिए" ER_QUERY_CACHE_IS_DISABLED eng "Query cache is disabled (resize or similar command in progress); repeat this command later" ER_QUERY_CACHE_IS_GLOBALY_DISABLED eng "Query cache is globally disabled and you can't enable it only for this session" + hindi "क्वेरी कैश ग्लोबल स्तर पर DISABLED है और आप इसे केवल सत्र के लिए ENABLE नहीं कर सकते" ER_VIEW_ORDERBY_IGNORED eng "View '%-.192s'.'%-.192s' ORDER BY clause ignored because there is other ORDER BY clause already" ER_CONNECTION_KILLED 70100 eng "Connection was killed" + hindi "कनेक्शन को समाप्त कर दिया गया है" 
ER_UNUSED_12 eng "You should never see it" ER_INSIDE_TRANSACTION_PREVENTS_SWITCH_SKIP_REPLICATION @@ -7031,6 +7234,7 @@ ER_QUERY_EXCEEDED_ROWS_EXAMINED_LIMIT eng "Query execution was interrupted. The query examined at least %llu rows, which exceeds LIMIT ROWS EXAMINED (%llu). The query result may be incomplete" ER_NO_SUCH_TABLE_IN_ENGINE 42S02 eng "Table '%-.192s.%-.192s' doesn't exist in engine" + hindi "टेबल '%-.192s.%-.192s' इंजन में मौजूद नहीं है" swe "Det finns ingen tabell som heter '%-.192s.%-.192s' i handlern" ER_TARGET_NOT_EXPLAINABLE eng "Target is not running an EXPLAINable command" @@ -7084,20 +7288,25 @@ ER_BINLOG_MUST_BE_EMPTY ER_NO_SUCH_QUERY eng "Unknown query id: %lld" ger "Unbekannte Abfrage-ID: %lld" + hindi "अज्ञात क्वेरी ID: %lld" rus "Неизвестный номер запроса: %lld" ER_BAD_BASE64_DATA eng "Bad base64 data as position %u" ER_INVALID_ROLE OP000 eng "Invalid role specification %`s" + hindi "अमान्य रोल विनिर्देश %`s" rum "Rolul %`s este invalid" ER_INVALID_CURRENT_USER 0L000 eng "The current user is invalid" + hindi "वर्तमान यूज़र अमान्य है" rum "Utilizatorul curent este invalid" ER_CANNOT_GRANT_ROLE eng "Cannot grant role '%s' to: %s" + hindi "रोल '%s', %s को प्रदान नहीं कर सकते" rum "Rolul '%s' nu poate fi acordat catre: %s" ER_CANNOT_REVOKE_ROLE eng "Cannot revoke role '%s' from: %s" + hindi "रोल '%s', %s से हटाया नहीं जा सका" rum "Rolul '%s' nu poate fi revocat de la: %s" ER_CHANGE_SLAVE_PARALLEL_THREADS_ACTIVE eng "Cannot change @@slave_parallel_threads while another change is in progress" @@ -7105,12 +7314,15 @@ ER_PRIOR_COMMIT_FAILED eng "Commit failed due to failure of an earlier commit on which this one depends" ER_IT_IS_A_VIEW 42S02 eng "'%-.192s' is a view" + hindi "'%-.192s' एक VIEW है" ER_SLAVE_SKIP_NOT_IN_GTID eng "When using parallel replication and GTID with multiple replication domains, @@sql_slave_skip_counter can not be used. 
Instead, setting @@gtid_slave_pos explicitly can be used to skip to after a given GTID position" ER_TABLE_DEFINITION_TOO_BIG eng "The definition for table %`s is too big" + hindi "टेबल %`s की परिभाषा बहुत बड़ी है" ER_PLUGIN_INSTALLED eng "Plugin '%-.192s' already installed" + hindi "प्लग-इन '%-.192s' पहले से ही इन्स्टॉल्ड है" rus "Плагин '%-.192s' уже установлен" ER_STATEMENT_TIMEOUT 70100 eng "Query execution was interrupted (max_statement_time exceeded)" @@ -7122,22 +7334,29 @@ ER_UNUSED_17 eng "You should never see it" ER_USER_CREATE_EXISTS eng "Can't create user '%-.64s'@'%-.64s'; it already exists" + hindi "यूज़र '%-.64s'@'%-.64s' को नहीं बना सकते; यह पहले से ही मौजूद है" ER_USER_DROP_EXISTS eng "Can't drop user '%-.64s'@'%-.64s'; it doesn't exist" + hindi "यूज़र '%-.64s'@'%-.64s' को ड्रॉप नहीं कर सकते; यह मौजूद नहीं है" ER_ROLE_CREATE_EXISTS eng "Can't create role '%-.64s'; it already exists" + hindi "रोल '%-.64s' को नहीं बना सकते; यह पहले से ही मौजूद है" ER_ROLE_DROP_EXISTS eng "Can't drop role '%-.64s'; it doesn't exist" + hindi "रोल '%-.64s' को ड्रॉप नहीं कर सकते; यह मौजूद नहीं है" ER_CANNOT_CONVERT_CHARACTER eng "Cannot convert '%s' character 0x%-.64s to '%s'" ER_INVALID_DEFAULT_VALUE_FOR_FIELD 22007 eng "Incorrect default value '%-.128s' for column '%.192s'" + hindi "गलत डिफ़ॉल्ट मान '%-.128s' कॉलम '%.192s' के लिए" ER_KILL_QUERY_DENIED_ERROR eng "You are not owner of query %lu" ger "Sie sind nicht Eigentümer von Abfrage %lu" + hindi "आप क्वेरी %lu के OWNER नहीं हैं" rus "Вы не являетесь владельцем запроса %lu" ER_NO_EIS_FOR_FIELD eng "Engine-independent statistics are not collected for column '%s'" + hindi "Engine-independent सांख्यिकी कॉलम '%s' के लिए एकत्रित नहीं किया जा रहा है" ukr "Незалежна від типу таблиці статистика не збирається для стовбця '%s'" ER_WARN_AGGFUNC_DEPENDENCE eng "Aggregate function '%-.192s)' of SELECT #%d belongs to SELECT #%d" From 7507000ce2f3197c177a603d39ecfbdf9eeae21e Mon Sep 17 00:00:00 2001 From: Sergey Vojtovich Date: Thu, 
3 Aug 2017 17:28:46 +0400 Subject: [PATCH 21/34] Support for server error messages in Hindi This is an addition to original patch: activate hi_IN error messages. --- sql/sql_locale.cc | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/sql/sql_locale.cc b/sql/sql_locale.cc index 58443a9a97782..a2efa5e072cf2 100644 --- a/sql/sql_locale.cc +++ b/sql/sql_locale.cc @@ -32,7 +32,7 @@ enum err_msgs_index { en_US= 0, cs_CZ, da_DK, nl_NL, et_EE, fr_FR, de_DE, el_GR, hu_HU, it_IT, ja_JP, ko_KR, no_NO, nn_NO, pl_PL, pt_PT, ro_RO, ru_RU, sr_RS, sk_SK, - es_ES, sv_SE, uk_UA + es_ES, sv_SE, uk_UA, hi_IN } ERR_MSGS_INDEX; @@ -61,6 +61,7 @@ MY_LOCALE_ERRMSGS global_errmsgs[]= {"spanish", NULL}, {"swedish", NULL}, {"ukrainian", NULL}, + {"hindi", NULL}, {NULL, NULL} }; @@ -889,7 +890,7 @@ MY_LOCALE my_locale_hi_IN '.', /* decimal point hi_IN */ ',', /* thousands_sep hi_IN */ "\x03", /* grouping hi_IN */ - &global_errmsgs[en_US] + &global_errmsgs[hi_IN] ); /***** LOCALE END hi_IN *****/ From bcc10a5a447805ce64aa13ee6a037c1618219616 Mon Sep 17 00:00:00 2001 From: Sergey Vojtovich Date: Thu, 3 Aug 2017 17:31:05 +0400 Subject: [PATCH 22/34] Support for server error messages in Hindi Fixed plugins.locales failure. --- mysql-test/suite/plugins/r/locales.result | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/mysql-test/suite/plugins/r/locales.result b/mysql-test/suite/plugins/r/locales.result index 881f91e0ac53f..106bf22923cb2 100644 --- a/mysql-test/suite/plugins/r/locales.result +++ b/mysql-test/suite/plugins/r/locales.result @@ -25,7 +25,7 @@ ID NAME DESCRIPTION MAX_MONTH_NAME_LENGTH MAX_DAY_NAME_LENGTH DECIMAL_POINT THOU 22 gl_ES Galician - Galician 8 8 , english 23 gu_IN Gujarati - India 10 8 . , english 24 he_IL Hebrew - Israel 7 5 . , english -25 hi_IN Hindi - India 7 9 . , english +25 hi_IN Hindi - India 7 9 . , hindi 26 hr_HR Croatian - Croatia 8 11 , english 27 hu_HU Hungarian - Hungary 10 9 , . 
hungarian 28 id_ID Indonesian - Indonesia 9 6 , . english @@ -138,7 +138,7 @@ Id Name Description Error_Message_Language 22 gl_ES Galician - Galician english 23 gu_IN Gujarati - India english 24 he_IL Hebrew - Israel english -25 hi_IN Hindi - India english +25 hi_IN Hindi - India hindi 26 hr_HR Croatian - Croatia english 27 hu_HU Hungarian - Hungary hungarian 28 id_ID Indonesian - Indonesia english From eda033255a59ee5ea098800e38a341920b3d5769 Mon Sep 17 00:00:00 2001 From: Sergei Petrunia Date: Thu, 3 Aug 2017 15:16:40 +0000 Subject: [PATCH 23/34] Make "SET @@rocksdb_bulk_load=0" return an error instead of crashing the server - This is more in line with MariaDB environment - And help with rocksdb.bulk_load_errors test, too --- sql/sql_parse.cc | 3 ++- storage/rocksdb/ha_rocksdb.cc | 9 +++++++- .../rocksdb/r/bulk_load_errors.result | 12 ++++++++++- .../rocksdb/t/bulk_load_errors.test | 21 ++++++++----------- 4 files changed, 30 insertions(+), 15 deletions(-) diff --git a/sql/sql_parse.cc b/sql/sql_parse.cc index 4f0610cffcb2f..22a9970a45e65 100644 --- a/sql/sql_parse.cc +++ b/sql/sql_parse.cc @@ -4821,7 +4821,8 @@ mysql_execute_command(THD *thd) goto error; if (!(res= sql_set_variables(thd, lex_var_list, true))) { - my_ok(thd); + if (!thd->is_error()) + my_ok(thd); } else { diff --git a/storage/rocksdb/ha_rocksdb.cc b/storage/rocksdb/ha_rocksdb.cc index a0b8cca0791d1..113efa662b97f 100644 --- a/storage/rocksdb/ha_rocksdb.cc +++ b/storage/rocksdb/ha_rocksdb.cc @@ -11493,7 +11493,14 @@ void rocksdb_set_bulk_load(THD *const thd, struct st_mysql_sys_var *const var sql_print_error("RocksDB: Error %d finalizing last SST file while " "setting bulk loading variable", rc); - abort_with_stack_traces(); + /* + MariaDB doesn't do the following: + abort_with_stack_traces(); + because it doesn't seem a good idea to crash a server when a user makes + a mistake. + Instead, we return an error to the user. 
The error has already been + produced inside ha_rocksdb::finalize_bulk_load(). + */ } } diff --git a/storage/rocksdb/mysql-test/rocksdb/r/bulk_load_errors.result b/storage/rocksdb/mysql-test/rocksdb/r/bulk_load_errors.result index 31562d1da1036..eced62bd04327 100644 --- a/storage/rocksdb/mysql-test/rocksdb/r/bulk_load_errors.result +++ b/storage/rocksdb/mysql-test/rocksdb/r/bulk_load_errors.result @@ -14,6 +14,16 @@ INSERT INTO t1 VALUES(1); INSERT INTO t1 VALUES(2); INSERT INTO t1 VALUES(20); INSERT INTO t1 VALUES(21); +# +# In MyRocks, the following statement will intentionally crash the server. +# In MariaDB, it will cause an error SET rocksdb_bulk_load=0; -ERROR HY000: Lost connection to MySQL server during query +ERROR HY000: Rows inserted during bulk load must not overlap existing rows +# +# Despite the error, bulk load operation is over so the variable value +# will be 0: +select @@rocksdb_bulk_load; +@@rocksdb_bulk_load +0 +call mtr.add_suppression('finalizing last SST file while setting bulk loading variable'); DROP TABLE t1; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/bulk_load_errors.test b/storage/rocksdb/mysql-test/rocksdb/t/bulk_load_errors.test index 284e29d1f5a98..bb3d8164200bb 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/bulk_load_errors.test +++ b/storage/rocksdb/mysql-test/rocksdb/t/bulk_load_errors.test @@ -20,20 +20,17 @@ INSERT INTO t1 VALUES(2); INSERT INTO t1 VALUES(20); INSERT INTO t1 VALUES(21); -# This last crashes the server (intentionally) because we can't return any -# error information from a SET = ---exec echo "wait" > $MYSQLTEST_VARDIR/tmp/mysqld.1.expect ---error 2013 +--echo # +--echo # In MyRocks, the following statement will intentionally crash the server. 
+--echo # In MariaDB, it will cause an error +--error ER_OVERLAPPING_KEYS SET rocksdb_bulk_load=0; ---exec grep "RocksDB: Error 197 finalizing last SST file while setting bulk loading variable" $MYSQLTEST_VARDIR/log/mysqld.1.err | cut -d] -f2 ---exec echo "" >$MYSQLTEST_VARDIR/log/mysqld.1.err +--echo # +--echo # Despite the error, bulk load operation is over so the variable value +--echo # will be 0: +select @@rocksdb_bulk_load; -# restart the crashed server ---exec echo "restart" > $MYSQLTEST_VARDIR/tmp/mysqld.1.expect - -# Make sure the error exists in the .err log and then restart the server ---enable_reconnect ---source include/wait_until_connected_again.inc +call mtr.add_suppression('finalizing last SST file while setting bulk loading variable'); DROP TABLE t1; From 6d51817d2cd79edbc15328bef532a5375f184219 Mon Sep 17 00:00:00 2001 From: Sergey Vojtovich Date: Fri, 4 Aug 2017 13:33:48 +0400 Subject: [PATCH 24/34] Support for server error messages in Hindi Include Hindi error messages to debian packages. 
--- debian/mariadb-server-core-10.2.install | 1 + 1 file changed, 1 insertion(+) diff --git a/debian/mariadb-server-core-10.2.install b/debian/mariadb-server-core-10.2.install index 9ab77b2f95acb..d882bd53f52a0 100644 --- a/debian/mariadb-server-core-10.2.install +++ b/debian/mariadb-server-core-10.2.install @@ -10,6 +10,7 @@ usr/share/mysql/estonian usr/share/mysql/french usr/share/mysql/german usr/share/mysql/greek +usr/share/mysql/hindi usr/share/mysql/hungarian usr/share/mysql/italian usr/share/mysql/japanese From 7925a4bce885401e3b3ea31862efa91026197889 Mon Sep 17 00:00:00 2001 From: Sergei Petrunia Date: Sat, 5 Aug 2017 13:57:17 +0000 Subject: [PATCH 25/34] More comments --- storage/rocksdb/rdb_io_watchdog.cc | 1 + 1 file changed, 1 insertion(+) diff --git a/storage/rocksdb/rdb_io_watchdog.cc b/storage/rocksdb/rdb_io_watchdog.cc index cb58cc997fada..a599ba58aec50 100644 --- a/storage/rocksdb/rdb_io_watchdog.cc +++ b/storage/rocksdb/rdb_io_watchdog.cc @@ -21,6 +21,7 @@ #include #include +/* Rdb_io_watchdog doesn't work on Windows [yet] */ #ifndef _WIN32 namespace myrocks { From 11948d75862941e2780e550cbc5411895e1cc1c7 Mon Sep 17 00:00:00 2001 From: Alexey Botchkov Date: Sun, 6 Aug 2017 16:27:37 +0400 Subject: [PATCH 26/34] MDEV-12180 ST_GeomFromGeoJSON option argument appears to have no effect. Implement the 'option' argument for the ST_GeomFromGeoJSON. 
--- mysql-test/r/gis-json.result | 13 ++++++++++ mysql-test/t/gis-json.test | 10 ++++++++ sql/item_geofunc.cc | 23 +++++++++++++++--- sql/spatial.cc | 47 +++++++++++++++++++++--------------- sql/spatial.h | 22 +++++++++-------- 5 files changed, 83 insertions(+), 32 deletions(-) diff --git a/mysql-test/r/gis-json.result b/mysql-test/r/gis-json.result index 8625a5bfb740a..8d7c2d12172fb 100644 --- a/mysql-test/r/gis-json.result +++ b/mysql-test/r/gis-json.result @@ -61,6 +61,19 @@ POINT(102 0.5) SELECT st_astext(st_geomfromgeojson('{ "type": "FeatureCollection", "features": [{ "type": "Feature", "geometry": { "type": "Point", "coordinates": [102.0, 0.5] }, "properties": { "prop0": "value0" } }]}')); st_astext(st_geomfromgeojson('{ "type": "FeatureCollection", "features": [{ "type": "Feature", "geometry": { "type": "Point", "coordinates": [102.0, 0.5] }, "properties": { "prop0": "value0" } }]}')) GEOMETRYCOLLECTION(POINT(102 0.5)) +SELECT ST_AsText(ST_GeomFromGeoJSON('{ "type": "Point", "coordinates": [5.3, 15.0, 4.3]}',5)); +ERROR HY000: Incorrect option value: '5' for function ST_GeometryFromJSON +SELECT ST_AsText(ST_GeomFromGeoJSON('{ "type": "Point", "coordinates": [5.3, 15.0, 4.3]}',1)); +ERROR 22023: Invalid GIS data provided to function ST_GeometryFromJSON. 
+SELECT ST_AsText(ST_GeomFromGeoJSON('{ "type": "Point", "coordinates": [5.3, 15.0, 4.3]}',2)); +ST_AsText(ST_GeomFromGeoJSON('{ "type": "Point", "coordinates": [5.3, 15.0, 4.3]}',2)) +POINT(5.3 15) +SELECT ST_AsText(ST_GeomFromGeoJSON('{ "type": "Point", "coordinates": [5.3, 15.0, 4.3]}',3)); +ST_AsText(ST_GeomFromGeoJSON('{ "type": "Point", "coordinates": [5.3, 15.0, 4.3]}',3)) +POINT(5.3 15) +SELECT ST_AsText(ST_GeomFromGeoJSON('{ "type": "Point", "coordinates": [5.3, 15.0, 4.3]}',4)); +ST_AsText(ST_GeomFromGeoJSON('{ "type": "Point", "coordinates": [5.3, 15.0, 4.3]}',4)) +POINT(5.3 15) # # End of 10.2 tests # diff --git a/mysql-test/t/gis-json.test b/mysql-test/t/gis-json.test index 645c21bf0119f..67674c9b33fd4 100644 --- a/mysql-test/t/gis-json.test +++ b/mysql-test/t/gis-json.test @@ -23,6 +23,16 @@ SELECT st_astext(st_geomfromgeojson('{"type""point"}')); SELECT st_astext(st_geomfromgeojson('{ "type": "Feature", "geometry": { "type": "Point", "coordinates": [102.0, 0.5] } }')); SELECT st_astext(st_geomfromgeojson('{ "type": "FeatureCollection", "features": [{ "type": "Feature", "geometry": { "type": "Point", "coordinates": [102.0, 0.5] }, "properties": { "prop0": "value0" } }]}')); +--error ER_WRONG_VALUE_FOR_TYPE +SELECT ST_AsText(ST_GeomFromGeoJSON('{ "type": "Point", "coordinates": [5.3, 15.0, 4.3]}',5)); + +--error ER_GIS_INVALID_DATA +SELECT ST_AsText(ST_GeomFromGeoJSON('{ "type": "Point", "coordinates": [5.3, 15.0, 4.3]}',1)); + +SELECT ST_AsText(ST_GeomFromGeoJSON('{ "type": "Point", "coordinates": [5.3, 15.0, 4.3]}',2)); +SELECT ST_AsText(ST_GeomFromGeoJSON('{ "type": "Point", "coordinates": [5.3, 15.0, 4.3]}',3)); +SELECT ST_AsText(ST_GeomFromGeoJSON('{ "type": "Point", "coordinates": [5.3, 15.0, 4.3]}',4)); + --echo # --echo # End of 10.2 tests --echo # diff --git a/sql/item_geofunc.cc b/sql/item_geofunc.cc index 560d822bea758..8c9b61d98a177 100644 --- a/sql/item_geofunc.cc +++ b/sql/item_geofunc.cc @@ -131,13 +131,27 @@ String 
*Item_func_geometry_from_json::val_str(String *str) Geometry_buffer buffer; String *js= args[0]->val_str_ascii(&tmp_js); uint32 srid= 0; + longlong options= 0; json_engine_t je; if ((null_value= args[0]->null_value)) return 0; - if ((arg_count == 2) && !args[1]->null_value) - srid= (uint32)args[1]->val_int(); + if (arg_count > 1 && !args[1]->null_value) + { + options= args[1]->val_int(); + if (options > 4 || options < 1) + { + String *sv= args[1]->val_str(&tmp_js); + my_error(ER_WRONG_VALUE_FOR_TYPE, MYF(0), + "option", sv->c_ptr(), "ST_GeometryFromJSON"); + null_value= 1; + return 0; + } + } + + if ((arg_count == 3) && !args[2]->null_value) + srid= (uint32)args[2]->val_int(); str->set_charset(&my_charset_bin); if (str->reserve(SRID_SIZE, 512)) @@ -148,7 +162,7 @@ String *Item_func_geometry_from_json::val_str(String *str) json_scan_start(&je, js->charset(), (const uchar *) js->ptr(), (const uchar *) js->end()); - if ((null_value= !Geometry::create_from_json(&buffer, &je, str))) + if ((null_value= !Geometry::create_from_json(&buffer, &je, options==1, str))) { int code= 0; @@ -163,6 +177,9 @@ String *Item_func_geometry_from_json::val_str(String *str) case Geometry::GEOJ_POLYGON_NOT_CLOSED: code= ER_GEOJSON_NOT_CLOSED; break; + case Geometry::GEOJ_DIMENSION_NOT_SUPPORTED: + my_error(ER_GIS_INVALID_DATA, MYF(0), "ST_GeometryFromJSON"); + break; default: report_json_error_ex(js, &je, func_name(), 0, Sql_condition::WARN_LEVEL_WARN); return NULL; diff --git a/sql/spatial.cc b/sql/spatial.cc index 7c9d8bb771e55..095f7ff81dba6 100644 --- a/sql/spatial.cc +++ b/sql/spatial.cc @@ -339,7 +339,7 @@ Geometry *Geometry::create_from_wkb(Geometry_buffer *buffer, Geometry *Geometry::create_from_json(Geometry_buffer *buffer, - json_engine_t *je, String *res) + json_engine_t *je, bool er_on_3D, String *res) { Class_info *ci= NULL; const uchar *coord_start= NULL, *geom_start= NULL, @@ -514,14 +514,14 @@ Geometry *Geometry::create_from_json(Geometry_buffer *buffer, result= 
(*ci->m_create_func)(buffer->data); res->q_append((char) wkb_ndr); res->q_append((uint32) result->get_class_info()->m_type_id); - if (result->init_from_json(je, res)) + if (result->init_from_json(je, er_on_3D, res)) goto err_return; return result; handle_geometry_key: json_scan_start(je, je->s.cs, geometry_start, je->s.str_end); - return create_from_json(buffer, je, res); + return create_from_json(buffer, je, er_on_3D, res); err_return: return NULL; @@ -780,7 +780,8 @@ uint Gis_point::init_from_wkb(const char *wkb, uint len, } -static int read_point_from_json(json_engine_t *je, double *x, double *y) +static int read_point_from_json(json_engine_t *je, bool er_on_3D, + double *x, double *y) { int n_coord= 0, err; double tmp, *d; @@ -803,14 +804,17 @@ static int read_point_from_json(json_engine_t *je, double *x, double *y) n_coord++; } - return 0; + if (n_coord <= 2 || !er_on_3D) + return 0; + je->s.error= Geometry::GEOJ_DIMENSION_NOT_SUPPORTED; + return 1; bad_coordinates: je->s.error= Geometry::GEOJ_INCORRECT_GEOJSON; return 1; } -bool Gis_point::init_from_json(json_engine_t *je, String *wkb) +bool Gis_point::init_from_json(json_engine_t *je, bool er_on_3D, String *wkb) { double x, y; if (json_read_value(je)) @@ -822,7 +826,7 @@ bool Gis_point::init_from_json(json_engine_t *je, String *wkb) return TRUE; } - if (read_point_from_json(je, &x, &y) || + if (read_point_from_json(je, er_on_3D, &x, &y) || wkb->reserve(POINT_DATA_SIZE)) return TRUE; @@ -971,7 +975,8 @@ uint Gis_line_string::init_from_wkb(const char *wkb, uint len, } -bool Gis_line_string::init_from_json(json_engine_t *je, String *wkb) +bool Gis_line_string::init_from_json(json_engine_t *je, bool er_on_3D, + String *wkb) { uint32 n_points= 0; uint32 np_pos= wkb->length(); @@ -994,7 +999,7 @@ bool Gis_line_string::init_from_json(json_engine_t *je, String *wkb) { DBUG_ASSERT(je->state == JST_VALUE); - if (p.init_from_json(je, wkb)) + if (p.init_from_json(je, er_on_3D, wkb)) return TRUE; n_points++; } @@ -1364,7 
+1369,7 @@ uint Gis_polygon::init_from_wkb(const char *wkb, uint len, wkbByteOrder bo, } -bool Gis_polygon::init_from_json(json_engine_t *je, String *wkb) +bool Gis_polygon::init_from_json(json_engine_t *je, bool er_on_3D, String *wkb) { uint32 n_linear_rings= 0; uint32 lr_pos= wkb->length(); @@ -1389,7 +1394,7 @@ bool Gis_polygon::init_from_json(json_engine_t *je, String *wkb) DBUG_ASSERT(je->state == JST_VALUE); uint32 ls_pos=wkb->length(); - if (ls.init_from_json(je, wkb)) + if (ls.init_from_json(je, er_on_3D, wkb)) return TRUE; ls.set_data_ptr(wkb->ptr() + ls_pos, wkb->length() - ls_pos); if (ls.is_closed(&closed) || !closed) @@ -1855,7 +1860,8 @@ uint Gis_multi_point::init_from_wkb(const char *wkb, uint len, wkbByteOrder bo, } -bool Gis_multi_point::init_from_json(json_engine_t *je, String *wkb) +bool Gis_multi_point::init_from_json(json_engine_t *je, bool er_on_3D, + String *wkb) { uint32 n_points= 0; uint32 np_pos= wkb->length(); @@ -1883,7 +1889,7 @@ bool Gis_multi_point::init_from_json(json_engine_t *je, String *wkb) wkb->q_append((char) wkb_ndr); wkb->q_append((uint32) wkb_point); - if (p.init_from_json(je, wkb)) + if (p.init_from_json(je, er_on_3D, wkb)) return TRUE; n_points++; } @@ -2123,7 +2129,8 @@ uint Gis_multi_line_string::init_from_wkb(const char *wkb, uint len, } -bool Gis_multi_line_string::init_from_json(json_engine_t *je, String *wkb) +bool Gis_multi_line_string::init_from_json(json_engine_t *je, bool er_on_3D, + String *wkb) { uint32 n_line_strings= 0; uint32 ls_pos= wkb->length(); @@ -2151,7 +2158,7 @@ bool Gis_multi_line_string::init_from_json(json_engine_t *je, String *wkb) wkb->q_append((char) wkb_ndr); wkb->q_append((uint32) wkb_linestring); - if (ls.init_from_json(je, wkb)) + if (ls.init_from_json(je, er_on_3D, wkb)) return TRUE; n_line_strings++; @@ -2511,7 +2518,8 @@ uint Gis_multi_polygon::init_from_opresult(String *bin, } -bool Gis_multi_polygon::init_from_json(json_engine_t *je, String *wkb) +bool 
Gis_multi_polygon::init_from_json(json_engine_t *je, bool er_on_3D, + String *wkb) { uint32 n_polygons= 0; int np_pos= wkb->length(); @@ -2539,7 +2547,7 @@ bool Gis_multi_polygon::init_from_json(json_engine_t *je, String *wkb) wkb->q_append((char) wkb_ndr); wkb->q_append((uint32) wkb_polygon); - if (p.init_from_json(je, wkb)) + if (p.init_from_json(je, er_on_3D, wkb)) return TRUE; n_polygons++; @@ -2986,7 +2994,8 @@ uint Gis_geometry_collection::init_from_wkb(const char *wkb, uint len, } -bool Gis_geometry_collection::init_from_json(json_engine_t *je, String *wkb) +bool Gis_geometry_collection::init_from_json(json_engine_t *je, bool er_on_3D, + String *wkb) { uint32 n_objects= 0; uint32 no_pos= wkb->length(); @@ -3012,7 +3021,7 @@ bool Gis_geometry_collection::init_from_json(json_engine_t *je, String *wkb) DBUG_ASSERT(je->state == JST_VALUE); - if (!(g= create_from_json(&buffer, je, wkb))) + if (!(g= create_from_json(&buffer, je, er_on_3D, wkb))) return TRUE; *je= sav_je; diff --git a/sql/spatial.h b/sql/spatial.h index 3858c0d2e51c4..45f335596c802 100644 --- a/sql/spatial.h +++ b/sql/spatial.h @@ -255,6 +255,7 @@ class Geometry GEOJ_INCORRECT_GEOJSON= 1, GEOJ_TOO_FEW_POINTS= 2, GEOJ_POLYGON_NOT_CLOSED= 3, + GEOJ_DIMENSION_NOT_SUPPORTED= 4, }; @@ -281,7 +282,8 @@ class Geometry virtual uint init_from_opresult(String *bin, const char *opres, uint res_len) { return init_from_wkb(opres + 4, UINT_MAX32, wkb_ndr, bin) + 4; } - virtual bool init_from_json(json_engine_t *je, String *wkb) {return true;} + virtual bool init_from_json(json_engine_t *je, bool er_on_3D, String *wkb) + { return true; } virtual bool get_data_as_wkt(String *txt, const char **end) const=0; virtual bool get_data_as_json(String *txt, uint max_dec_digits, @@ -315,8 +317,8 @@ class Geometry bool init_stream=1); static Geometry *create_from_wkb(Geometry_buffer *buffer, const char *wkb, uint32 len, String *res); - static Geometry *create_from_json(Geometry_buffer *buffer, - json_engine_t *je, String 
*res); + static Geometry *create_from_json(Geometry_buffer *buffer, json_engine_t *je, + bool er_on_3D, String *res); static Geometry *create_from_opresult(Geometry_buffer *g_buf, String *res, Gcalc_result_receiver &rr); int as_wkt(String *wkt, const char **end); @@ -395,7 +397,7 @@ class Gis_point: public Geometry uint32 get_data_size() const; bool init_from_wkt(Gis_read_stream *trs, String *wkb); uint init_from_wkb(const char *wkb, uint len, wkbByteOrder bo, String *res); - bool init_from_json(json_engine_t *je, String *wkb); + bool init_from_json(json_engine_t *je, bool er_on_3D, String *wkb); bool get_data_as_wkt(String *txt, const char **end) const; bool get_data_as_json(String *txt, uint max_dec_digits, const char **end) const; @@ -450,7 +452,7 @@ class Gis_line_string: public Geometry uint32 get_data_size() const; bool init_from_wkt(Gis_read_stream *trs, String *wkb); uint init_from_wkb(const char *wkb, uint len, wkbByteOrder bo, String *res); - bool init_from_json(json_engine_t *je, String *wkb); + bool init_from_json(json_engine_t *je, bool er_on_3D, String *wkb); bool get_data_as_wkt(String *txt, const char **end) const; bool get_data_as_json(String *txt, uint max_dec_digits, const char **end) const; @@ -484,7 +486,7 @@ class Gis_polygon: public Geometry bool init_from_wkt(Gis_read_stream *trs, String *wkb); uint init_from_wkb(const char *wkb, uint len, wkbByteOrder bo, String *res); uint init_from_opresult(String *bin, const char *opres, uint res_len); - bool init_from_json(json_engine_t *je, String *wkb); + bool init_from_json(json_engine_t *je, bool er_on_3D, String *wkb); bool get_data_as_wkt(String *txt, const char **end) const; bool get_data_as_json(String *txt, uint max_dec_digits, const char **end) const; @@ -521,7 +523,7 @@ class Gis_multi_point: public Geometry bool init_from_wkt(Gis_read_stream *trs, String *wkb); uint init_from_wkb(const char *wkb, uint len, wkbByteOrder bo, String *res); uint init_from_opresult(String *bin, const char *opres, 
uint res_len); - bool init_from_json(json_engine_t *je, String *wkb); + bool init_from_json(json_engine_t *je, bool er_on_3D, String *wkb); bool get_data_as_wkt(String *txt, const char **end) const; bool get_data_as_json(String *txt, uint max_dec_digits, const char **end) const; @@ -550,7 +552,7 @@ class Gis_multi_line_string: public Geometry bool init_from_wkt(Gis_read_stream *trs, String *wkb); uint init_from_wkb(const char *wkb, uint len, wkbByteOrder bo, String *res); uint init_from_opresult(String *bin, const char *opres, uint res_len); - bool init_from_json(json_engine_t *je, String *wkb); + bool init_from_json(json_engine_t *je, bool er_on_3D, String *wkb); bool get_data_as_wkt(String *txt, const char **end) const; bool get_data_as_json(String *txt, uint max_dec_digits, const char **end) const; @@ -580,7 +582,7 @@ class Gis_multi_polygon: public Geometry uint32 get_data_size() const; bool init_from_wkt(Gis_read_stream *trs, String *wkb); uint init_from_wkb(const char *wkb, uint len, wkbByteOrder bo, String *res); - bool init_from_json(json_engine_t *je, String *wkb); + bool init_from_json(json_engine_t *je, bool er_on_3D, String *wkb); bool get_data_as_wkt(String *txt, const char **end) const; bool get_data_as_json(String *txt, uint max_dec_digits, const char **end) const; @@ -612,7 +614,7 @@ class Gis_geometry_collection: public Geometry bool init_from_wkt(Gis_read_stream *trs, String *wkb); uint init_from_wkb(const char *wkb, uint len, wkbByteOrder bo, String *res); uint init_from_opresult(String *bin, const char *opres, uint res_len); - bool init_from_json(json_engine_t *je, String *wkb); + bool init_from_json(json_engine_t *je, bool er_on_3D, String *wkb); bool get_data_as_wkt(String *txt, const char **end) const; bool get_data_as_json(String *txt, uint max_dec_digits, const char **end) const; From 93a6eed60749ed81c0a842b2705b19f7c296c7c2 Mon Sep 17 00:00:00 2001 From: Sergei Petrunia Date: Sun, 6 Aug 2017 15:36:50 +0300 Subject: [PATCH 27/34] Backport 
to 10.2: Make rocksdb.type_varchar test stable It may produce test failures like this because of non-deterministic cost calculations: -1 SIMPLE t1 # col1 col1 259 NULL # Using where +1 SIMPLE t1 # col1 NULL NULL NULL # Using where --- .../mysql-test/rocksdb/r/type_varchar.result | 20 +++++++++---------- .../rocksdb/t/type_varchar_endspace.inc | 4 ++-- 2 files changed, 12 insertions(+), 12 deletions(-) diff --git a/storage/rocksdb/mysql-test/rocksdb/r/type_varchar.result b/storage/rocksdb/mysql-test/rocksdb/r/type_varchar.result index 365a9b7b3b3ad..c9fa716dffc10 100644 --- a/storage/rocksdb/mysql-test/rocksdb/r/type_varchar.result +++ b/storage/rocksdb/mysql-test/rocksdb/r/type_varchar.result @@ -206,10 +206,10 @@ a 61 ab 6162 # Must show 'using index' for latin1_bin and utf8_bin: explain -select col1, hex(col1) from t1 where col1 < 'b'; +select col1, hex(col1) from t1 force index(col1) where col1 < 'b'; id select_type table type possible_keys key key_len ref rows Extra 1 SIMPLE t1 # col1 col1 67 NULL # Using where; Using index -select col1, hex(col1) from t1 where col1 < 'b'; +select col1, hex(col1) from t1 force index(col1) where col1 < 'b'; col1 hex(col1) a 61202009 a 6120 @@ -321,10 +321,10 @@ a 61 ab 6162 # Must show 'using index' for latin1_bin and utf8_bin: explain -select col1, hex(col1) from t1 where col1 < 'b'; +select col1, hex(col1) from t1 force index(col1) where col1 < 'b'; id select_type table type possible_keys key key_len ref rows Extra 1 SIMPLE t1 # col1 col1 195 NULL # Using where; Using index -select col1, hex(col1) from t1 where col1 < 'b'; +select col1, hex(col1) from t1 force index(col1) where col1 < 'b'; col1 hex(col1) a 61202009 a 6120 @@ -436,10 +436,10 @@ a 0061 a 0061002000200009 # Must show 'using index' for latin1_bin and utf8_bin: explain -select col1, hex(col1) from t1 where col1 < 'b'; +select col1, hex(col1) from t1 force index(col1) where col1 < 'b'; id select_type table type possible_keys key key_len ref rows Extra 1 SIMPLE t1 # 
col1 col1 131 NULL # Using where -select col1, hex(col1) from t1 where col1 < 'b'; +select col1, hex(col1) from t1 force index(col1) where col1 < 'b'; col1 hex(col1) a 0061002000200009 a 00610020 @@ -551,10 +551,10 @@ a 61 a 61202009 # Must show 'using index' for latin1_bin and utf8_bin: explain -select col1, hex(col1) from t1 where col1 < 'b'; +select col1, hex(col1) from t1 force index(col1) where col1 < 'b'; id select_type table type possible_keys key key_len ref rows Extra 1 SIMPLE t1 # col1 col1 259 NULL # Using where -select col1, hex(col1) from t1 where col1 < 'b'; +select col1, hex(col1) from t1 force index(col1) where col1 < 'b'; col1 hex(col1) a 61202009 a 6120 @@ -666,10 +666,10 @@ a 0061 a 0061002000200009 # Must show 'using index' for latin1_bin and utf8_bin: explain -select col1, hex(col1) from t1 where col1 < 'b'; +select col1, hex(col1) from t1 force index(col1) where col1 < 'b'; id select_type table type possible_keys key key_len ref rows Extra 1 SIMPLE t1 # col1 col1 259 NULL # Using where -select col1, hex(col1) from t1 where col1 < 'b'; +select col1, hex(col1) from t1 force index(col1) where col1 < 'b'; col1 hex(col1) a 0061002000200009 a 00610020 diff --git a/storage/rocksdb/mysql-test/rocksdb/t/type_varchar_endspace.inc b/storage/rocksdb/mysql-test/rocksdb/t/type_varchar_endspace.inc index bcca0c3a499f0..494f0ea139573 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/type_varchar_endspace.inc +++ b/storage/rocksdb/mysql-test/rocksdb/t/type_varchar_endspace.inc @@ -53,8 +53,8 @@ select col1, hex(col1) from t1; --echo # Must show 'using index' for latin1_bin and utf8_bin: --replace_column 4 # 9 # explain -select col1, hex(col1) from t1 where col1 < 'b'; -select col1, hex(col1) from t1 where col1 < 'b'; +select col1, hex(col1) from t1 force index(col1) where col1 < 'b'; +select col1, hex(col1) from t1 force index(col1) where col1 < 'b'; delete from t1; insert into t1 values(10, '', 'empty'); From c508691a9303579e5d614159bc872eeb65b8f84c Mon Sep 
17 00:00:00 2001 From: Daniel Black Date: Mon, 7 Aug 2017 11:04:09 +1000 Subject: [PATCH 28/34] travis: add clang-5.0 Also removed clang-3.9 Signed-off-by: Daniel Black --- .travis.compiler.sh | 4 ++-- .travis.yml | 7 +++---- 2 files changed, 5 insertions(+), 6 deletions(-) diff --git a/.travis.compiler.sh b/.travis.compiler.sh index 6058d95a170c3..13e35fffe8737 100755 --- a/.travis.compiler.sh +++ b/.travis.compiler.sh @@ -8,8 +8,8 @@ if [[ "${TRAVIS_OS_NAME}" == 'linux' ]]; then CMAKE_OPT="${CMAKE_OPT} -DCMAKE_C_COMPILER_LAUNCHER=ccache -DCMAKE_CXX_COMPILER_LAUNCHER=ccache" fi case ${GCC_VERSION} in - 5) CXX=clang++-3.9 ;; - 6) CXX=clang++-4.0 ;; + 5) CXX=clang++-4.0 ;; + 6) CXX=clang++-5.0 ;; esac export CXX CC=${CXX/++/} elif [[ "${CXX}" == 'g++' ]]; then diff --git a/.travis.yml b/.travis.yml index 639a720fa3d21..f3253b5e1a9f3 100644 --- a/.travis.yml +++ b/.travis.yml @@ -136,18 +136,17 @@ addons: apt: sources: - ubuntu-toolchain-r-test - - llvm-toolchain-trusty - - llvm-toolchain-trusty-3.9 - llvm-toolchain-trusty-4.0 + - sourceline: 'deb http://apt.llvm.org/trusty/ llvm-toolchain-trusty-5.0 main' packages: # make sure these match the build requirements - gcc-5 - g++-5 - gcc-6 - g++-6 - - clang-3.9 - - llvm-3.9-dev - clang-4.0 - llvm-4.0-dev + - clang-5.0 + - llvm-5.0-dev - libasan0 - bison - chrpath From 4ff6ebf76af763f8e9e98b22773ac68cc2415aa1 Mon Sep 17 00:00:00 2001 From: Alexey Botchkov Date: Mon, 7 Aug 2017 12:49:04 +0400 Subject: [PATCH 29/34] MDEV-12181 ST_AsGeoJSON argument does not limit decimals. Options handling implemented for ST_AsGeoJSON. 
--- mysql-test/r/gis-json.result | 15 +++++++++++ mysql-test/t/gis-json.test | 7 +++++ sql/item_geofunc.cc | 33 ++++++++++++++++++++++- sql/spatial.cc | 52 +++++++++++++++++++++++++++++++----- sql/spatial.h | 1 + 5 files changed, 100 insertions(+), 8 deletions(-) diff --git a/mysql-test/r/gis-json.result b/mysql-test/r/gis-json.result index 8d7c2d12172fb..d888b08351de2 100644 --- a/mysql-test/r/gis-json.result +++ b/mysql-test/r/gis-json.result @@ -74,6 +74,21 @@ POINT(5.3 15) SELECT ST_AsText(ST_GeomFromGeoJSON('{ "type": "Point", "coordinates": [5.3, 15.0, 4.3]}',4)); ST_AsText(ST_GeomFromGeoJSON('{ "type": "Point", "coordinates": [5.3, 15.0, 4.3]}',4)) POINT(5.3 15) +SELECT ST_AsGeoJSON(ST_GeomFromText('POINT(5.363 7.266)'),2); +ST_AsGeoJSON(ST_GeomFromText('POINT(5.363 7.266)'),2) +{"type": "Point", "coordinates": [5.36, 7.27]} +SELECT ST_AsGeoJSON(ST_GeomFromText('POINT(5.363 7.266)'),1); +ST_AsGeoJSON(ST_GeomFromText('POINT(5.363 7.266)'),1) +{"type": "Point", "coordinates": [5.4, 7.3]} +SELECT ST_AsGeoJSON(ST_GeomFromText('POINT(5.363 7.266)'),10); +ST_AsGeoJSON(ST_GeomFromText('POINT(5.363 7.266)'),10) +{"type": "Point", "coordinates": [5.363, 7.266]} +SELECT ST_AsGeoJSON(ST_GeomFromText("POINT(10 11)"), 100, 1); +ST_AsGeoJSON(ST_GeomFromText("POINT(10 11)"), 100, 1) +{"bbox": [10, 11, 10, 11], "type": "Point", "coordinates": [10, 11]} +SELECT ST_AsGeoJSON(ST_GeomFromText("POINT(10 11)"), 100, 5); +ST_AsGeoJSON(ST_GeomFromText("POINT(10 11)"), 100, 5) +{"bbox": [10, 11, 10, 11], "type": "Point", "coordinates": [10, 11]} # # End of 10.2 tests # diff --git a/mysql-test/t/gis-json.test b/mysql-test/t/gis-json.test index 67674c9b33fd4..5e695fbca9c4d 100644 --- a/mysql-test/t/gis-json.test +++ b/mysql-test/t/gis-json.test @@ -33,6 +33,13 @@ SELECT ST_AsText(ST_GeomFromGeoJSON('{ "type": "Point", "coordinates": [5.3, 15. 
SELECT ST_AsText(ST_GeomFromGeoJSON('{ "type": "Point", "coordinates": [5.3, 15.0, 4.3]}',3)); SELECT ST_AsText(ST_GeomFromGeoJSON('{ "type": "Point", "coordinates": [5.3, 15.0, 4.3]}',4)); +SELECT ST_AsGeoJSON(ST_GeomFromText('POINT(5.363 7.266)'),2); +SELECT ST_AsGeoJSON(ST_GeomFromText('POINT(5.363 7.266)'),1); +SELECT ST_AsGeoJSON(ST_GeomFromText('POINT(5.363 7.266)'),10); + +SELECT ST_AsGeoJSON(ST_GeomFromText("POINT(10 11)"), 100, 1); +SELECT ST_AsGeoJSON(ST_GeomFromText("POINT(10 11)"), 100, 5); + --echo # --echo # End of 10.2 tests --echo # diff --git a/sql/item_geofunc.cc b/sql/item_geofunc.cc index 8c9b61d98a177..3f0efbfa871ec 100644 --- a/sql/item_geofunc.cc +++ b/sql/item_geofunc.cc @@ -259,6 +259,8 @@ String *Item_func_as_geojson::val_str_ascii(String *str) DBUG_ASSERT(fixed == 1); String arg_val; String *swkb= args[0]->val_str(&arg_val); + uint max_dec= FLOATING_POINT_DECIMALS; + longlong options= 0; Geometry_buffer buffer; Geometry *geom= NULL; const char *dummy; @@ -268,12 +270,41 @@ String *Item_func_as_geojson::val_str_ascii(String *str) !(geom= Geometry::construct(&buffer, swkb->ptr(), swkb->length()))))) return 0; + if (arg_count > 1) + { + max_dec= (uint) args[1]->val_int(); + if (args[1]->null_value) + max_dec= FLOATING_POINT_DECIMALS; + if (arg_count > 2) + { + options= args[2]->val_int(); + if (args[2]->null_value) + options= 0; + } + } + str->length(0); str->set_charset(&my_charset_latin1); - if ((null_value= geom->as_json(str, FLOATING_POINT_DECIMALS, &dummy))) + + if (str->reserve(1, 512)) return 0; + str->qs_append('{'); + + if (options & 1) + { + if (geom->bbox_as_json(str) || str->append(", ", 2)) + goto error; + } + + if ((geom->as_json(str, max_dec, &dummy) || str->append("}", 1))) + goto error; + return str; + +error: + null_value= 1; + return 0; } diff --git a/sql/spatial.cc b/sql/spatial.cc index 095f7ff81dba6..1ba754b6b136e 100644 --- a/sql/spatial.cc +++ b/sql/spatial.cc @@ -21,6 +21,10 @@ #include "gstream.h" // Gis_read_stream 
#include "sql_string.h" // String +/* This is from item_func.h. Didn't want to #include the whole file. */ +double my_double_round(double value, longlong dec, bool dec_unsigned, + bool truncate); + #ifdef HAVE_SPATIAL /* @@ -250,6 +254,8 @@ static const uchar feature_type[]= "feature"; static const int feature_type_len= 7; static const uchar feature_coll_type[]= "featurecollection"; static const int feature_coll_type_len= 17; +static const uchar bbox_keyname[]= "bbox"; +static const int bbox_keyname_len= 4; int Geometry::as_json(String *wkt, uint max_dec_digits, const char **end) @@ -258,7 +264,7 @@ int Geometry::as_json(String *wkt, uint max_dec_digits, const char **end) if (wkt->reserve(4 + type_keyname_len + 2 + len + 2 + 2 + coord_keyname_len + 4, 512)) return 1; - wkt->qs_append("{\"", 2); + wkt->qs_append("\"", 1); wkt->qs_append((const char *) type_keyname, type_keyname_len); wkt->qs_append("\": \"", 4); wkt->qs_append(get_class_info()->m_geojson_name.str, len); @@ -269,10 +275,35 @@ int Geometry::as_json(String *wkt, uint max_dec_digits, const char **end) wkt->qs_append((const char *) coord_keyname, coord_keyname_len); wkt->qs_append("\": ", 3); - if (get_data_as_json(wkt, max_dec_digits, end) || - wkt->reserve(1)) + if (get_data_as_json(wkt, max_dec_digits, end)) return 1; - wkt->qs_append('}'); + + return 0; +} + + +int Geometry::bbox_as_json(String *wkt) +{ + MBR mbr; + const char *end; + if (wkt->reserve(5 + bbox_keyname_len + (FLOATING_POINT_DECIMALS+2)*4, 512)) + return 1; + wkt->qs_append("\"", 1); + wkt->qs_append((const char *) bbox_keyname, bbox_keyname_len); + wkt->qs_append("\": [", 4); + + if (get_mbr(&mbr, &end)) + return 1; + + wkt->qs_append(mbr.xmin); + wkt->qs_append(", ", 2); + wkt->qs_append(mbr.ymin); + wkt->qs_append(", ", 2); + wkt->qs_append(mbr.xmax); + wkt->qs_append(", ", 2); + wkt->qs_append(mbr.ymax); + wkt->qs_append("]", 1); + return 0; } @@ -670,6 +701,11 @@ static void append_json_point(String *txt, uint max_dec, const char 
*data) { double x,y; get_point(&x, &y, data); + if (max_dec < FLOATING_POINT_DECIMALS) + { + x= my_double_round(x, max_dec, FALSE, FALSE); + y= my_double_round(y, max_dec, FALSE, FALSE); + } txt->qs_append('['); txt->qs_append(x); txt->qs_append(", ", 2); @@ -3106,12 +3142,14 @@ bool Gis_geometry_collection::get_data_as_json(String *txt, uint max_dec_digits, if (!(geom= create_by_typeid(&buffer, wkb_type))) return 1; geom->set_data_ptr(data, (uint) (m_data_end - data)); - if (geom->as_json(txt, max_dec_digits, &data) || - txt->append(STRING_WITH_LEN(", "), 512)) + if (txt->append("{", 1) || + geom->as_json(txt, max_dec_digits, &data) || + txt->append(STRING_WITH_LEN("}, "), 512)) return 1; } txt->length(txt->length() - 2); - txt->qs_append(']'); + if (txt->append("]", 1)) + return 1; *end= data; return 0; diff --git a/sql/spatial.h b/sql/spatial.h index 45f335596c802..78e850dc2d794 100644 --- a/sql/spatial.h +++ b/sql/spatial.h @@ -323,6 +323,7 @@ class Geometry String *res, Gcalc_result_receiver &rr); int as_wkt(String *wkt, const char **end); int as_json(String *wkt, uint max_dec_digits, const char **end); + int bbox_as_json(String *wkt); inline void set_data_ptr(const char *data, uint32 data_len) { From f701ac65e951d373f62322147c785370fe7e4e25 Mon Sep 17 00:00:00 2001 From: Alexey Botchkov Date: Mon, 7 Aug 2017 13:46:45 +0400 Subject: [PATCH 30/34] MDEV-12324 Wrong result (phantom array value) on JSON_EXTRACT. Fixed the path comparison. 
--- mysql-test/r/func_json.result | 6 ++++++ mysql-test/t/func_json.test | 6 ++++++ strings/json_lib.c | 2 +- 3 files changed, 13 insertions(+), 1 deletion(-) diff --git a/mysql-test/r/func_json.result b/mysql-test/r/func_json.result index 997d3e4d06272..894e46017f73a 100644 --- a/mysql-test/r/func_json.result +++ b/mysql-test/r/func_json.result @@ -642,3 +642,9 @@ SELECT JSON_KEYS(f) FROM t1 ORDER BY 1; JSON_KEYS(f) NULL DROP TABLE t1; +SELECT JSON_EXTRACT( '{"foo":"bar"}', '$[*].*' ); +JSON_EXTRACT( '{"foo":"bar"}', '$[*].*' ) +NULL +SELECT JSON_EXTRACT( '{"foo":"bar"}', '$[*]' ); +JSON_EXTRACT( '{"foo":"bar"}', '$[*]' ) +NULL diff --git a/mysql-test/t/func_json.test b/mysql-test/t/func_json.test index 0fc8e97b2843d..0ce742aac1123 100644 --- a/mysql-test/t/func_json.test +++ b/mysql-test/t/func_json.test @@ -296,3 +296,9 @@ INSERT INTO t1 VALUES (0); SELECT JSON_KEYS(f) FROM t1 ORDER BY 1; DROP TABLE t1; +# +# MDEV-12324 Wrong result (phantom array value) on JSON_EXTRACT. +# +SELECT JSON_EXTRACT( '{"foo":"bar"}', '$[*].*' ); +SELECT JSON_EXTRACT( '{"foo":"bar"}', '$[*]' ); + diff --git a/strings/json_lib.c b/strings/json_lib.c index 0e2a17b0ae0c8..7167b6a2a547e 100644 --- a/strings/json_lib.c +++ b/strings/json_lib.c @@ -1756,7 +1756,7 @@ int json_path_parts_compare( goto step_fits; goto step_failed; } - if (a->n_item == 0) + if ((a->type & JSON_PATH_WILD) == 0 && a->n_item == 0) goto step_fits_autowrap; goto step_failed; } From a33220fbefed74d9a63514e29914abc9896c6331 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marko=20M=C3=A4kel=C3=A4?= Date: Mon, 7 Aug 2017 13:50:31 +0300 Subject: [PATCH 31/34] MDEV-13451 Assertion `!recv_no_ibuf_operations' failed in ibuf_page_low() During InnoDB startup, change buffer merge operations are prohibited before recv_apply_hashed_log_recs(true), which performs the last phase of redo log apply. Before this call, ibuf_init_at_db_start() would be invoked, and it could trigger the debug assertion. 
ibuf_init_at_db_start(): Do not declare the mini-transaction as "inside change buffer", because nothing is being written in the mini-transaction. The purpose of this function is only to initialize the memory data structures from the persistent data structures. --- storage/innobase/ibuf/ibuf0ibuf.cc | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/storage/innobase/ibuf/ibuf0ibuf.cc b/storage/innobase/ibuf/ibuf0ibuf.cc index b4cbe7d448088..d021f8c008591 100644 --- a/storage/innobase/ibuf/ibuf0ibuf.cc +++ b/storage/innobase/ibuf/ibuf0ibuf.cc @@ -534,7 +534,6 @@ ibuf_init_at_db_start(void) fseg_n_reserved_pages(header_page + IBUF_HEADER + IBUF_TREE_SEG_HEADER, &n_used, &mtr); - ibuf_enter(&mtr); ut_ad(n_used >= 2); @@ -556,7 +555,7 @@ ibuf_init_at_db_start(void) mutex_exit(&ibuf_mutex); ibuf->empty = page_is_empty(root); - ibuf_mtr_commit(&mtr); + mtr.commit(); ibuf->index = dict_mem_index_create( "innodb_change_buffer", "CLUST_IND", From dcdc1c6d09b4a49b13fa8b1448064110bc296f86 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marko=20M=C3=A4kel=C3=A4?= Date: Mon, 7 Aug 2017 13:54:37 +0300 Subject: [PATCH 32/34] MDEV-13452 Assertion `!recv_no_log_write' failed in log_reserve_and_open() The debug flag recv_no_log_write prohibits writes of redo log records for modifying page data. The debug assertion was failing when fil_names_clear() was writing the informative MLOG_FILE_NAME and MLOG_CHECKPOINT records which do not modify any data. log_reserve_and_open(), log_write_low(): Remove the debug assertion. log_pad_current_log_block(), mtr_write_log(), mtr_t::Command::prepare_write(): Add the debug assertion. 
--- storage/innobase/log/log0log.cc | 3 +-- storage/innobase/mtr/mtr0mtr.cc | 3 +++ 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/storage/innobase/log/log0log.cc b/storage/innobase/log/log0log.cc index 9a61e2067a4bb..d892f22f967dd 100644 --- a/storage/innobase/log/log0log.cc +++ b/storage/innobase/log/log0log.cc @@ -359,7 +359,6 @@ log_reserve_and_open( loop: ut_ad(log_mutex_own()); - ut_ad(!recv_no_log_write); if (log_sys->is_extending) { log_mutex_exit(); @@ -416,7 +415,6 @@ log_write_low( ut_ad(log_mutex_own()); part_loop: - ut_ad(!recv_no_log_write); /* Calculate a part length */ data_len = (log->buf_free % OS_FILE_LOG_BLOCK_SIZE) + str_len; @@ -2291,6 +2289,7 @@ log_pad_current_log_block(void) ulint i; lsn_t lsn; + ut_ad(!recv_no_log_write); /* We retrieve lsn only because otherwise gcc crashed on HP-UX */ lsn = log_reserve_and_open(OS_FILE_LOG_BLOCK_SIZE); diff --git a/storage/innobase/mtr/mtr0mtr.cc b/storage/innobase/mtr/mtr0mtr.cc index 0b9185ba5082c..faa00b1518be6 100644 --- a/storage/innobase/mtr/mtr0mtr.cc +++ b/storage/innobase/mtr/mtr0mtr.cc @@ -478,6 +478,7 @@ mtr_write_log( const ulint len = log->size(); mtr_write_log_t write_log; + ut_ad(!recv_no_log_write); DBUG_PRINT("ib_log", (ULINTPF " extra bytes written at " LSN_PF, len, log_sys->lsn)); @@ -799,6 +800,8 @@ mtr_t::release_page(const void* ptr, mtr_memo_type_t type) ulint mtr_t::Command::prepare_write() { + ut_ad(!recv_no_log_write); + switch (m_impl->m_log_mode) { case MTR_LOG_SHORT_INSERTS: ut_ad(0); From 0b30ce4f31cfbaad9582432d35e3c38464eba08e Mon Sep 17 00:00:00 2001 From: Sergei Petrunia Date: Mon, 7 Aug 2017 16:04:38 +0300 Subject: [PATCH 33/34] MDEV-13374: Server crashes in first_linear_tab / st_select_lex::set_explain_type - Support first_linear_tab() traversal for degenerate joins --- mysql-test/r/win.result | 12 ++++++++++++ mysql-test/t/win.test | 11 +++++++++++ sql/sql_select.cc | 8 ++++++-- 3 files changed, 29 insertions(+), 2 deletions(-) diff --git 
a/mysql-test/r/win.result b/mysql-test/r/win.result index c6707bd51bc90..14628cd0d4422 100644 --- a/mysql-test/r/win.result +++ b/mysql-test/r/win.result @@ -3173,3 +3173,15 @@ Nth_value(i,1) OVER() 1 1 DROP TABLE t1; +# +# A regression after MDEV-13351: +# MDEV-13374 : Server crashes in first_linear_tab / st_select_lex::set_explain_type +# upon UNION with aggregate function +# +CREATE TABLE t1 (i INT) ENGINE=MyISAM; +INSERT INTO t1 VALUES (1),(2); +SELECT i AS fld FROM t1 UNION SELECT COUNT(*) AS fld FROM t1; +fld +1 +2 +DROP TABLE t1; diff --git a/mysql-test/t/win.test b/mysql-test/t/win.test index 77ca755378da5..3dedc1227fd0f 100644 --- a/mysql-test/t/win.test +++ b/mysql-test/t/win.test @@ -1954,3 +1954,14 @@ UNION ALL ; DROP TABLE t1; +--echo # +--echo # A regression after MDEV-13351: +--echo # MDEV-13374 : Server crashes in first_linear_tab / st_select_lex::set_explain_type +--echo # upon UNION with aggregate function +--echo # + +CREATE TABLE t1 (i INT) ENGINE=MyISAM; +INSERT INTO t1 VALUES (1),(2); +SELECT i AS fld FROM t1 UNION SELECT COUNT(*) AS fld FROM t1; +DROP TABLE t1; + diff --git a/sql/sql_select.cc b/sql/sql_select.cc index 90f5e11dd167d..ba3760dd94869 100644 --- a/sql/sql_select.cc +++ b/sql/sql_select.cc @@ -8627,7 +8627,7 @@ JOIN_TAB *first_top_level_tab(JOIN *join, enum enum_with_const_tables const_tbls JOIN_TAB *tab= join->join_tab; if (const_tbls == WITHOUT_CONST_TABLES) { - if (join->const_tables == join->table_count) + if (join->const_tables == join->table_count || !tab) return NULL; tab += join->const_tables; } @@ -8650,6 +8650,10 @@ JOIN_TAB *first_linear_tab(JOIN *join, enum enum_with_const_tables const_tbls) { JOIN_TAB *first= join->join_tab; + + if (!first) + return NULL; + if (const_tbls == WITHOUT_CONST_TABLES) first+= join->const_tables; @@ -8736,7 +8740,7 @@ JOIN_TAB *first_depth_first_tab(JOIN* join) { JOIN_TAB* tab; /* This means we're starting the enumeration */ - if (join->const_tables == join->top_join_tab_count) + if 
(join->const_tables == join->top_join_tab_count || !join->join_tab) return NULL; tab= join->join_tab + join->const_tables; From 30c36b2c150d6bf52e56abfbef755119fbc773f9 Mon Sep 17 00:00:00 2001 From: Sergei Petrunia Date: Mon, 7 Aug 2017 17:25:11 +0300 Subject: [PATCH 34/34] Make rocksdb.rocksdb_icp test stable --- storage/rocksdb/mysql-test/rocksdb/r/rocksdb_icp.result | 4 ++-- storage/rocksdb/mysql-test/rocksdb/t/rocksdb_icp.test | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_icp.result b/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_icp.result index b2b6d7cdde9f9..9ef1ff28f10fb 100644 --- a/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_icp.result +++ b/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_icp.result @@ -240,12 +240,12 @@ A.a+10*B.a+100*C.a, from t0 A, t0 B, t0 C; set @count=0; explain -select * from t1 where key1=1; +select * from t1 force index(key1) where key1=1; id select_type table type possible_keys key key_len ref rows Extra 1 SIMPLE t1 ref key1 key1 9 const # set @count_diff =(select (value - @count) from information_schema.rocksdb_perf_context where table_schema=database() and table_name='t1' and stat_type='INTERNAL_KEY_SKIPPED_COUNT'); -select * from t1 where key1=1; +select * from t1 force index(key1) where key1=1; pk key1 col1 1 1 1234 set @count_diff =(select (value - @count) from information_schema.rocksdb_perf_context diff --git a/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_icp.test b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_icp.test index 8bd93845e86d0..8d0ec89e85a75 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_icp.test +++ b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb_icp.test @@ -32,10 +32,10 @@ set @count_diff =(select (value - @count) from information_schema.rocksdb_perf_c --replace_column 9 # explain -select * from t1 where key1=1; +select * from t1 force index(key1) where key1=1; eval $save_query; -select * from t1 where key1=1; +select * from t1 
force index(key1) where key1=1; eval $save_query; --echo # The following must be =1, or in any case not 999: select @count_diff as "INTERNAL_KEY_SKIPPED_COUNT increment";