Skip to content

Commit

Permalink
Merge branch 'merge-tokudb-5.6' into 10.0
Browse files Browse the repository at this point in the history
  • Loading branch information
cvicentiu committed May 16, 2017
2 parents c1b3aaa + 97c53cd commit 4cdae9c
Show file tree
Hide file tree
Showing 34 changed files with 1,816 additions and 187 deletions.
5 changes: 3 additions & 2 deletions storage/tokudb/CMakeLists.txt
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
SET(TOKUDB_VERSION 5.6.35-80.0)
SET(TOKUDB_VERSION 5.6.36-82.0)
# PerconaFT only supports x86-64 and cmake-2.8.9+
IF(CMAKE_VERSION VERSION_LESS "2.8.9")
MESSAGE(STATUS "CMake 2.8.9 or higher is required by TokuDB")
Expand Down Expand Up @@ -140,7 +140,8 @@ SET(TOKUDB_SOURCES
tokudb_background.cc
tokudb_information_schema.cc
tokudb_sysvars.cc
tokudb_thread.cc)
tokudb_thread.cc
tokudb_dir_cmd.cc)
MYSQL_ADD_PLUGIN(tokudb ${TOKUDB_SOURCES} STORAGE_ENGINE MODULE_ONLY
LINK_LIBRARIES tokufractaltree_static tokuportability_static ${ZLIB_LIBRARY} stdc++)
SET(CMAKE_MODULE_LINKER_FLAGS_RELEASE "${CMAKE_MODULE_LINKER_FLAGS_RELEASE} -flto -fuse-linker-plugin")
Expand Down
3 changes: 3 additions & 0 deletions storage/tokudb/PerconaFT/buildheader/make_tdb.cc
Original file line number Diff line number Diff line change
Expand Up @@ -425,6 +425,9 @@ static void print_db_env_struct (void) {
"bool (*set_dir_per_db)(DB_ENV *, bool new_val)",
"bool (*get_dir_per_db)(DB_ENV *)",
"const char *(*get_data_dir)(DB_ENV *env)",
"int (*dirtool_attach)(DB_ENV *, DB_TXN *, const char *, const char *)",
"int (*dirtool_detach)(DB_ENV *, DB_TXN *, const char *)",
"int (*dirtool_move)(DB_ENV *, DB_TXN *, const char *, const char *)",
NULL};

sort_and_dump_fields("db_env", true, extra);
Expand Down
393 changes: 257 additions & 136 deletions storage/tokudb/PerconaFT/ft/ft-ops.cc

Large diffs are not rendered by default.

162 changes: 161 additions & 1 deletion storage/tokudb/PerconaFT/src/ydb.cc
Original file line number Diff line number Diff line change
Expand Up @@ -70,6 +70,8 @@ const char *toku_copyright_string = "Copyright (c) 2006, 2015, Percona and/or it
#include "util/status.h"
#include "util/context.h"

#include <functional>

// Include ydb_lib.cc here so that its constructor/destructor gets put into
// ydb.o, to make sure they don't get erased at link time (when linking to
// a static libtokufractaltree.a that was compiled with gcc). See #5094.
Expand Down Expand Up @@ -1314,6 +1316,159 @@ static const char *env_get_data_dir(DB_ENV *env) {
return env->i->real_data_dir;
}

// Record a dname -> iname mapping in the environment's directory
// dictionary. Both strings are stored including their trailing NUL so
// they round-trip as C strings. Returns 0 on success, EINVAL if the
// environment is not open, or the error from toku_db_put.
static int env_dirtool_attach(DB_ENV *env,
                              DB_TXN *txn,
                              const char *dname,
                              const char *iname) {
    HANDLE_PANICKED_ENV(env);
    if (!env_opened(env)) {
        return EINVAL;
    }
    HANDLE_READ_ONLY_TXN(txn);

    DBT key;
    DBT val;
    // +1 keeps the NUL terminator as part of the stored key/value.
    toku_fill_dbt(&key, dname, strlen(dname) + 1);
    toku_fill_dbt(&val, iname, strlen(iname) + 1);

    return toku_db_put(env->i->directory, txn, &key, &val, 0, true);
}

// Remove the dname -> iname mapping from the environment's directory
// dictionary. Returns 0 on success, EINVAL if the environment is not
// open, EEXIST if no such dname is mapped, or the error from the
// underlying get/del.
static int env_dirtool_detach(DB_ENV *env,
                              DB_TXN *txn,
                              const char *dname) {
    int r;
    DBT dname_dbt;
    DBT old_iname_dbt;

    HANDLE_PANICKED_ENV(env);
    if (!env_opened(env)) {
        return EINVAL;
    }
    HANDLE_READ_ONLY_TXN(txn);

    toku_fill_dbt(&dname_dbt, dname, strlen(dname) + 1);
    toku_init_dbt_flags(&old_iname_dbt, DB_DBT_REALLOC);

    // Probe for the mapping first so a missing dname can be reported
    // distinctly from a failed delete.
    r = toku_db_get(env->i->directory,
                    txn,
                    &dname_dbt,
                    &old_iname_dbt,
                    DB_SERIALIZABLE);  // allocates memory for iname
    if (r == DB_NOTFOUND)
        // NOTE(review): EEXIST for a *missing* mapping looks inverted
        // (ENOENT would read more naturally) but is the established
        // contract of this API — preserved for callers.
        return EEXIST;
    if (r != 0)
        // Propagate lock-timeout/IO errors instead of silently
        // attempting the delete and masking the original failure.
        return r;
    toku_free(old_iname_dbt.data);

    r = toku_db_del(env->i->directory, txn, &dname_dbt, DB_DELETE_ANY, true);

    return r;
}

// Rename a directory-dictionary mapping: look up the iname bound to
// old_dname, delete the old entry, and re-insert the iname under
// new_dname. Returns 0 on success, EINVAL if the environment is not
// open, EEXIST if old_dname is not mapped, or the first error from the
// underlying get/del/put.
static int env_dirtool_move(DB_ENV *env,
                            DB_TXN *txn,
                            const char *old_dname,
                            const char *new_dname) {
    int r;
    DBT old_dname_dbt;
    DBT new_dname_dbt;
    DBT iname_dbt;

    HANDLE_PANICKED_ENV(env);
    if (!env_opened(env)) {
        return EINVAL;
    }
    HANDLE_READ_ONLY_TXN(txn);

    toku_fill_dbt(&old_dname_dbt, old_dname, strlen(old_dname) + 1);
    toku_fill_dbt(&new_dname_dbt, new_dname, strlen(new_dname) + 1);
    toku_init_dbt_flags(&iname_dbt, DB_DBT_REALLOC);

    r = toku_db_get(env->i->directory,
                    txn,
                    &old_dname_dbt,
                    &iname_dbt,
                    DB_SERIALIZABLE); // allocates memory for iname
    if (r == DB_NOTFOUND)
        // NOTE(review): EEXIST for a missing mapping mirrors
        // env_dirtool_detach's established contract — preserved.
        return EEXIST;
    if (r != 0)
        // Propagate lock-timeout/IO errors instead of attempting the
        // delete/put with an unpopulated iname.
        goto exit;

    r = toku_db_del(
        env->i->directory, txn, &old_dname_dbt, DB_DELETE_ANY, true);
    if (r != 0)
        goto exit;

    r = toku_db_put(
        env->i->directory, txn, &new_dname_dbt, &iname_dbt, 0, true);

exit:
    toku_free(iname_dbt.data);
    return r;
}

// Run the environment operation `f` under checkpoint exclusion, wrapped
// in its own child transaction when the environment is transactional.
// On f's success the child txn is committed; on failure it is aborted.
// Returns f's result (commit/abort themselves are asserted to succeed).
static int locked_env_op(DB_ENV *env,
                         DB_TXN *txn,
                         std::function<int(DB_TXN *)> f) {
    int ret, r;
    // NOTE(review): these macros presumably return early (likely EINVAL)
    // for a read-only txn or an illegal working parent txn — confirm
    // against their definitions elsewhere in this file.
    HANDLE_READ_ONLY_TXN(txn);
    HANDLE_ILLEGAL_WORKING_PARENT_TXN(env, txn);

    // If the env was opened with DB_INIT_TXN, run f inside a child of
    // the caller's txn; otherwise f runs with a NULL txn.
    DB_TXN *child_txn = NULL;
    int using_txns = env->i->open_flags & DB_INIT_TXN;
    if (using_txns) {
        ret = toku_txn_begin(env, txn, &child_txn, 0);
        lazy_assert_zero(ret);
    }

    // cannot begin a checkpoint
    toku_multi_operation_client_lock();
    r = f(child_txn);
    toku_multi_operation_client_unlock();

    if (using_txns) {
        if (r == 0) {
            ret = locked_txn_commit(child_txn, 0);
            lazy_assert_zero(ret);
        } else {
            ret = locked_txn_abort(child_txn);
            lazy_assert_zero(ret);
        }
    }
    return r;

}

// Locked wrapper: run env_dirtool_attach under checkpoint exclusion in
// a child transaction via locked_env_op.
static int locked_env_dirtool_attach(DB_ENV *env,
                                     DB_TXN *txn,
                                     const char *dname,
                                     const char *iname) {
    return locked_env_op(env, txn, [env, dname, iname](DB_TXN *child_txn) {
        return env_dirtool_attach(env, child_txn, dname, iname);
    });
}

// Locked wrapper: run env_dirtool_detach under checkpoint exclusion in
// a child transaction via locked_env_op.
static int locked_env_dirtool_detach(DB_ENV *env,
                                     DB_TXN *txn,
                                     const char *dname) {
    return locked_env_op(env, txn, [env, dname](DB_TXN *child_txn) {
        return env_dirtool_detach(env, child_txn, dname);
    });
}

// Locked wrapper: run env_dirtool_move under checkpoint exclusion in a
// child transaction via locked_env_op.
static int locked_env_dirtool_move(DB_ENV *env,
                                   DB_TXN *txn,
                                   const char *old_dname,
                                   const char *new_dname) {
    return locked_env_op(env, txn, [env, old_dname, new_dname](DB_TXN *child_txn) {
        return env_dirtool_move(env, child_txn, old_dname, new_dname);
    });
}

static int env_dbremove(DB_ENV * env, DB_TXN *txn, const char *fname, const char *dbname, uint32_t flags);

static int
Expand Down Expand Up @@ -2646,6 +2801,9 @@ toku_env_create(DB_ENV ** envp, uint32_t flags) {
#define SENV(name) result->name = locked_env_ ## name
SENV(dbremove);
SENV(dbrename);
SENV(dirtool_attach);
SENV(dirtool_detach);
SENV(dirtool_move);
//SENV(set_noticecall);
#undef SENV
#define USENV(name) result->name = env_ ## name
Expand Down Expand Up @@ -2975,8 +3133,10 @@ env_dbremove(DB_ENV * env, DB_TXN *txn, const char *fname, const char *dbname, u
if (txn && r) {
if (r == EMFILE || r == ENFILE)
r = toku_ydb_do_error(env, r, "toku dbremove failed because open file limit reached\n");
else
else if (r != ENOENT)
r = toku_ydb_do_error(env, r, "toku dbremove failed\n");
else
r = 0;
goto exit;
}
if (txn) {
Expand Down
28 changes: 10 additions & 18 deletions storage/tokudb/ha_tokudb.cc
Original file line number Diff line number Diff line change
Expand Up @@ -5255,17 +5255,17 @@ int ha_tokudb::fill_range_query_buf(
DEBUG_SYNC(ha_thd(), "tokudb_icp_asc_scan_out_of_range");
goto cleanup;
} else if (result == ICP_NO_MATCH) {
// if we are performing a DESC ICP scan and have no end_range
// to compare to stop using ICP filtering as there isn't much more
// that we can do without going through contortions with remembering
// and comparing key parts.
// Optimizer change for MyRocks also benefits us here in TokuDB as
// opt_range.cc QUICK_SELECT::get_next now sets end_range during
// descending scan. We should not ever hit this condition, but
// leaving this code in to prevent any possibility of a descending
// scan to the beginning of an index and catch any possibility
// in debug builds with an assertion
assert_debug(!(!end_range && direction < 0));
if (!end_range &&
direction < 0) {

cancel_pushed_idx_cond();
DEBUG_SYNC(ha_thd(), "tokudb_icp_desc_scan_invalidate");
}

error = TOKUDB_CURSOR_CONTINUE;
goto cleanup;
}
Expand Down Expand Up @@ -6123,7 +6123,6 @@ int ha_tokudb::info(uint flag) {
stats.records = share->row_count() + share->rows_from_locked_table;
stats.deleted = 0;
if (!(flag & HA_STATUS_NO_LOCK)) {
uint64_t num_rows = 0;

error = txn_begin(db_env, NULL, &txn, DB_READ_UNCOMMITTED, ha_thd());
if (error) {
Expand All @@ -6133,20 +6132,13 @@ int ha_tokudb::info(uint flag) {
// we should always have a primary key
assert_always(share->file != NULL);

error = estimate_num_rows(share->file, &num_rows, txn);
if (error == 0) {
share->set_row_count(num_rows, false);
stats.records = num_rows;
} else {
goto cleanup;
}

DB_BTREE_STAT64 dict_stats;
error = share->file->stat64(share->file, txn, &dict_stats);
if (error) {
goto cleanup;
}

share->set_row_count(dict_stats.bt_ndata, false);
stats.records = dict_stats.bt_ndata;
stats.create_time = dict_stats.bt_create_time_sec;
stats.update_time = dict_stats.bt_modify_time_sec;
stats.check_time = dict_stats.bt_verify_time_sec;
Expand Down Expand Up @@ -7849,7 +7841,7 @@ ha_rows ha_tokudb::records_in_range(uint keynr, key_range* start_key, key_range*
// As a result, equal may be 0 and greater may actually be equal+greater
// So, we call key_range64 on the key, and the key that is after it.
if (!start_key && !end_key) {
error = estimate_num_rows(kfile, &rows, transaction);
error = estimate_num_rows(share->file, &rows, transaction);
if (error) {
ret_val = HA_TOKUDB_RANGE_COUNT;
goto cleanup;
Expand Down
70 changes: 70 additions & 0 deletions storage/tokudb/mysql-test/tokudb/r/bug-1657908.result
Original file line number Diff line number Diff line change
@@ -0,0 +1,70 @@
SET GLOBAL tokudb_dir_per_db=ON;
CREATE PROCEDURE create_table()
BEGIN
CREATE TABLE test.t1 (
a INT
) ENGINE = TokuDB
PARTITION BY RANGE (a)
(PARTITION p100 VALUES LESS THAN (100) ENGINE = TokuDB,
PARTITION p_to_del VALUES LESS THAN (200) ENGINE = TokuDB,
PARTITION p300 VALUES LESS THAN (300) ENGINE = TokuDB,
PARTITION p400 VALUES LESS THAN (400) ENGINE = TokuDB
);
END|
### Create partitioned table
CALL create_table();
## Looking for *.tokudb files in data_dir
## Looking for *.tokudb files in data_dir/test
t1_P_p100_main_id.tokudb
t1_P_p100_status_id.tokudb
t1_P_p300_main_id.tokudb
t1_P_p300_status_id.tokudb
t1_P_p400_main_id.tokudb
t1_P_p400_status_id.tokudb
t1_P_p_to_del_main_id.tokudb
t1_P_p_to_del_status_id.tokudb
### Stop server
### Remove 'main' file of one of the partitions
### Start server
### Make sure 'main' partition file is deleted
## Looking for *.tokudb files in data_dir
## Looking for *.tokudb files in data_dir/test
t1_P_p100_main_id.tokudb
t1_P_p100_status_id.tokudb
t1_P_p300_main_id.tokudb
t1_P_p300_status_id.tokudb
t1_P_p400_main_id.tokudb
t1_P_p400_status_id.tokudb
t1_P_p_to_del_status_id.tokudb
### Make sure the table still exists
SHOW TABLES;
Tables_in_test
t1
### Drop table
DROP TABLE t1;
### Make sure the table is dropped
SHOW TABLES;
Tables_in_test
### Check what files still exist after DROP TABLE
## Looking for *.tokudb files in data_dir
## Looking for *.tokudb files in data_dir/test
### Remove the rest of the files
### Make sure there are no tokudb files
## Looking for *.tokudb files in data_dir
## Looking for *.tokudb files in data_dir/test
### Create the same table once more
CALL create_table();
## Looking for *.tokudb files in data_dir
## Looking for *.tokudb files in data_dir/test
t1_P_p100_main_id.tokudb
t1_P_p100_status_id.tokudb
t1_P_p300_main_id.tokudb
t1_P_p300_status_id.tokudb
t1_P_p400_main_id.tokudb
t1_P_p400_status_id.tokudb
t1_P_p_to_del_main_id.tokudb
t1_P_p_to_del_status_id.tokudb
### Restore state
DROP TABLE t1;
DROP PROCEDURE create_table;
SET GLOBAL tokudb_dir_per_db=default;
Loading

0 comments on commit 4cdae9c

Please sign in to comment.