Browse files

Hook up iterators, update stats and add code to merge pending in.

  • Loading branch information...
1 parent 10746bf commit 64f8731926242ced701ee2631e89be9b3ced88f4 @jonmeredith jonmeredith committed Aug 3, 2011
Showing with 633 additions and 113 deletions.
  1. +440 −74 c_src/bitcask_nifs.c
  2. +4 −0 c_src/erl_nif_compat.h
  3. +9 −0 ebin/bitcask.app
  4. +14 −2 src/bitcask.erl
  5. +145 −31 src/bitcask_nifs.erl
  6. +21 −6 test/bitcask_qc.erl
View
514 c_src/bitcask_nifs.c
@@ -33,6 +33,22 @@
#include "khash.h"
#include "murmurhash.h"
+#ifdef BITCASK_DEBUG
+#include <stdio.h>
+#include <stdarg.h>
+void DEBUG(const char *fmt, ...)
+{
+ va_list ap;
+ va_start(ap, fmt);
+ vfprintf(stderr, fmt, ap);
+ va_end(ap);
+}
+#else
+# define DEBUG(X, ...) {}
+#endif
+
+
+
static ErlNifResourceType* bitcask_keydir_RESOURCE;
static ErlNifResourceType* bitcask_lock_RESOURCE;
@@ -47,12 +63,13 @@ typedef struct
char key[0];
} bitcask_keydir_entry;
+
static khint_t keydir_entry_hash(bitcask_keydir_entry* entry);
static khint_t keydir_entry_equal(bitcask_keydir_entry* lhs,
bitcask_keydir_entry* rhs);
KHASH_INIT(entries, bitcask_keydir_entry*, char, 0, keydir_entry_hash, keydir_entry_equal);
-typedef enum { TOTAL = 0, LIVE = 1, PENDING = 2 } fstat_count_type;
+typedef enum { TOTAL = 0, LIVE = 1, PENDING = 2, NO_STATS = -1 } fstat_count_type;
typedef struct
{
@@ -78,6 +95,10 @@ typedef struct
unsigned int refcount;
unsigned int keyfolders;
uint64_t pending_updated;
+ uint64_t pending_start; // os:timestamp() as 64-bit integer
+ ErlNifPid* pending_awaken; // processes to wake once pending merged into entries
+ unsigned int pending_awaken_count;
+ unsigned int pending_awaken_size;
ErlNifMutex* mutex;
char is_ready;
char name[0];
@@ -119,6 +140,12 @@ typedef struct
#define LOCK(keydir) { if (keydir->mutex) enif_mutex_lock(keydir->mutex); }
#define UNLOCK(keydir) { if (keydir->mutex) enif_mutex_unlock(keydir->mutex); }
+// Pending tombstones
+#define is_pending_tombstone(e) ((e)->tstamp == 0 && \
+ (e)->offset == 0)
+#define set_pending_tombstone(e) {(e)->tstamp = 0; \
+ (e)->offset = 0; }
+
// Atoms (initialized in on_load)
static ERL_NIF_TERM ATOM_ALLOCATION_ERROR;
static ERL_NIF_TERM ATOM_ALREADY_EXISTS;
@@ -136,6 +163,7 @@ static ERL_NIF_TERM ATOM_LOCK_NOT_WRITABLE;
static ERL_NIF_TERM ATOM_NOT_FOUND;
static ERL_NIF_TERM ATOM_NOT_READY;
static ERL_NIF_TERM ATOM_OK;
+static ERL_NIF_TERM ATOM_OUT_OF_DATE;
static ERL_NIF_TERM ATOM_PREAD_ERROR;
static ERL_NIF_TERM ATOM_PWRITE_ERROR;
static ERL_NIF_TERM ATOM_READY;
@@ -169,6 +197,7 @@ ERL_NIF_TERM bitcask_nifs_lock_writedata(ErlNifEnv* env, int argc, const ERL_NIF
ERL_NIF_TERM errno_atom(ErlNifEnv* env, int error);
ERL_NIF_TERM errno_error_tuple(ErlNifEnv* env, ERL_NIF_TERM key, int error);
+static void merge_pending_entries(ErlNifEnv* env, bitcask_keydir* keydir);
static void lock_release(bitcask_lock_handle* handle);
static void bitcask_nifs_keydir_resource_cleanup(ErlNifEnv* env, void* arg);
@@ -183,7 +212,7 @@ static ErlNifFunc nif_funcs[] =
{"keydir_remove", 2, bitcask_nifs_keydir_remove},
{"keydir_remove_int", 5, bitcask_nifs_keydir_remove},
{"keydir_copy", 1, bitcask_nifs_keydir_copy},
- {"keydir_itr", 1, bitcask_nifs_keydir_itr},
+ {"keydir_itr_int", 4, bitcask_nifs_keydir_itr},
{"keydir_itr_next_int", 1, bitcask_nifs_keydir_itr_next},
{"keydir_itr_release", 1, bitcask_nifs_keydir_itr_release},
{"keydir_info", 1, bitcask_nifs_keydir_info},
@@ -312,11 +341,29 @@ ERL_NIF_TERM bitcask_nifs_keydir_mark_ready(ErlNifEnv* env, int argc, const ERL_
}
}
-static void update_fstats(ErlNifEnv* env, bitcask_keydir* keydir,
- uint32_t file_id,
- fstat_count_type type,
- int32_t keys_increment, int32_t total_keys_increment,
- int32_t bytes_increment, int32_t total_bytes_increment)
+static void dump_fstats(bitcask_keydir* keydir)
+{
+ bitcask_fstats_entry* curr_f;
+ khiter_t itr;
+ for (itr = kh_begin(keydir->fstats); itr != kh_end(keydir->fstats); ++itr)
+ {
+ if (kh_exist(keydir->fstats, itr))
+ {
+ curr_f = kh_val(keydir->fstats, itr);
+ DEBUG("fstats %d pending=(%d,%d) live=(%d,%d) total=(%d,%d)\r\n",
+ (int) curr_f->file_id,
+ (int) curr_f->counts[PENDING].keys,
+ (int) curr_f->counts[PENDING].bytes,
+ (int) curr_f->counts[LIVE].keys,
+ (int) curr_f->counts[LIVE].bytes,
+ (int) curr_f->counts[TOTAL].keys,
+ (int) curr_f->counts[TOTAL].bytes);
+ }
+ }
+}
+
+static bitcask_fstats_entry* lookup_fstats(ErlNifEnv* env, bitcask_keydir* keydir,
+ unsigned int file_id)
{
bitcask_fstats_entry* entry = 0;
khiter_t itr = kh_get(fstats, keydir->fstats, file_id);
@@ -333,13 +380,99 @@ static void update_fstats(ErlNifEnv* env, bitcask_keydir* keydir,
{
entry = kh_val(keydir->fstats, itr);
}
+ return entry;
+}
+
+static void update_fstats(ErlNifEnv* env, bitcask_keydir* keydir,
+ bitcask_keydir_entry* cur_entry,
+ fstat_count_type cur_type,
+ bitcask_keydir_entry* upd_entry,
+ fstat_count_type upd_type)
+{
+ bitcask_fstats_entry* cur_fstats;
+ bitcask_fstats_entry* upd_fstats;
+
+ if (cur_entry != NULL)
+ cur_fstats = lookup_fstats(env, keydir, cur_entry->file_id);
+ else
+ cur_fstats = NULL;
+
+ if (upd_entry == NULL)
+ upd_fstats = NULL;
+ else if (cur_entry != NULL && upd_entry->file_id == cur_entry->file_id)
+ upd_fstats = cur_fstats;
+ else
+ upd_fstats = lookup_fstats(env, keydir, upd_entry->file_id);
+
+ DEBUG("fstats cur_entry->file_id=%d cur_type=%d upd_entry->file_id=%d upd_type=%d\r\n",
+ cur_entry == NULL ? -1 : cur_entry->file_id, cur_type,
+ upd_entry == NULL ? -1 : upd_entry->file_id, upd_type);
+
+ // Remove an entry
+ if (upd_entry == NULL)
+ {
+ cur_fstats->counts[cur_type].keys--;
+ cur_fstats->counts[cur_type].bytes -= cur_entry->total_sz;
+ }
+ // Add an entry (or update a pending tombstone)
+ else if (cur_entry == NULL || is_pending_tombstone(cur_entry))
+ {
+ upd_fstats->counts[upd_type].keys++;
+ upd_fstats->counts[upd_type].bytes += upd_entry->total_sz;
+ upd_fstats->counts[TOTAL].keys++;
+ upd_fstats->counts[TOTAL].bytes += upd_entry->total_sz;
+ }
+ // Updating or moving an entry
+ else
+ {
+ // If both live, both pending or updating during pending merge
+ // then update the totals
+ if (cur_type == upd_type)
+ {
+ cur_fstats->counts[cur_type].keys--;
+ cur_fstats->counts[cur_type].bytes -= cur_entry->total_sz;
+ upd_fstats->counts[upd_type].keys++;
+ upd_fstats->counts[upd_type].bytes += upd_entry->total_sz;
+ }
+ else if (upd_type == LIVE) // Merge pending into live
+ {
+ // assert(cur_type == PENDING);
+
+ // Adjust live count that was frozen when iteration started if
+ // not a move.
+ if (cur_entry != upd_entry)
+ {
+ upd_fstats->counts[upd_type].keys--;
+ upd_fstats->counts[upd_type].bytes -= upd_entry->total_sz;
+ }
+
+ // Convert pending to live
+ cur_fstats->counts[cur_type].keys--;
+ cur_fstats->counts[cur_type].bytes -= cur_entry->total_sz;
+ cur_fstats->counts[upd_type].keys++;
+ cur_fstats->counts[upd_type].bytes += cur_entry->total_sz;
+ }
+ else // Adding first entry to pending - live counts cannot change
+ {
+ // assert(cur_type == LIVE);
+ // assert(upd_type == PENDING);
+ upd_fstats->counts[upd_type].keys++;
+ upd_fstats->counts[upd_type].bytes += upd_entry->total_sz;
+ }
+
+ // If an update (not a move or merge), totals changed
+ if (cur_entry != upd_entry && // a move
+ !(cur_type == PENDING && upd_type == LIVE)) // NOT a pending merge
+ {
+ upd_fstats->counts[TOTAL].keys++;
+ upd_fstats->counts[TOTAL].bytes += upd_entry->total_sz;
+ }
+ }
- entry->counts[TOTAL].keys += total_keys_increment;
- entry->counts[TOTAL].bytes += total_bytes_increment;
- entry->counts[type].keys += keys_increment;
- entry->counts[type].bytes += bytes_increment;
+ dump_fstats(keydir);
}
+
static khint_t keydir_entry_hash(bitcask_keydir_entry* entry)
{
return MURMUR_HASH(entry->key, entry->key_sz, 42);
@@ -420,7 +553,8 @@ static int find_keydir_entry(ErlNifEnv* env, bitcask_keydir* keydir, ErlNifBinar
}
// Allocate, populate and add entry to the keydir hash based on the key and entry structure
-static void add_entry(ErlNifEnv* env, bitcask_keydir* keydir, entries_hash_t* hash,
+static bitcask_keydir_entry* add_entry(ErlNifEnv* env, bitcask_keydir* keydir,
+ entries_hash_t* hash, fstat_count_type type,
ErlNifBinary* key, bitcask_keydir_entry* entry)
{
bitcask_keydir_entry* new_entry = enif_alloc_compat(env,
@@ -434,56 +568,35 @@ static void add_entry(ErlNifEnv* env, bitcask_keydir* keydir, entries_hash_t* ha
memcpy(new_entry->key, key->data, key->size);
kh_put_set(entries, hash, new_entry);
- // Update the stats
- keydir->key_count++;
- keydir->key_bytes += key->size;
-
- // First entry for this key -- increment both live and total counters
- update_fstats(env, keydir, entry->file_id, LIVE, 1, 1,
- entry->total_sz, entry->total_sz);
-
+ return new_entry;
}
-// Update an entry with newer information and adjust statistics
-static void update_entry(ErlNifEnv* env, bitcask_keydir* keydir,
- bitcask_keydir_entry* old_entry,
- bitcask_keydir_entry* new_entry)
+// Move an entry from pending into entries
+static void move_pending_entry(ErlNifEnv* env, bitcask_keydir* keydir,
+ khiter_t pend_itr, bitcask_keydir_entry* entry)
{
- // Entry already exists. Decrement live counter on the fstats entry
- // for the old file ID and update both counters for new file. Note
- // that this only needs to be done if the file_ids are not the
- // same.
- if (old_entry->file_id != new_entry->file_id)
- {
- update_fstats(env, keydir, old_entry->file_id, LIVE, -1, 0,
- -1 * old_entry->total_sz, 0);
- update_fstats(env, keydir, new_entry->file_id, LIVE, 1, 1,
- new_entry->total_sz, new_entry->total_sz);
- }
- else // file_id is same, change live/total in one entry
- {
- update_fstats(env, keydir, new_entry->file_id, LIVE, 0, 1,
- new_entry->total_sz - old_entry->total_sz, new_entry->total_sz);
+ kh_put_set(entries, keydir->entries, entry);
+ // no need to delete from the pending hash; it will be freed as a whole
+}
- }
+// Update the current entry with newer information
+static void update_entry(ErlNifEnv* env, bitcask_keydir* keydir,
+ bitcask_keydir_entry* cur_entry,
+ bitcask_keydir_entry* upd_entry)
+{
// Update the entry info. Note that if you do multiple updates in a
// second, the last one in wins!
// TODO: Safe?
- old_entry->file_id = new_entry->file_id;
- old_entry->total_sz = new_entry->total_sz;
- old_entry->offset = new_entry->offset;
- old_entry->tstamp = new_entry->tstamp;
+ cur_entry->file_id = upd_entry->file_id;
+ cur_entry->total_sz = upd_entry->total_sz;
+ cur_entry->offset = upd_entry->offset;
+ cur_entry->tstamp = upd_entry->tstamp;
}
static void remove_entry(ErlNifEnv* env, bitcask_keydir* keydir, khiter_t itr,
bitcask_keydir_entry* entry)
{
- // Update fstats for the current file id -- one less live
- // key is the assumption here.
- update_fstats(env, keydir, entry->file_id, LIVE, -1, 0,
- -1 * entry->total_sz, 0);
-
kh_del(entries, keydir->entries, itr);
}
@@ -502,22 +615,33 @@ ERL_NIF_TERM bitcask_nifs_keydir_put_int(ErlNifEnv* env, int argc, const ERL_NIF
enif_get_uint(env, argv[5], &(entry.tstamp)))
{
khiter_t itr;
- entries_hash_t* hash;
+ entries_hash_t* old_hash;
bitcask_keydir_entry* old_entry;
bitcask_keydir* keydir = handle->keydir;
LOCK(keydir);
- if (keydir->keyfolders > 0)
- {
- UNLOCK(keydir);
- return enif_make_tuple2(env, ATOM_ERROR, ATOM_ITERATION_IN_PROCESS);
- }
+ DEBUG("+++ Put file_id=%d offset=%d total_sz=%d\r\n",
+ (int) entry.file_id, (int) entry.offset,
+ (int)entry.total_sz);
// Now that we've marshalled everything, see if the tstamp for this key is >=
// to what's already in the hash. Otherwise, we don't bother with the update.
- if (!find_keydir_entry(env, keydir, &key, &hash, &itr, &old_entry))
+ if (!find_keydir_entry(env, keydir, &key, &old_hash, &itr, &old_entry))
{
- add_entry(env, keydir, keydir->entries, &key, &entry);
+ if (keydir->pending == NULL)
+ {
+ keydir->key_count++;
+ keydir->key_bytes += key.size;
+
+ update_fstats(env, keydir, NULL, NO_STATS, &entry, LIVE);
+ add_entry(env, keydir, keydir->entries, LIVE, &key, &entry);
+ }
+ else
+ {
+ update_fstats(env, keydir, NULL, NO_STATS, &entry, PENDING);
+ add_entry(env, keydir, keydir->pending, PENDING, &key, &entry);
+
+ }
UNLOCK(keydir);
return ATOM_OK;
}
@@ -531,7 +655,25 @@ ERL_NIF_TERM bitcask_nifs_keydir_put_int(ErlNifEnv* env, int argc, const ERL_NIF
((old_entry->file_id == entry.file_id) &&
(old_entry->offset < entry.offset))))
{
- update_entry(env, keydir, old_entry, &entry);
+ // not folding
+ if (keydir->pending == NULL)
+ {
+ update_fstats(env, keydir, old_entry, LIVE, &entry, LIVE);
+ update_entry(env, keydir, old_entry, &entry);
+
+ }
+ else if (old_hash == keydir->pending) // the old_entry already in pending
+ {
+ update_fstats(env, keydir, old_entry, PENDING, &entry, PENDING);
+ update_entry(env, keydir, old_entry, &entry);
+ }
+ else
+ {
+ // old_entry is in entries - add to keydir->pending and update
+ // live fstats
+ update_fstats(env, keydir, old_entry, LIVE, &entry, PENDING);
+ add_entry(env, keydir, keydir->pending, PENDING, &key, &entry);
+ }
UNLOCK(keydir);
return ATOM_OK;
}
@@ -544,8 +686,7 @@ ERL_NIF_TERM bitcask_nifs_keydir_put_int(ErlNifEnv* env, int argc, const ERL_NIF
{
// Increment the total # of keys and total size for the entry that
// was NOT stored in the keydir.
- update_fstats(env, keydir, entry.file_id, LIVE, 0, 1,
- 0, entry.total_sz);
+ update_fstats(env, keydir, NULL, NO_STATS, &entry, LIVE);
}
UNLOCK(keydir);
return ATOM_ALREADY_EXISTS;
@@ -570,8 +711,10 @@ ERL_NIF_TERM bitcask_nifs_keydir_get_int(ErlNifEnv* env, int argc, const ERL_NIF
bitcask_keydir* keydir = handle->keydir;
LOCK(keydir);
- if (find_keydir_entry(env, keydir, &key, NULL, NULL, &entry)) // &&
- // t.b.c !is_pending_tombstone(entry))
+ DEBUG("+++ Get issued\r\n");
+
+ if (find_keydir_entry(env, keydir, &key, NULL, NULL, &entry) &&
+ !is_pending_tombstone(entry))
{
ERL_NIF_TERM result = enif_make_tuple6(env,
ATOM_BITCASK_ENTRY,
@@ -580,11 +723,13 @@ ERL_NIF_TERM bitcask_nifs_keydir_get_int(ErlNifEnv* env, int argc, const ERL_NIF
enif_make_uint(env, entry->total_sz),
enif_make_uint64_bin(env, entry->offset),
enif_make_uint(env, entry->tstamp));
+ DEBUG(" ... returned value\r\n");
UNLOCK(keydir);
return result;
}
else
{
+ DEBUG(" ... not_found\r\n");
UNLOCK(keydir);
return ATOM_NOT_FOUND;
}
@@ -610,6 +755,8 @@ ERL_NIF_TERM bitcask_nifs_keydir_remove(ErlNifEnv* env, int argc, const ERL_NIF_
bitcask_keydir* keydir = handle->keydir;
LOCK(keydir);
+ DEBUG("+++ Remove\r\n");
+
if (find_keydir_entry(env, keydir, &key, &hash, &itr, &entry))
{
// If this call has 5 arguments, this is a conditional removal. We
@@ -650,26 +797,35 @@ ERL_NIF_TERM bitcask_nifs_keydir_remove(ErlNifEnv* env, int argc, const ERL_NIF_
// Remove the entry and update file stats
remove_entry(env, keydir, itr, entry);
+ update_fstats(env, keydir, entry, LIVE, NULL, NO_STATS);
enif_free_compat(env, entry);
}
// If found an entry in the pending hash, convert it to a tombstone
else if (keydir->pending == hash)
{
- // t.b.c
+ // If not already a tombstone, update stats and make it one
+ if (!is_pending_tombstone(entry))
+ {
+ set_pending_tombstone(entry);
+ update_fstats(env, keydir, entry, PENDING, NULL, NO_STATS);
+ }
}
- // Otherwise add a tombstone to the pending hash
+ // Otherwise add a tombstone to the pending hash (iteration must have
+ // started between the put/remove calls in bitcask:delete).
+ // No stats to log as nothing is written to any files and the
+ // on-disk tombstone was already written by put_int so is already
+ // accounted for.
else
{
- // t.b.c
- /* bitcask_keydir_entry* ts_entry = add_entry(env, keydir->pending, entry); */
- /* set_pending_tombstone(ts_entry); */
- // Adjust stats?
+ bitcask_keydir_entry* pending_entry =
+ add_entry(env, keydir,keydir->pending, NO_STATS, &key, entry);
+ set_pending_tombstone(pending_entry);
}
UNLOCK(keydir);
return ATOM_OK;
}
- else // entry not found, nothing to update
+ else // entry not found, should not get here in normal operation nothing to update
{
UNLOCK(keydir);
return ATOM_OK;;
@@ -718,6 +874,22 @@ ERL_NIF_TERM bitcask_nifs_keydir_copy(ErlNifEnv* env, int argc, const ERL_NIF_TE
kh_put_set(entries, new_keydir->entries, new);
}
}
+ if (keydir->pending != NULL)
+ {
+ for (itr = kh_begin(keydir->pending); itr != kh_end(keydir->pending); ++itr)
+ {
+ // Allocate our entry to be inserted into the new table and copy the record
+ // over.
+ if (kh_exist(keydir->pending, itr))
+ {
+ bitcask_keydir_entry* curr = kh_key(keydir->pending, itr);
+ size_t new_sz = sizeof(bitcask_keydir_entry) + curr->key_sz;
+ bitcask_keydir_entry* new = enif_alloc_compat(env, new_sz);
+ memcpy(new, curr, new_sz);
+ kh_put_set(entries, new_keydir->pending, new);
+ }
+ }
+ }
// Deep copy fstats info
for (itr = kh_begin(keydir->fstats); itr != kh_end(keydir->fstats); ++itr)
@@ -732,6 +904,11 @@ ERL_NIF_TERM bitcask_nifs_keydir_copy(ErlNifEnv* env, int argc, const ERL_NIF_TE
}
}
+ if (keydir->pending != NULL)
+ {
+ merge_pending_entries(env, keydir);
+ }
+
UNLOCK(keydir);
ERL_NIF_TERM result = enif_make_resource(env, new_handle);
@@ -744,26 +921,83 @@ ERL_NIF_TERM bitcask_nifs_keydir_copy(ErlNifEnv* env, int argc, const ERL_NIF_TE
}
}
+// Helper for bitcask_nifs_keydir_itr to decide if it is valid to iterate over entries.
+// Check the number of updates since pending was created is less than the maximum
+// and that the current view is not too old
+static int can_itr_keydir(bitcask_keydir* keydir, uint64_t ts, int maxage, int maxputs)
+{
+ if (keydir->pending == NULL)
+ {
+ return 1;
+ }
+ else if (ts == 0 || ts < keydir->pending_start)
+ { // if clock skew (or forced wait), force key folding to wait
+ return 0; // which will fix keydir->pending_start
+ }
+ else
+ {
+ uint64_t age = ts - keydir->pending_start;
+ return (maxage < 0 || age <= maxage) &&
+ (keydir->pending_updated < 0 || keydir->pending_updated <= maxputs);
+ }
+}
+
ERL_NIF_TERM bitcask_nifs_keydir_itr(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
bitcask_keydir_handle* handle;
if (enif_get_resource(env, argv[0], bitcask_keydir_RESOURCE, (void**)&handle))
{
+ uint64_t ts;
+ int maxage;
+ int maxputs;
+
LOCK(handle->keydir);
+ DEBUG("+++ itr\r\n");
+ bitcask_keydir* keydir = handle->keydir;
// If a iterator thread is already active for this keydir, bail
if (handle->iterating)
{
UNLOCK(handle->keydir);
return enif_make_tuple2(env, ATOM_ERROR, ATOM_ITERATION_IN_PROCESS);
}
- handle->iterating = 1;
- handle->keydir->keyfolders++;
- handle->iterator = kh_begin(handle->keydir->entries);
- UNLOCK(handle->keydir);
- return ATOM_OK;
+ if (!(enif_get_uint64_bin(env, argv[1], (uint64_t*)&ts) &&
+ enif_get_int(env, argv[2], (int*)&maxage) &&
+ enif_get_int(env, argv[3], (int*)&maxputs)))
+ {
+ UNLOCK(handle->keydir);
+ return enif_make_badarg(env);
+ }
+
+ if (can_itr_keydir(keydir, ts, maxage, maxputs))
+ {
+ if (keydir->pending == NULL)
+ {
+ keydir->pending = kh_init(entries);
+ keydir->pending_start = ts;
+ }
+ handle->iterating = 1;
+ keydir->keyfolders++;
+ handle->iterator = kh_begin(keydir->entries);
+ UNLOCK(handle->keydir);
+ return ATOM_OK;
+ }
+ else
+ {
+ // Grow the pending_awaken array if necessary
+ if (keydir->pending_awaken_count == keydir->pending_awaken_size)
+ {
+ keydir->pending_awaken_size += 16;
+ size_t size = keydir->pending_awaken_size * sizeof(keydir->pending_awaken[0]);
+ keydir->pending_awaken = enif_realloc_compat(env, keydir->pending_awaken, size);
+ }
+ enif_self(env, &keydir->pending_awaken[keydir->pending_awaken_count]);
+ keydir->pending_awaken_count++;
+ UNLOCK(handle->keydir);
+ return ATOM_OUT_OF_DATE;
+ }
}
else
{
@@ -846,6 +1080,10 @@ ERL_NIF_TERM bitcask_nifs_keydir_itr_release(ErlNifEnv* env, int argc, const ERL
handle->iterating = 0;
handle->keydir->keyfolders--;
+ if (handle->keydir->keyfolders == 0)
+ {
+ merge_pending_entries(env, handle->keydir);
+ }
UNLOCK(handle->keydir);
return ATOM_OK;
}
@@ -1117,6 +1355,134 @@ ERL_NIF_TERM errno_error_tuple(ErlNifEnv* env, ERL_NIF_TERM key, int error)
enif_make_tuple2(env, key, errno_atom(env, error)));
}
+// Send messages to all processes that want to be awoken next time
+// pending is merged.
+static void msg_pending_awaken(ErlNifEnv* env, bitcask_keydir* keydir,
+ ERL_NIF_TERM msg)
+{
+ ErlNifEnv* msg_env = enif_alloc_env();
+ int idx;
+ for (idx = 0; idx < keydir->pending_awaken_count; idx++)
+ {
+ enif_clear_env(msg_env);
+ enif_send(env, &keydir->pending_awaken[idx], msg_env, msg);
+ }
+ enif_free_env(msg_env);
+}
+
+// Merge pending hash into entries hash and awaken any pids that want to
+// start iterating once we are merged. keydir must be locked before calling.
+static void merge_pending_entries(ErlNifEnv* env, bitcask_keydir* keydir)
+{
+ /* DEBUG("merge skipped\r\n"); */
+ /* return; */
+ DEBUG("before merge key count = %ld\r\n", keydir->key_count);
+ dump_fstats(keydir);
+
+ khiter_t pend_itr;
+ for (pend_itr = kh_begin(keydir->pending); pend_itr != kh_end(keydir->pending); ++pend_itr)
+ {
+ if (kh_exist(keydir->pending, pend_itr))
+ {
+ bitcask_keydir_entry* pending_entry = kh_key(keydir->pending, pend_itr);
+ khiter_t ent_itr = kh_get(entries, keydir->entries, pending_entry);
+
+ DEBUG("Pending Entry: key=%s key_sz=%d file_id=%d tstamp=%u offset=%u size=%d\r\n",
+ pending_entry->key, pending_entry->key_sz,
+ pending_entry->file_id,
+ (unsigned int) pending_entry->tstamp,
+ (unsigned int) pending_entry->offset,
+ pending_entry->total_sz);
+
+ if (ent_itr == kh_end(keydir->entries))
+ {
+ /* entries: empty, pending:tombstone */
+ if (is_pending_tombstone(pending_entry))
+ {
+ /* nop - stats were not updated when tombstone written for
+ ** empty entry
+ */
+ enif_free_compat(env, pending_entry);
+ }
+ /* entries: empty, pending:value */
+ else
+ {
+ // Update the keydir stats
+ keydir->key_count++;
+ keydir->key_bytes += pending_entry->key_sz;
+
+ update_fstats(env, keydir, pending_entry, PENDING, pending_entry, LIVE);
+ move_pending_entry(env, keydir, pend_itr, pending_entry);
+ // do not free - now in entries
+ }
+ }
+ else
+ {
+ bitcask_keydir_entry* entries_entry = kh_key(keydir->entries, ent_itr);
+ DEBUG("Entries Entry: key=%s key_sz=%d file_id=%d statmp=%u offset=%u size=%d\r\n",
+ entries_entry->key, entries_entry->key_sz,
+ entries_entry->file_id,
+ (unsigned int) entries_entry->tstamp,
+ (unsigned int) entries_entry->offset,
+ entries_entry->total_sz);
+
+ /* entries: present, pending:tombstone */
+ if (is_pending_tombstone(pending_entry))
+ {
+ // Update the keydir stats
+ keydir->key_count--;
+ keydir->key_bytes -= pending_entry->key_sz;
+
+ update_fstats(env, keydir, entries_entry, LIVE, NULL, NO_STATS);
+ remove_entry(env, keydir, ent_itr, entries_entry);
+ enif_free_compat(env, entries_entry);
+ }
+ /* entries: present, pending:value */
+ else
+ {
+ // adjust key/byte counts on the main entry then subtract from pending
+ // so it can be checked at the end of merge.
+ update_fstats(env, keydir, pending_entry, PENDING, entries_entry, LIVE);
+ update_entry(env, keydir, entries_entry, pending_entry);
+ }
+ enif_free_compat(env, pending_entry);
+ }
+ }
+ }
+ DEBUG("after merge key count = %ld\r\n", keydir->key_count);
+
+ // Check the keydirs are correct
+ dump_fstats(keydir);
+ bitcask_fstats_entry* curr_f;
+ khiter_t itr;
+ for (itr = kh_begin(keydir->fstats); itr != kh_end(keydir->fstats); ++itr)
+ {
+ if (kh_exist(keydir->fstats, itr))
+ {
+ curr_f = kh_val(keydir->fstats, itr);
+ //assert(curr_f->counts[PENDING].keys == 0);
+ //assert(curr_f->counts[PENDING].bytes == 0);
+ }
+ }
+
+ // Wake up all sleeping pids
+ msg_pending_awaken(env, keydir, ATOM_READY);
+
+ // Free all resources for keydir folding
+ kh_destroy(entries, keydir->pending);
+ keydir->pending = NULL;
+
+ keydir->pending_updated = 0;
+ keydir->pending_start = 0;
+ enif_free_compat(env, keydir->pending_awaken);
+ keydir->pending_awaken = NULL;
+ keydir->pending_awaken_count = 0;
+ keydir->pending_awaken_size = 0;
+
+ DEBUG("Merge completed\r\n");
+}
+
+
static void lock_release(bitcask_lock_handle* handle)
{
if (handle->fd > 0)
View
4 c_src/erl_nif_compat.h
@@ -31,6 +31,7 @@ extern "C" {
#define enif_release_resource_compat enif_release_resource
#define enif_alloc_binary_compat enif_alloc_binary
#define enif_alloc_compat enif_alloc
+#define enif_realloc_compat enif_realloc
#define enif_free_compat enif_free
#define enif_cond_create erl_drv_cond_create
#define enif_cond_destroy erl_drv_cond_destroy
@@ -57,6 +58,9 @@ extern "C" {
#define enif_alloc_compat(E, S) \
enif_alloc(S)
+#define enif_realloc_compat(E, P, S) \
+ enif_realloc(P, S)
+
#define enif_free_compat(E, P) \
enif_free(P)
View
9 ebin/bitcask.app
@@ -55,6 +55,15 @@
{dead_bytes_threshold, 134217728}, % Dead bytes > 128 MB
{small_file_threshold, 10485760}, % File is < 10 MB
+ %% Fold keys thresholds. max_fold_age will reuse the keydir if
+ %% another fold was started less than max_fold_age ago and there
+ %% were less than max_fold_puts updates. Otherwise it will
+ %% wait until all current fold keys complete and then start.
+ %% Set either option to -1 to disable.
+ {max_fold_age, 1000000}, % age in micro seconds
+ {max_fold_puts, -1}, % maximum number of updates
+
%% Data expiration can be caused by setting this to a
%% positive value. If so, items older than the value
%% will be discarded.
View
16 src/bitcask.erl
@@ -30,7 +30,7 @@
delete/2,
sync/1,
list_keys/1,
- fold_keys/3,
+ fold_keys/3, fold_keys/5,
fold/3,
merge/1, merge/2, merge/3,
needs_merge/1,
@@ -290,6 +290,18 @@ list_keys(Ref) ->
-spec fold_keys(reference(), Fun::fun(), Acc::term()) ->
term() | {error, any()}.
fold_keys(Ref, Fun, Acc0) ->
+ State = get_state(Ref),
+ MaxAge = get_opt(max_fold_age, State#bc_state.opts) * 1000, % convert from ms to us
+ MaxPuts = get_opt(max_fold_puts, State#bc_state.opts),
+ fold_keys(Ref, Fun, Acc0, MaxAge, MaxPuts).
+
+%% @doc Fold over all keys in a bitcask datastore with limits on how out of date
+%% the keydir is allowed to be.
+%% Must be able to understand the bitcask_entry record form.
+-spec fold_keys(reference(), Fun::fun(), Acc::term(), non_neg_integer() | undefined,
+ non_neg_integer() | undefined) ->
+ term() | {error, any()}.
+fold_keys(Ref, Fun, Acc0, MaxAge, MaxPut) ->
%% Fun should be of the form F(#bitcask_entry, A) -> A
ExpiryTime = expiry_time((get_state(Ref))#bc_state.opts),
RealFun = fun(BCEntry, Acc) ->
@@ -311,7 +323,7 @@ fold_keys(Ref, Fun, Acc0) ->
end
end
end,
- bitcask_nifs:keydir_fold((get_state(Ref))#bc_state.keydir, RealFun, Acc0).
+ bitcask_nifs:keydir_fold((get_state(Ref))#bc_state.keydir, RealFun, Acc0, MaxAge, MaxPut).
%% @doc fold over all K/V pairs in a bitcask datastore.
%% Fun is expected to take F(K,V,Acc0) -> Acc
View
176 src/bitcask_nifs.erl
@@ -30,7 +30,8 @@
keydir_get/2,
keydir_remove/2, keydir_remove/5,
keydir_copy/1,
- keydir_fold/3,
+ keydir_fold/5,
+ keydir_wait_pending/1,
keydir_info/1,
keydir_release/1,
create_file/1,
@@ -81,14 +82,15 @@
ok.
-spec keydir_copy(reference()) ->
{ok, reference()}.
--spec keydir_itr(reference()) ->
+-spec keydir_itr(reference(), integer(), integer()) ->
ok | {error, iteration_not_permitted}.
-spec keydir_itr_next(reference()) ->
#bitcask_entry{} |
- {error, iteration_not_permitted} | allocation_error | not_found.
+ {error, iteration_not_started} | allocation_error | not_found.
-spec keydir_itr_release(reference()) ->
ok.
--spec keydir_fold(reference(), fun((any(), any()) -> any()), any()) ->
+-spec keydir_fold(reference(), fun((any(), any()) -> any()), any(),
+ integer(), integer()) ->
any() | {error, any()}.
-spec keydir_info(reference()) ->
{integer(), integer(),
@@ -158,22 +160,8 @@ keydir_mark_ready(_Ref) ->
end.
keydir_put(Ref, Key, FileId, TotalSz, Offset, Tstamp) ->
- try_keydir_put_int(Ref, Key, FileId, TotalSz, <<Offset:64/unsigned-native>>,
- Tstamp, 0).
-
-try_keydir_put_int(Ref, Key, FileId, TotalSz, BinOffset, Tstamp, Reps) ->
- case keydir_put_int(Ref, Key, FileId, TotalSz, BinOffset, Tstamp) of
- {error, iteration_in_process} ->
- try_keydir_put_int(Ref, Key, FileId, TotalSz, BinOffset, Tstamp, Reps + 1);
- R when Reps > 0 ->
- put_retries(Reps),
- R;
- R ->
- R
- end.
-
-put_retries(_Reps) ->
- ok.
+ keydir_put_int(Ref, Key, FileId, TotalSz, <<Offset:64/unsigned-native>>,
+ Tstamp).
keydir_put_int(_Ref, _Key, _FileId, _TotalSz, _Offset, _Tstamp) ->
case random:uniform(999999999999) of
@@ -219,10 +207,16 @@ keydir_copy(_Ref) ->
_ -> exit("NIF library not loaded")
end.
-keydir_itr(_Ref) ->
+keydir_itr(Ref, MaxAge, MaxPuts) ->
+ {Mega,Secs,Micro} = os:timestamp(),
+ TS = <<((Mega * 1000000 + Secs) * 1000000 + Micro):64/unsigned-native>>,
+ keydir_itr_int(Ref, TS, MaxAge, MaxPuts).
+
+keydir_itr_int(_Ref, _Ts, _MaxAge, _MaxPuts) ->
case random:uniform(999999999999) of
- 666 -> {error, iteration_not_permitted};
- 667 -> ok;
+ 666 -> {error, iteration_in_process};
+ 667 -> out_of_date;
+ 668 -> ok;
_ -> exit("NIF library not loaded")
end.
@@ -237,7 +231,7 @@ keydir_itr_next(Ref) ->
keydir_itr_next_int(_Ref) ->
case random:uniform(999999999999) of
- 666 -> {error, iteration_not_permitted};
+ 666 -> {error, iteration_not_started};
667 -> allocation_error;
668 -> make_bogus_bitcask_entry(<<"BogusKey">>);
669 -> not_found;
@@ -247,8 +241,15 @@ keydir_itr_next_int(_Ref) ->
keydir_itr_release(_Ref) ->
ok.
-keydir_fold(Ref, Fun, Acc0) ->
- case keydir_itr(Ref) of
+keydir_fold(Ref, Fun, Acc0, MaxAge, MaxPuts) ->
+ case keydir_itr(Ref, MaxAge, MaxPuts) of
+ out_of_date ->
+ receive
+ ready -> % fold no matter what on second attempt
+ keydir_fold(Ref, Fun, Acc0, -1, -1);
+ error ->
+ {error, shutdown}
+ end;
ok ->
try
keydir_fold_cont(keydir_itr_next(Ref), Ref, Fun, Acc0)
@@ -259,6 +260,23 @@ keydir_fold(Ref, Fun, Acc0) ->
{error, Reason}
end.
+%% Wait for any pending iteration to complete
+keydir_wait_pending(Ref) ->
+ %% Create an iterator, passing a zero timestamp to force waiting for
+ %% any current iteration to complete
+ case keydir_itr_int(Ref, <<0:64/unsigned-native>>, 0, 0) of
+ out_of_date -> % no iter created, wait for message from last fold_keys
+ receive
+ ready ->
+ ok;
+ error ->
+ {error, shutdown}
+ end;
+ ok ->
+ keydir_itr_release(Ref),
+ ok
+ end.
+
keydir_info(_Ref) ->
case random:uniform(999999999999) of
666 -> {make_bogus_non_neg(), make_bogus_non_neg(), [{make_bogus_non_neg(), random:uniform(4242), random:uniform(4242), random:uniform(4242), random:uniform(4242)}]};
@@ -382,7 +400,7 @@ keydir_itr_test_base(Ref) ->
{3, 9, _} = keydir_info(Ref),
- List = keydir_fold(Ref, fun(E, Acc) -> [ E | Acc] end, []),
+ List = keydir_fold(Ref, fun(E, Acc) -> [ E | Acc] end, [], -1, -1),
3 = length(List),
true = lists:keymember(<<"abc">>, #bitcask_entry.key, List),
true = lists:keymember(<<"def">>, #bitcask_entry.key, List),
@@ -413,29 +431,125 @@ keydir_named_not_ready_test() ->
keydir_itr_while_itr_error_test() ->
{ok, Ref1} = keydir_new(),
- ok = keydir_itr(Ref1),
+ ok = keydir_itr(Ref1, -1, -1),
try
- ?assertEqual({error, iteration_in_process}, keydir_itr(Ref1))
+ ?assertEqual({error, iteration_in_process},
+ keydir_itr(Ref1, -1, -1))
after
keydir_itr_release(Ref1)
end.
keydir_double_itr_test() -> % check iterating flag is cleared
{ok, Ref1} = keydir_new(),
Folder = fun(_,Acc) -> Acc end,
- ?assertEqual(acc, keydir_fold(Ref1, Folder, acc)),
- ?assertEqual(acc, keydir_fold(Ref1, Folder, acc)).
+ ?assertEqual(acc, keydir_fold(Ref1, Folder, acc, -1, -1)),
+ ?assertEqual(acc, keydir_fold(Ref1, Folder, acc, -1, -1)).
keydir_next_notstarted_error_test() ->
{ok, Ref1} = keydir_new(),
?assertEqual({error, iteration_not_started}, keydir_itr_next(Ref1)).
+keydir_del_while_pending_test() ->
+ Name = "k_del_while_pending_test",
+ {not_ready, Ref1} = keydir_new(Name),
+ Key = <<"abc">>,
+ ok = keydir_put(Ref1, Key, 0, 1234, 0, 1),
+ keydir_mark_ready(Ref1),
+ ?assertEqual(#bitcask_entry{key = Key, file_id = 0, total_sz = 1234,
+ offset = <<0:64/unsigned-native>>, tstamp = 1},
+ keydir_get_int(Ref1, Key)),
+ {ready, Ref2} = keydir_new(Name),
+ try
+ %% Start keyfold iterator on Ref2
+ ok = keydir_itr(Ref2, -1, -1),
+ %% Delete Key
+ ?assertEqual(ok, keydir_remove(Ref1, Key)),
+ ?assertEqual(not_found, keydir_get(Ref1, Key)),
+
+ %% Keep iterating on Ref2 and check result is [Key]
+ Fun = fun(IterKey, Acc) -> [IterKey | Acc] end,
+ ?assertEqual([#bitcask_entry{key = Key, file_id = 0, total_sz = 1234,
+ offset = 0, tstamp = 1}],
+ keydir_fold_cont(keydir_itr_next(Ref2), Ref2, Fun, []))
+ after
+ %% End iteration
+ ok = keydir_itr_release(Ref2)
+ end,
+ %% Check key is deleted
+ ?assertEqual(not_found, keydir_get(Ref1, Key)).
+
+keydir_create_del_while_pending_test() ->
+ Name = "k_create_del_while_pending_test",
+ {not_ready, Ref1} = keydir_new(Name),
+ Key = <<"abc">>,
+ keydir_mark_ready(Ref1),
+ {ready, Ref2} = keydir_new(Name),
+ try
+ %% Start keyfold iterator on Ref2
+ ok = keydir_itr(Ref2, -1, -1),
+ %% Delete Key
+ ok = keydir_put(Ref1, Key, 0, 1234, 0, 1),
+ ?assertEqual(#bitcask_entry{key = Key, file_id = 0, total_sz = 1234,
+ offset = <<0:64/unsigned-native>>, tstamp = 1},
+ keydir_get_int(Ref1, Key)),
+ ?assertEqual(ok, keydir_remove(Ref1, Key)),
+ ?assertEqual(not_found, keydir_get(Ref1, Key)),
+
+ %% Keep iterating on Ref2 and check result is [] since the key was
+ %% created (and deleted) after the iterator started
+ Fun = fun(IterKey, Acc) -> [IterKey | Acc] end,
+ ?assertEqual([], keydir_fold_cont(keydir_itr_next(Ref2), Ref2, Fun, []))
+ after
+ %% End iteration
+ ok = keydir_itr_release(Ref2)
+ end,
+ %% Check key is deleted
+ ?assertEqual(not_found, keydir_get(Ref1, Key)).
+
+keydir_del_put_while_pending_test() ->
+ Name = "k_del_put_while_pending_test",
+ {not_ready, Ref1} = keydir_new(Name),
+ Key = <<"abc">>,
+ keydir_mark_ready(Ref1),
+ {ready, Ref2} = keydir_new(Name),
+ try
+ %% Start keyfold iterator on Ref2
+ ok = keydir_itr(Ref2, -1, -1),
+ %% Delete Key, then re-put it
+ ?assertEqual(ok, keydir_remove(Ref1, Key)),
+ ok = keydir_put(Ref1, Key, 0, 1234, 0, 1),
+ ?assertEqual(#bitcask_entry{key = Key, file_id = 0, total_sz = 1234,
+ offset = <<0:64/unsigned-native>>, tstamp = 1},
+ keydir_get_int(Ref1, Key)),
+
+ %% Keep iterating on Ref2 and check result is [] since the delete/put
+ %% happened after the iterator started
+ Fun = fun(IterKey, Acc) -> [IterKey | Acc] end,
+ ?assertEqual([], keydir_fold_cont(keydir_itr_next(Ref2), Ref2, Fun, []))
+ after
+ %% End iteration
+ ok = keydir_itr_release(Ref2)
+ end,
+ %% Check key is still present
+ ?assertEqual(#bitcask_entry{key = Key, file_id = 0, total_sz = 1234,
+ offset = <<0:64/unsigned-native>>, tstamp = 1},
+ keydir_get_int(Ref1, Key)).
+
+keydir_multi_put_during_itr_test() ->
+ {not_ready, Ref} = bitcask_nifs:keydir_new("t"),
+ bitcask_nifs:keydir_mark_ready(Ref),
+ bitcask_nifs:keydir_put(Ref, <<"k">>, 123, 1, 0, 1),
+ bitcask_nifs:keydir_itr(Ref, 0, 0),
+ bitcask_nifs:keydir_put(Ref, <<"k">>, 123, 2, 10, 2),
+ bitcask_nifs:keydir_put(Ref, <<"k">>, 123, 3, 20, 3),
+ bitcask_nifs:keydir_put(Ref, <<"k">>, 123, 4, 30, 4),
+ bitcask_nifs:keydir_itr_release(Ref).
+
create_file_test() ->
Fname = "/tmp/bitcask_nifs.createfile.test",
file:delete(Fname),
true = create_file(Fname),
false = create_file(Fname).
+
-ifdef(EQC).
-define(POW_2(N), trunc(math:pow(2, N))).
View
27 test/bitcask_qc.erl
@@ -37,7 +37,10 @@
-record(m_fstats, {key_bytes=0, live_keys=0, live_bytes=0, total_keys=0, total_bytes=0}).
qc(P) ->
- ?assert(eqc:quickcheck(?QC_OUT(P))).
+ qc(P, 100).
+
+qc(P, NumTests) ->
+ ?assert(eqc:quickcheck(?QC_OUT(eqc:numtests(NumTests, P)))).
keys() ->
eqc_gen:non_empty(list(eqc_gen:non_empty(binary()))).
@@ -46,10 +49,10 @@ values() ->
eqc_gen:non_empty(list(binary())).
ops(Keys, Values) ->
- {oneof([put, delete]), oneof(Keys), oneof(Values)}.
+ {oneof([put, delete, itr, itr_next, itr_release]), oneof(Keys), oneof(Values)}.
apply_kv_ops([], Ref, KVs0, Fstats) ->
- %bitcask_nifs:keydir_itr_release(get_keydir(Ref)), % release any iterators
+ bitcask_nifs:keydir_itr_release(get_keydir(Ref)), % release any iterators
{KVs0, Fstats};
apply_kv_ops([{put, K, V} | Rest], Ref, KVs0, Fstats0) ->
ok = bitcask:put(Ref, K, V),
@@ -58,7 +61,19 @@ apply_kv_ops([{put, K, V} | Rest], Ref, KVs0, Fstats0) ->
apply_kv_ops([{delete, K, _} | Rest], Ref, KVs0, Fstats0) ->
ok = bitcask:delete(Ref, K),
apply_kv_ops(Rest, Ref, orddict:store(K, deleted, KVs0),
- update_fstats(delete, K, orddict:find(K, KVs0), ?TOMBSTONE, Fstats0)).
+ update_fstats(delete, K, orddict:find(K, KVs0), ?TOMBSTONE, Fstats0));
+apply_kv_ops([{itr, _K, _} | Rest], Ref, KVs, Fstats) ->
+ %% Don't care about result, just want to intermix with get/put
+ bitcask_nifs:keydir_itr(get_keydir(Ref), -1, -1),
+ apply_kv_ops(Rest, Ref, KVs, Fstats);
+apply_kv_ops([{itr_next, _K, _} | Rest], Ref, KVs, Fstats) ->
+ %% Don't care about result, just want to intermix with get/put
+ bitcask_nifs:keydir_itr_next(get_keydir(Ref)),
+ apply_kv_ops(Rest, Ref, KVs, Fstats);
+apply_kv_ops([{itr_release, _K, _} | Rest], Ref, KVs, Fstats) ->
+ %% Don't care about result, just want to intermix with get/put
+ bitcask_nifs:keydir_itr_release(get_keydir(Ref)),
+ apply_kv_ops(Rest, Ref, KVs, Fstats).
update_fstats(delete, K, OldV, NewV, Fstats0) -> %% Delete existing key (i.e. write tombstone)
@@ -236,7 +251,7 @@ prop_fold() ->
prop_merge_test_() ->
- {timeout, 3*60, fun() -> qc(prop_merge()) end}.
+ {timeout, 300*60, fun() -> qc(prop_merge()) end}.
merge1_test() ->
?assert(eqc:check(prop_merge(),
@@ -254,7 +269,7 @@ merge3_test() ->
1,1}])).
prop_fold_test_() ->
- {timeout, 3*60, fun() -> qc(prop_fold()) end}.
+ {timeout, 300*60, fun() -> qc(prop_fold()) end}.
get_keydir(Ref) ->

0 comments on commit 64f8731

Please sign in to comment.