Permalink
Browse files

Changes to make it easier to compare LevelDB with RocksDB:

* add --seed option to db_bench so the per-thread RNG seed can be changed per run
* add --sync option to db_bench so the WriteOptions::sync option can be set for most tests (fillsync ignores this)
* add --writes_per_second option to db_bench to limit the write rate for readwhilewriting. Otherwise
  a fast writer can starve reads.
* change --cache_size from int to long to support cache sizes larger than 2G
* change ReadOptions::verify_checksums from false to true. The default for this really should be true.
* increase slowdown and stop triggers for #files in L0 from 8/12 to 12/20. These are configurable
  in RocksDB.
* change block_size from 4096 to 16384. This is configurable in RocksDB.

Summary:

Test Plan:

Reviewers:

CC:

Task ID: #

Blame Rev:
  • Loading branch information...
Mark Callaghan
Mark Callaghan committed Apr 24, 2015
1 parent 77948e7 commit 27eba42f1dc87e27eb862e74c7b5b50f09384936
Showing with 49 additions and 8 deletions.
  1. +45 −4 db/db_bench.cc
  2. +2 −2 db/dbformat.h
  3. +1 −1 include/leveldb/options.h
  4. +1 −1 util/options.cc
View
@@ -64,6 +64,12 @@ static const char* FLAGS_benchmarks =
// Number of key/values to place in database
static int FLAGS_num = 1000000;
static int FLAGS_seed = 0;
static int FLAGS_sync = 0;
static int FLAGS_writes_per_second = 0;
// Number of read operations to do. If negative, do FLAGS_num reads.
static int FLAGS_reads = -1;
@@ -86,7 +92,7 @@ static int FLAGS_write_buffer_size = 0;
// Number of bytes to use as a cache of uncompressed data.
// Negative means use default settings.
static int FLAGS_cache_size = -1;
static long FLAGS_cache_size = -1;
// Maximum number of files to keep open at the same time (use default if == 0)
static int FLAGS_open_files = 0;
@@ -294,7 +300,7 @@ struct ThreadState {
ThreadState(int index)
: tid(index),
rand(1000 + index) {
rand((FLAGS_seed ? FLAGS_seed : 1000) + index) {
}
};
@@ -443,6 +449,8 @@ class Benchmark {
bool fresh_db = false;
int num_threads = FLAGS_threads;
write_options_.sync = FLAGS_sync;
if (name == Slice("open")) {
method = &Benchmark::OpenBench;
num_ /= 10000;
@@ -875,6 +883,18 @@ class Benchmark {
} else {
// Special thread that keeps writing until other threads are done.
RandomGenerator gen;
double last = Env::Default()->NowMicros();
int writes_per_second_by_10 = 0;
int num_writes = 0;
int64_t bytes = 0;
// The --writes_per_second rate limit is enforced per 100-millisecond
// interval to avoid a burst of writes at the start of each second.
if (FLAGS_writes_per_second > 0)
writes_per_second_by_10 = FLAGS_writes_per_second / 10;
while (true) {
{
MutexLock l(&thread->shared->mu);
@@ -892,6 +912,20 @@ class Benchmark {
fprintf(stderr, "put error: %s\n", s.ToString().c_str());
exit(1);
}
++num_writes;
if (writes_per_second_by_10 && num_writes >= writes_per_second_by_10) {
double now = Env::Default()->NowMicros();
double usecs_since_last = now - last;
num_writes = 0;
last = now;
if (usecs_since_last < 100000.0) {
Env::Default()->SleepForMicroseconds(100000.0 - usecs_since_last);
last = Env::Default()->NowMicros();
}
}
}
// Do not count any of the preceding work/delay in stats.
@@ -943,6 +977,7 @@ int main(int argc, char** argv) {
for (int i = 1; i < argc; i++) {
double d;
int n;
long l;
char junk;
if (leveldb::Slice(argv[i]).starts_with("--benchmarks=")) {
FLAGS_benchmarks = argv[i] + strlen("--benchmarks=");
@@ -958,14 +993,20 @@ int main(int argc, char** argv) {
FLAGS_num = n;
} else if (sscanf(argv[i], "--reads=%d%c", &n, &junk) == 1) {
FLAGS_reads = n;
} else if (sscanf(argv[i], "--sync=%d%c", &n, &junk) == 1) {
FLAGS_sync = n;
} else if (sscanf(argv[i], "--seed=%d%c", &n, &junk) == 1) {
FLAGS_seed = n;
} else if (sscanf(argv[i], "--writes_per_second=%d%c", &n, &junk) == 1) {
FLAGS_writes_per_second = n;
} else if (sscanf(argv[i], "--threads=%d%c", &n, &junk) == 1) {
FLAGS_threads = n;
} else if (sscanf(argv[i], "--value_size=%d%c", &n, &junk) == 1) {
FLAGS_value_size = n;
} else if (sscanf(argv[i], "--write_buffer_size=%d%c", &n, &junk) == 1) {
FLAGS_write_buffer_size = n;
} else if (sscanf(argv[i], "--cache_size=%d%c", &n, &junk) == 1) {
FLAGS_cache_size = n;
} else if (sscanf(argv[i], "--cache_size=%ld%c", &l, &junk) == 1) {
FLAGS_cache_size = l;
} else if (sscanf(argv[i], "--bloom_bits=%d%c", &n, &junk) == 1) {
FLAGS_bloom_bits = n;
} else if (sscanf(argv[i], "--open_files=%d%c", &n, &junk) == 1) {
View
@@ -25,10 +25,10 @@ static const int kNumLevels = 7;
static const int kL0_CompactionTrigger = 4;
// Soft limit on number of level-0 files. We slow down writes at this point.
static const int kL0_SlowdownWritesTrigger = 8;
static const int kL0_SlowdownWritesTrigger = 12;
// Maximum number of level-0 files. We stop writes at this point.
static const int kL0_StopWritesTrigger = 12;
static const int kL0_StopWritesTrigger = 20;
// Maximum level to which a new compacted memtable is pushed if it
// does not create overlap. We try to push to level 2 to avoid the
@@ -159,7 +159,7 @@ struct ReadOptions {
const Snapshot* snapshot;
ReadOptions()
: verify_checksums(false),
: verify_checksums(true),
fill_cache(true),
snapshot(NULL) {
}
View
@@ -19,7 +19,7 @@ Options::Options()
write_buffer_size(4<<20),
max_open_files(1000),
block_cache(NULL),
block_size(4096),
block_size(16 * 1024),
block_restart_interval(16),
compression(kSnappyCompression),
filter_policy(NULL) {

0 comments on commit 27eba42

Please sign in to comment.