From 73b291884a18cf4bc228e08246cfb0ea97444986 Mon Sep 17 00:00:00 2001
From: yiwu-arbug
Date: Wed, 15 May 2019 12:10:24 -0700
Subject: [PATCH] Add clang-format script and CI job (#9)

Add scripts/format-diff.sh, which uses clang-format to format code
changes made since diverging from master. Also add a Travis CI job
that checks whether the code needs formatting, and format all
existing code.

Signed-off-by: Yi Wu
---
 .travis.yml                          |  21 +-
 README.md                            |   3 +
 include/titan/db.h                   |  10 +-
 scripts/format-diff.sh               |   3 +
 scripts/travis-format.sh             |  11 +
 scripts/travis-make.sh               |   7 +
 src/base_db_listener.h               |   2 +-
 src/blob_file_builder.h              |   2 +-
 src/blob_file_cache.cc               |   2 +-
 src/blob_file_cache.h                |   2 +-
 src/blob_file_iterator.cc            |   2 +-
 src/blob_file_iterator.h             |   4 +-
 src/blob_file_iterator_test.cc       |   7 +-
 src/blob_file_manager.h              |   2 +-
 src/blob_file_reader.h               |   2 +-
 src/blob_file_size_collector.h       |   2 +-
 src/blob_file_size_collector_test.cc |   4 +-
 src/blob_file_test.cc                |  10 +-
 src/blob_format_test.cc              |   2 +-
 src/blob_gc.cc                       |   4 +-
 src/blob_gc.h                        |   2 +-
 src/blob_gc_job.h                    |   4 +-
 src/blob_gc_job_test.cc              |  36 +-
 src/blob_gc_picker.cc                |   3 +-
 src/blob_gc_picker.h                 |   8 +-
 src/blob_gc_picker_test.cc           |  10 +-
 src/blob_storage.cc                  |  25 +-
 src/blob_storage.h                   |  29 +-
 src/db_impl.cc                       |  24 +-
 src/db_impl.h                        |   5 +-
 src/db_impl_files.cc                 |   2 +-
 src/db_iter.h                        |   2 +-
 src/table_builder.h                  |   2 +-
 src/table_builder_test.cc            |   9 +-
 src/table_factory.h                  |   2 +-
 src/titan_db_test.cc                 | 123 +++--
 src/titan_fault_injection_test_env.h |  36 +-
 src/version_edit.h                   |   5 +-
 src/version_set.cc                   |  58 ++-
 src/version_set.h                    |  25 +-
 src/version_test.cc                  |   8 +-
 tools/db_bench_tool.cc               | 650 ++++++++++++++------------
 tools/titandb_stress.cc              | 487 ++++++++++----------
 43 files changed, 838 insertions(+), 819 deletions(-)
 create mode 100644 scripts/format-diff.sh
 create mode 100644 scripts/travis-format.sh
 create mode 100644 scripts/travis-make.sh

diff --git a/.travis.yml b/.travis.yml
index 7f3b7e38b..70a73ce2a 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -9,10 +9,12 @@ addons:
   apt:
     sources:
     - ubuntu-toolchain-r-test
+    - llvm-toolchain-xenial-7
     packages:
     - g++-7
     - libgflags-dev
     - lcov
+    - clang-format-7
 
 # For GCC build, we also report code coverage to codecov.
 matrix:
@@ -26,28 +28,31 @@ matrix:
     - compiler: clang
       env: SANITIZER="UBSAN"
     - env: COMPILER=gcc7
+    - env: FORMATTER=ON
 
 install:
   - git clone --depth=1 --branch=tikv-3.0 https://github.com/pingcap/rocksdb.git
   - if [ "${COMPILER}" == gcc7 ]; then
       CC=gcc-7;
       CXX=g++-7;
-      COVERAGE_OPT="-DCODE_COVERAGE=ON";
+      export COVERAGE_OPT="-DCODE_COVERAGE=ON";
     fi
   - if [ ! -z "${BUILD_TYPE}" ]; then
-      BUILD_OPT="-DCMAKE_BUILD_TYPE=${BUILD_TYPE}";
+      export BUILD_OPT="-DCMAKE_BUILD_TYPE=${BUILD_TYPE}";
     else
-      BUILD_OPT="-DCMAKE_BUILD_TYPE=Debug";
+      export BUILD_OPT="-DCMAKE_BUILD_TYPE=Debug";
     fi
   - if [ ! -z "${SANITIZER}" ]; then
-      SANITIZER_OPT="-DWITH_${SANITIZER}=ON";
-      TOOLS_OPT="-DWITH_TITAN_TOOLS=OFF";
+      export SANITIZER_OPT="-DWITH_${SANITIZER}=ON";
+      export TOOLS_OPT="-DWITH_TITAN_TOOLS=OFF";
     fi
 
 script:
-  - cmake . -L -DROCKSDB_DIR=./rocksdb -DTRAVIS=ON ${BUILD_OPT} ${SANITIZER_OPT} ${TOOLS_OPT} ${COVERAGE_OPT}
-  - make -j4
-  - ctest -R titan
+  - if [ -z "${FORMATTER}" ]; then
+      bash scripts/travis-make.sh;
+    else
+      bash scripts/travis-format.sh;
+    fi
 
 after_success:
   - if [ "${COMPILER}" == gcc7 ]; then
diff --git a/README.md b/README.md
index 4f556d570..1ad606215 100644
--- a/README.md
+++ b/README.md
@@ -32,4 +32,7 @@ cmake .. -DROCKSDB_DIR= -DWITH_SNAPPY=ON
 
 # Run tests after build. You need to filter tests by "titan" prefix.
 ctest -R titan
+
+# To format code, install clang-format and run the script.
+bash scripts/format-diff.sh
 ```
diff --git a/include/titan/db.h b/include/titan/db.h
index 2e132d182..5a259491f 100644
--- a/include/titan/db.h
+++ b/include/titan/db.h
@@ -46,8 +46,7 @@ class TitanDB : public StackableDB {
 
   using StackableDB::CreateColumnFamilies;
   Status CreateColumnFamilies(
-      const ColumnFamilyOptions& options,
-      const std::vector<std::string>& names,
+      const ColumnFamilyOptions& options, const std::vector<std::string>& names,
       std::vector<ColumnFamilyHandle*>* handles) override {
     std::vector<TitanCFDescriptor> descs;
     for (auto& name : names) {
@@ -75,7 +74,8 @@ class TitanDB : public StackableDB {
   Status DropColumnFamilies(
       const std::vector<ColumnFamilyHandle*>& handles) override = 0;
 
-  Status DestroyColumnFamilyHandle(ColumnFamilyHandle* column_family) override = 0;
+  Status DestroyColumnFamilyHandle(ColumnFamilyHandle* column_family) override =
+      0;
 
   using StackableDB::Merge;
   Status Merge(const WriteOptions&, ColumnFamilyHandle*, const Slice& /*key*/,
@@ -85,8 +85,8 @@ class TitanDB : public StackableDB {
 
   using rocksdb::StackableDB::SingleDelete;
   Status SingleDelete(const WriteOptions& /*wopts*/,
-                       ColumnFamilyHandle* /*column_family*/,
-                       const Slice& /*key*/) override {
+                      ColumnFamilyHandle* /*column_family*/,
+                      const Slice& /*key*/) override {
     return Status::NotSupported("Not supported operation in titan db.");
   }
 
diff --git a/scripts/format-diff.sh b/scripts/format-diff.sh
new file mode 100644
index 000000000..d650df4b8
--- /dev/null
+++ b/scripts/format-diff.sh
@@ -0,0 +1,3 @@
+#!/bin/bash
+
+git diff `git merge-base master HEAD` | clang-format-diff -style=google -p1 -i
diff --git a/scripts/travis-format.sh b/scripts/travis-format.sh
new file mode 100644
index 000000000..cba06c961
--- /dev/null
+++ b/scripts/travis-format.sh
@@ -0,0 +1,11 @@
+#!/bin/bash
+
+set -ev
+git fetch --depth=1 origin master:master;
+git diff $(git merge-base master HEAD) HEAD > diff;
+cat diff | clang-format-diff-7 -style=google -p1 > formatted;
+if [ -s formatted ]; then
+  cat formatted;
+  echo "Run scripts/format-diff.sh to format your code.";
+  exit 1;
+fi;
diff --git a/scripts/travis-make.sh b/scripts/travis-make.sh
new file mode 100644
index 000000000..b6a342f49
--- /dev/null
+++ b/scripts/travis-make.sh
@@ -0,0 +1,7 @@
+#!/bin/bash
+
+set -ev
+cmake .
-L -DROCKSDB_DIR=./rocksdb -DTRAVIS=ON ${BUILD_OPT} ${SANITIZER_OPT} ${TOOLS_OPT} ${COVERAGE_OPT} +make -j4 +ctest -R titan + diff --git a/src/base_db_listener.h b/src/base_db_listener.h index 19be39d2f..f2cce94c2 100644 --- a/src/base_db_listener.h +++ b/src/base_db_listener.h @@ -1,7 +1,7 @@ #pragma once -#include "rocksdb/listener.h" #include "db_impl.h" +#include "rocksdb/listener.h" namespace rocksdb { diff --git a/src/blob_file_builder.h b/src/blob_file_builder.h index 09e2a3abc..ebf6152af 100644 --- a/src/blob_file_builder.h +++ b/src/blob_file_builder.h @@ -1,8 +1,8 @@ #pragma once -#include "util/file_reader_writer.h" #include "blob_format.h" #include "titan/options.h" +#include "util/file_reader_writer.h" namespace rocksdb { namespace titandb { diff --git a/src/blob_file_cache.cc b/src/blob_file_cache.cc index 742764f19..32ce00a57 100644 --- a/src/blob_file_cache.cc +++ b/src/blob_file_cache.cc @@ -1,7 +1,7 @@ #include "blob_file_cache.h" -#include "util/filename.h" #include "util.h" +#include "util/filename.h" namespace rocksdb { namespace titandb { diff --git a/src/blob_file_cache.h b/src/blob_file_cache.h index 820bf8fe9..febbb5704 100644 --- a/src/blob_file_cache.h +++ b/src/blob_file_cache.h @@ -1,8 +1,8 @@ #pragma once -#include "rocksdb/options.h" #include "blob_file_reader.h" #include "blob_format.h" +#include "rocksdb/options.h" #include "titan/options.h" namespace rocksdb { diff --git a/src/blob_file_iterator.cc b/src/blob_file_iterator.cc index a25d5d72d..6515a24cf 100644 --- a/src/blob_file_iterator.cc +++ b/src/blob_file_iterator.cc @@ -1,7 +1,7 @@ #include "blob_file_iterator.h" -#include "util/crc32c.h" #include "util.h" +#include "util/crc32c.h" namespace rocksdb { namespace titandb { diff --git a/src/blob_file_iterator.h b/src/blob_file_iterator.h index 361e58eae..bfbc9bf7f 100644 --- a/src/blob_file_iterator.h +++ b/src/blob_file_iterator.h @@ -3,13 +3,13 @@ #include #include +#include "blob_format.h" #include "rocksdb/slice.h" #include "rocksdb/status.h" #include "table/internal_iterator.h" -#include "util/file_reader_writer.h" -#include "blob_format.h" #include "titan/options.h" #include "util.h" +#include "util/file_reader_writer.h" namespace rocksdb { namespace titandb { diff --git a/src/blob_file_iterator_test.cc b/src/blob_file_iterator_test.cc index f61992990..15dedcc01 100644 --- a/src/blob_file_iterator_test.cc +++ b/src/blob_file_iterator_test.cc @@ -2,11 +2,11 @@ #include -#include "util/filename.h" -#include "util/testharness.h" #include "blob_file_builder.h" #include "blob_file_cache.h" #include "blob_file_reader.h" +#include "util/filename.h" +#include "util/testharness.h" namespace rocksdb { namespace titandb { @@ -57,7 +57,8 @@ class BlobFileIteratorTest : public testing::Test { { std::unique_ptr f; ASSERT_OK(env_->NewWritableFile(file_name_, &f, env_options_)); - writable_file_.reset(new WritableFileWriter(std::move(f), file_name_, env_options_)); + writable_file_.reset( + new WritableFileWriter(std::move(f), file_name_, env_options_)); } builder_.reset(new BlobFileBuilder(cf_options, writable_file_.get())); } diff --git a/src/blob_file_manager.h b/src/blob_file_manager.h index 5403870ca..86d1e2d1c 100644 --- a/src/blob_file_manager.h +++ b/src/blob_file_manager.h @@ -1,7 +1,7 @@ #pragma once -#include "util/file_reader_writer.h" #include "blob_format.h" +#include "util/file_reader_writer.h" namespace rocksdb { namespace titandb { diff --git a/src/blob_file_reader.h b/src/blob_file_reader.h index 45b4d345e..4f794b1fb 100644 --- 
a/src/blob_file_reader.h +++ b/src/blob_file_reader.h @@ -1,8 +1,8 @@ #pragma once -#include "util/file_reader_writer.h" #include "blob_format.h" #include "titan/options.h" +#include "util/file_reader_writer.h" namespace rocksdb { namespace titandb { diff --git a/src/blob_file_size_collector.h b/src/blob_file_size_collector.h index c484ddde1..efcaf88ef 100644 --- a/src/blob_file_size_collector.h +++ b/src/blob_file_size_collector.h @@ -1,9 +1,9 @@ #pragma once +#include "db_impl.h" #include "rocksdb/listener.h" #include "rocksdb/table_properties.h" #include "util/coding.h" -#include "db_impl.h" #include "version_set.h" namespace rocksdb { diff --git a/src/blob_file_size_collector_test.cc b/src/blob_file_size_collector_test.cc index bd22c94ba..6910d88ea 100644 --- a/src/blob_file_size_collector_test.cc +++ b/src/blob_file_size_collector_test.cc @@ -40,8 +40,8 @@ class BlobFileSizeCollectorTest : public testing::Test { void NewFileWriter(std::unique_ptr* result) { std::unique_ptr writable_file; ASSERT_OK(env_->NewWritableFile(file_name_, &writable_file, env_options_)); - result->reset( - new WritableFileWriter(std::move(writable_file), file_name_, env_options_)); + result->reset(new WritableFileWriter(std::move(writable_file), file_name_, + env_options_)); ASSERT_TRUE(*result); } diff --git a/src/blob_file_test.cc b/src/blob_file_test.cc index 18fdebd2e..fe5691f7a 100644 --- a/src/blob_file_test.cc +++ b/src/blob_file_test.cc @@ -1,8 +1,8 @@ -#include "util/filename.h" -#include "util/testharness.h" #include "blob_file_builder.h" #include "blob_file_cache.h" #include "blob_file_reader.h" +#include "util/filename.h" +#include "util/testharness.h" namespace rocksdb { namespace titandb { @@ -31,7 +31,8 @@ class BlobFileTest : public testing::Test { { std::unique_ptr f; ASSERT_OK(env_->NewWritableFile(file_name_, &f, env_options_)); - file.reset(new WritableFileWriter(std::move(f), file_name_, env_options_)); + file.reset( + new WritableFileWriter(std::move(f), file_name_, env_options_)); } std::unique_ptr builder( new BlobFileBuilder(cf_options, file.get())); @@ -91,7 +92,8 @@ class BlobFileTest : public testing::Test { { std::unique_ptr f; ASSERT_OK(env_->NewWritableFile(file_name_, &f, env_options_)); - file.reset(new WritableFileWriter(std::move(f), file_name_, env_options_)); + file.reset( + new WritableFileWriter(std::move(f), file_name_, env_options_)); } std::unique_ptr builder( new BlobFileBuilder(cf_options, file.get())); diff --git a/src/blob_format_test.cc b/src/blob_format_test.cc index ec7d6ebca..cd4520ae3 100644 --- a/src/blob_format_test.cc +++ b/src/blob_format_test.cc @@ -1,7 +1,7 @@ #include "blob_format.h" -#include "util/testharness.h" #include "testutil.h" #include "util.h" +#include "util/testharness.h" namespace rocksdb { namespace titandb { diff --git a/src/blob_gc.cc b/src/blob_gc.cc index c0c516df5..58b7d2db6 100644 --- a/src/blob_gc.cc +++ b/src/blob_gc.cc @@ -12,9 +12,7 @@ BlobGC::BlobGC(std::vector&& blob_files, BlobGC::~BlobGC() {} -void BlobGC::SetColumnFamily(ColumnFamilyHandle* cfh) { - cfh_ = cfh; -} +void BlobGC::SetColumnFamily(ColumnFamilyHandle* cfh) { cfh_ = cfh; } ColumnFamilyData* BlobGC::GetColumnFamilyData() { auto* cfhi = reinterpret_cast(cfh_); diff --git a/src/blob_gc.h b/src/blob_gc.h index 4233593b0..3d7a0535d 100644 --- a/src/blob_gc.h +++ b/src/blob_gc.h @@ -2,8 +2,8 @@ #include -#include "db/column_family.h" #include "blob_format.h" +#include "db/column_family.h" #include "titan/options.h" namespace rocksdb { diff --git a/src/blob_gc_job.h 
b/src/blob_gc_job.h index a46447bcc..36d5d5378 100644 --- a/src/blob_gc_job.h +++ b/src/blob_gc_job.h @@ -1,11 +1,11 @@ #pragma once -#include "db/db_impl.h" -#include "rocksdb/status.h" #include "blob_file_builder.h" #include "blob_file_iterator.h" #include "blob_file_manager.h" #include "blob_gc.h" +#include "db/db_impl.h" +#include "rocksdb/status.h" #include "titan/options.h" #include "version_set.h" diff --git a/src/blob_gc_job_test.cc b/src/blob_gc_job_test.cc index 9c053ca83..41fb3a786 100644 --- a/src/blob_gc_job_test.cc +++ b/src/blob_gc_job_test.cc @@ -1,8 +1,8 @@ #include "blob_gc_job.h" -#include "util/testharness.h" #include "blob_gc_picker.h" #include "db_impl.h" +#include "util/testharness.h" namespace rocksdb { namespace titandb { @@ -42,7 +42,9 @@ class BlobGCJobTest : public testing::Test { ~BlobGCJobTest() {} void CheckBlobNumber(int expected) { - auto b = version_set_->GetBlobStorage(base_db_->DefaultColumnFamily()->GetID()).lock(); + auto b = + version_set_->GetBlobStorage(base_db_->DefaultColumnFamily()->GetID()) + .lock(); ASSERT_EQ(expected, b->files_.size()); } @@ -80,7 +82,7 @@ class BlobGCJobTest : public testing::Test { } void DestroyDB() { - Status s __attribute__((__unused__)) = db_->Close(); + Status s __attribute__((__unused__)) = db_->Close(); assert(s.ok()); delete db_; db_ = nullptr; @@ -121,7 +123,7 @@ class BlobGCJobTest : public testing::Test { s = blob_gc_job.Run(); mutex_->Lock(); } - + if (s.ok()) { s = blob_gc_job.Finish(); ASSERT_OK(s); @@ -182,12 +184,14 @@ class BlobGCJobTest : public testing::Test { db_->Delete(WriteOptions(), GenKey(i)); } Flush(); - auto b = version_set_->GetBlobStorage(base_db_->DefaultColumnFamily()->GetID()).lock(); + auto b = + version_set_->GetBlobStorage(base_db_->DefaultColumnFamily()->GetID()) + .lock(); ASSERT_EQ(b->files_.size(), 1); auto old = b->files_.begin()->first; -// for (auto& f : b->files_) { -// f.second->marked_for_sample = false; -// } + // for (auto& f : b->files_) { + // f.second->marked_for_sample = false; + // } std::unique_ptr iter; ASSERT_OK(NewIterator(b->files_.begin()->second->file_number(), b->files_.begin()->second->file_size(), &iter)); @@ -198,7 +202,8 @@ class BlobGCJobTest : public testing::Test { ASSERT_TRUE(iter->key().compare(Slice(GenKey(i))) == 0); } RunGC(); - b = version_set_->GetBlobStorage(base_db_->DefaultColumnFamily()->GetID()).lock(); + b = version_set_->GetBlobStorage(base_db_->DefaultColumnFamily()->GetID()) + .lock(); ASSERT_EQ(b->files_.size(), 1); auto new1 = b->files_.begin()->first; ASSERT_TRUE(old != new1); @@ -234,14 +239,15 @@ TEST_F(BlobGCJobTest, DiscardEntry) { TestDiscardEntry(); } TEST_F(BlobGCJobTest, RunGC) { TestRunGC(); } -// Tests blob file will be kept after GC, if it is still visible by active snapshots. +// Tests blob file will be kept after GC, if it is still visible by active +// snapshots. 
TEST_F(BlobGCJobTest, PurgeBlobs) { NewDB(); auto snap1 = db_->GetSnapshot(); - + for (int i = 0; i < 10; i++) { - db_->Put(WriteOptions(), GenKey(i), GenValue(i)); + db_->Put(WriteOptions(), GenKey(i), GenValue(i)); } Flush(); CheckBlobNumber(1); @@ -254,7 +260,7 @@ TEST_F(BlobGCJobTest, PurgeBlobs) { Flush(); CheckBlobNumber(1); auto snap4 = db_->GetSnapshot(); - + RunGC(); CheckBlobNumber(1); @@ -264,7 +270,7 @@ TEST_F(BlobGCJobTest, PurgeBlobs) { Flush(); auto snap5 = db_->GetSnapshot(); CheckBlobNumber(2); - + db_->ReleaseSnapshot(snap2); RunGC(); CheckBlobNumber(2); @@ -280,7 +286,7 @@ TEST_F(BlobGCJobTest, PurgeBlobs) { db_->ReleaseSnapshot(snap4); RunGC(); CheckBlobNumber(1); - + db_->ReleaseSnapshot(snap5); RunGC(); CheckBlobNumber(1); diff --git a/src/blob_gc_picker.cc b/src/blob_gc_picker.cc index 14200de4a..7ac1a91f3 100644 --- a/src/blob_gc_picker.cc +++ b/src/blob_gc_picker.cc @@ -16,7 +16,8 @@ std::unique_ptr BasicBlobGCPicker::PickBlobGC( uint64_t batch_size = 0; // ROCKS_LOG_INFO(db_options_.info_log, "blob file num:%lu gc score:%lu", - // blob_storage->NumBlobFiles(), blob_storage->gc_score().size()); + // blob_storage->NumBlobFiles(), + // blob_storage->gc_score().size()); for (auto& gc_score : blob_storage->gc_score()) { auto blob_file = blob_storage->FindFile(gc_score.file_number).lock(); assert(blob_file); diff --git a/src/blob_gc_picker.h b/src/blob_gc_picker.h index 9f24eee22..a790d436c 100644 --- a/src/blob_gc_picker.h +++ b/src/blob_gc_picker.h @@ -2,14 +2,14 @@ #include -#include "db/column_family.h" -#include "db/write_callback.h" -#include "rocksdb/status.h" -#include "util/filename.h" #include "blob_file_manager.h" #include "blob_format.h" #include "blob_gc.h" #include "blob_storage.h" +#include "db/column_family.h" +#include "db/write_callback.h" +#include "rocksdb/status.h" +#include "util/filename.h" namespace rocksdb { namespace titandb { diff --git a/src/blob_gc_picker_test.cc b/src/blob_gc_picker_test.cc index 06b136ddd..cf4f7247f 100644 --- a/src/blob_gc_picker_test.cc +++ b/src/blob_gc_picker_test.cc @@ -1,11 +1,11 @@ #include "blob_gc_picker.h" -#include "util/filename.h" -#include "util/testharness.h" #include "blob_file_builder.h" #include "blob_file_cache.h" #include "blob_file_iterator.h" #include "blob_file_reader.h" +#include "util/filename.h" +#include "util/testharness.h" namespace rocksdb { namespace titandb { @@ -22,8 +22,10 @@ class BlobGCPickerTest : public testing::Test { const TitanCFOptions& titan_cf_options) { auto blob_file_cache = std::make_shared( titan_db_options, titan_cf_options, NewLRUCache(128)); - blob_storage_.reset(new BlobStorage(titan_db_options, titan_cf_options, blob_file_cache)); - basic_blob_gc_picker_.reset(new BasicBlobGCPicker(titan_db_options, titan_cf_options)); + blob_storage_.reset( + new BlobStorage(titan_db_options, titan_cf_options, blob_file_cache)); + basic_blob_gc_picker_.reset( + new BasicBlobGCPicker(titan_db_options, titan_cf_options)); } void AddBlobFile(uint64_t file_number, uint64_t file_size, diff --git a/src/blob_storage.cc b/src/blob_storage.cc index 5056a1781..dc47d020a 100644 --- a/src/blob_storage.cc +++ b/src/blob_storage.cc @@ -37,7 +37,7 @@ std::weak_ptr BlobStorage::FindFile(uint64_t file_number) const { void BlobStorage::ExportBlobFiles( std::map>& ret) const { ReadLock rl(&mutex_); - for(auto& kv : files_) { + for (auto& kv : files_) { ret.emplace(kv.first, std::weak_ptr(kv.second)); } } @@ -47,13 +47,16 @@ void BlobStorage::AddBlobFile(std::shared_ptr& file) { 
files_.emplace(std::make_pair(file->file_number(), file)); } -void BlobStorage::MarkFileObsolete(std::shared_ptr file, SequenceNumber obsolete_sequence) { +void BlobStorage::MarkFileObsolete(std::shared_ptr file, + SequenceNumber obsolete_sequence) { WriteLock wl(&mutex_); - obsolete_files_.push_back(std::make_pair(file->file_number(), obsolete_sequence)); + obsolete_files_.push_back( + std::make_pair(file->file_number(), obsolete_sequence)); file->FileStateTransit(BlobFileMeta::FileEvent::kDelete); } -void BlobStorage::GetObsoleteFiles(std::vector* obsolete_files, SequenceNumber oldest_sequence) { +void BlobStorage::GetObsoleteFiles(std::vector* obsolete_files, + SequenceNumber oldest_sequence) { WriteLock wl(&mutex_); for (auto it = obsolete_files_.begin(); it != obsolete_files_.end();) { @@ -68,14 +71,14 @@ void BlobStorage::GetObsoleteFiles(std::vector* obsolete_files, Seq file_cache_->Evict(file_number); ROCKS_LOG_INFO(db_options_.info_log, - "Obsolete blob file %" PRIu64 " (obsolete at %" PRIu64 - ") not visible to oldest snapshot %" PRIu64 ", delete it.", - file_number, obsolete_sequence, oldest_sequence); + "Obsolete blob file %" PRIu64 " (obsolete at %" PRIu64 + ") not visible to oldest snapshot %" PRIu64 ", delete it.", + file_number, obsolete_sequence, oldest_sequence); if (obsolete_files) { obsolete_files->emplace_back( - BlobFileName(db_options_.dirname, file_number)); + BlobFileName(db_options_.dirname, file_number)); } - + it = obsolete_files_.erase(it); continue; } @@ -96,8 +99,7 @@ void BlobStorage::ComputeGCScore() { gc_score_.push_back({}); auto& gcs = gc_score_.back(); gcs.file_number = file.first; - if (file.second->file_size() < - cf_options_.merge_small_file_threshold) { + if (file.second->file_size() < cf_options_.merge_small_file_threshold) { gcs.score = 1; } else { gcs.score = file.second->GetDiscardableRatio(); @@ -111,6 +113,5 @@ void BlobStorage::ComputeGCScore() { }); } - } // namespace titandb } // namespace rocksdb diff --git a/src/blob_storage.h b/src/blob_storage.h index 9f84bece7..887c47724 100644 --- a/src/blob_storage.h +++ b/src/blob_storage.h @@ -2,10 +2,10 @@ #include -#include "rocksdb/options.h" #include "blob_file_cache.h" #include "blob_format.h" #include "blob_gc.h" +#include "rocksdb/options.h" namespace rocksdb { namespace titandb { @@ -21,12 +21,16 @@ class BlobStorage { this->cf_options_ = bs.cf_options_; } - BlobStorage(const TitanDBOptions& _db_options, const TitanCFOptions& _cf_options, + BlobStorage(const TitanDBOptions& _db_options, + const TitanCFOptions& _cf_options, std::shared_ptr _file_cache) - : db_options_(_db_options), cf_options_(_cf_options), file_cache_(_file_cache), destroyed_(false) {} + : db_options_(_db_options), + cf_options_(_cf_options), + file_cache_(_file_cache), + destroyed_(false) {} ~BlobStorage() { - for (auto& file: files_) { + for (auto& file : files_) { file_cache_->Evict(file.second->file_number()); } } @@ -45,11 +49,11 @@ class BlobStorage { // corruption if the file doesn't exist in the specific version. 
std::weak_ptr FindFile(uint64_t file_number) const; - std::size_t NumBlobFiles() const { + std::size_t NumBlobFiles() const { ReadLock rl(&mutex_); - return files_.size(); + return files_.size(); } - + void ExportBlobFiles( std::map>& ret) const; @@ -78,9 +82,11 @@ class BlobStorage { void AddBlobFile(std::shared_ptr& file); - void GetObsoleteFiles(std::vector* obsolete_files, SequenceNumber oldest_sequence); + void GetObsoleteFiles(std::vector* obsolete_files, + SequenceNumber oldest_sequence); - void MarkFileObsolete(std::shared_ptr file, SequenceNumber obsolete_sequence); + void MarkFileObsolete(std::shared_ptr file, + SequenceNumber obsolete_sequence); private: friend class VersionSet; @@ -92,7 +98,7 @@ class BlobStorage { TitanDBOptions db_options_; TitanCFOptions cf_options_; - // Read Write Mutex, which protects the `files_` structures + // Read Write Mutex, which protects the `files_` structures mutable port::RWMutex mutex_; // Only BlobStorage OWNS BlobFileMeta @@ -103,7 +109,8 @@ class BlobStorage { std::list> obsolete_files_; // It is marked when the column family handle is destroyed, indicating the - // in-memory data structure can be destroyed. Physical files may still be kept. + // in-memory data structure can be destroyed. Physical files may still be + // kept. bool destroyed_; }; diff --git a/src/db_impl.cc b/src/db_impl.cc index 932ea9dbf..569f50f23 100644 --- a/src/db_impl.cc +++ b/src/db_impl.cc @@ -125,7 +125,7 @@ TitanDBImpl::TitanDBImpl(const TitanDBOptions& options, TitanDBImpl::~TitanDBImpl() { Close(); } // how often to schedule delete obs files periods -static constexpr uint32_t kDeleteObsoleteFilesPeriodSecs = 10; // 10s +static constexpr uint32_t kDeleteObsoleteFilesPeriodSecs = 10; // 10s void TitanDBImpl::StartBackgroundTasks() { if (!thread_purge_obsolete_) { @@ -287,7 +287,7 @@ Status TitanDBImpl::CreateColumnFamilies( Status TitanDBImpl::DropColumnFamilies( const std::vector& handles) { std::vector column_families; - for (auto& handle: handles) { + for (auto& handle : handles) { column_families.emplace_back(handle->GetID()); } Status s = db_impl_->DropColumnFamilies(handles); @@ -299,7 +299,8 @@ Status TitanDBImpl::DropColumnFamilies( return s; } -Status TitanDBImpl::DestroyColumnFamilyHandle(ColumnFamilyHandle* column_family) { +Status TitanDBImpl::DestroyColumnFamilyHandle( + ColumnFamilyHandle* column_family) { auto cf_id = column_family->GetID(); Status s = db_impl_->DestroyColumnFamilyHandle(column_family); @@ -365,7 +366,8 @@ Status TitanDBImpl::GetImpl(const ReadOptions& options, s = storage->Get(options, index, &record, &buffer); if (s.IsCorruption()) { - ROCKS_LOG_DEBUG(db_options_.info_log, "Key:%s Snapshot:%" PRIu64 " GetBlobFile err:%s\n", + ROCKS_LOG_DEBUG(db_options_.info_log, + "Key:%s Snapshot:%" PRIu64 " GetBlobFile err:%s\n", key.ToString(true).c_str(), options.snapshot->GetSequenceNumber(), s.ToString().c_str()); @@ -426,14 +428,14 @@ Iterator* TitanDBImpl::NewIteratorImpl( const ReadOptions& options, ColumnFamilyHandle* handle, std::shared_ptr snapshot) { auto cfd = reinterpret_cast(handle)->cfd(); - + mutex_.Lock(); auto storage = vset_->GetBlobStorage(handle->GetID()); mutex_.Unlock(); - + std::unique_ptr iter(db_impl_->NewIteratorImpl( - options, cfd, options.snapshot->GetSequenceNumber(), nullptr /*read_callback*/, - true /*allow_blob*/, true /*allow_refresh*/)); + options, cfd, options.snapshot->GetSequenceNumber(), + nullptr /*read_callback*/, true /*allow_blob*/, true /*allow_refresh*/)); return new TitanDBIterator(options, 
storage.lock().get(), snapshot, std::move(iter)); } @@ -456,12 +458,10 @@ Status TitanDBImpl::NewIterators( return Status::OK(); } -const Snapshot* TitanDBImpl::GetSnapshot() { - return db_->GetSnapshot(); -} +const Snapshot* TitanDBImpl::GetSnapshot() { return db_->GetSnapshot(); } void TitanDBImpl::ReleaseSnapshot(const Snapshot* snapshot) { - // TODO: + // TODO: // We can record here whether the oldest snapshot is released. // If not, we can just skip the next round of purging obsolete files. db_->ReleaseSnapshot(snapshot); diff --git a/src/db_impl.h b/src/db_impl.h index ba9c965dc..974c61060 100644 --- a/src/db_impl.h +++ b/src/db_impl.h @@ -1,9 +1,9 @@ #pragma once +#include "blob_file_manager.h" #include "db/db_impl.h" -#include "util/repeatable_thread.h" #include "titan/db.h" -#include "blob_file_manager.h" +#include "util/repeatable_thread.h" #include "version_set.h" namespace rocksdb { @@ -71,6 +71,7 @@ class TitanDBImpl : public TitanDB { void OnCompactionCompleted(const CompactionJobInfo& compaction_job_info); void StartBackgroundTasks(); + private: class FileManager; friend class FileManager; diff --git a/src/db_impl_files.cc b/src/db_impl_files.cc index 4cd988838..20a79742e 100644 --- a/src/db_impl_files.cc +++ b/src/db_impl_files.cc @@ -21,7 +21,7 @@ void TitanDBImpl::PurgeObsoleteFiles() { for (const auto& candidate_file : candidate_files) { ROCKS_LOG_INFO(db_options_.info_log, "Titan deleting obsolete file [%s]", - candidate_file.c_str()); + candidate_file.c_str()); s = env_->DeleteFile(candidate_file); if (!s.ok()) { fprintf(stderr, "Titan deleting file [%s] failed, status:%s", diff --git a/src/db_iter.h b/src/db_iter.h index e3538a3bb..8be90fd26 100644 --- a/src/db_iter.h +++ b/src/db_iter.h @@ -25,7 +25,7 @@ class TitanDBIterator : public Iterator { Status status() const override { // assume volatile inner iter - if(status_.ok()) { + if (status_.ok()) { return iter_->status(); } else { return status_; diff --git a/src/table_builder.h b/src/table_builder.h index 491dd0e1b..b7c8ad5ec 100644 --- a/src/table_builder.h +++ b/src/table_builder.h @@ -1,8 +1,8 @@ #pragma once -#include "table/table_builder.h" #include "blob_file_builder.h" #include "blob_file_manager.h" +#include "table/table_builder.h" #include "titan/options.h" namespace rocksdb { diff --git a/src/table_builder_test.cc b/src/table_builder_test.cc index 32d02493f..4ea102673 100644 --- a/src/table_builder_test.cc +++ b/src/table_builder_test.cc @@ -1,10 +1,10 @@ #include "table/table_builder.h" -#include "table/table_reader.h" -#include "util/filename.h" -#include "util/testharness.h" #include "blob_file_manager.h" #include "blob_file_reader.h" +#include "table/table_reader.h" #include "table_factory.h" +#include "util/filename.h" +#include "util/testharness.h" namespace rocksdb { namespace titandb { @@ -80,7 +80,8 @@ class TableBuilderTest : public testing::Test { db_options_.dirname = tmpdir_; cf_options_.min_blob_size = kMinBlobSize; blob_manager_.reset(new FileManager(db_options_)); - table_factory_.reset(new TitanTableFactory(db_options_, cf_options_, blob_manager_)); + table_factory_.reset( + new TitanTableFactory(db_options_, cf_options_, blob_manager_)); } ~TableBuilderTest() { diff --git a/src/table_factory.h b/src/table_factory.h index 631575671..87131d39c 100644 --- a/src/table_factory.h +++ b/src/table_factory.h @@ -1,7 +1,7 @@ #pragma once -#include "rocksdb/table.h" #include "blob_file_manager.h" +#include "rocksdb/table.h" #include "titan/options.h" namespace rocksdb { diff --git 
a/src/titan_db_test.cc b/src/titan_db_test.cc index a926c481f..62802a669 100644 --- a/src/titan_db_test.cc +++ b/src/titan_db_test.cc @@ -1,16 +1,16 @@ #include #include -#include "titan/db.h" +#include "blob_file_iterator.h" +#include "blob_file_reader.h" #include "db_impl.h" +#include "db_iter.h" +#include "titan/db.h" #include "titan_fault_injection_test_env.h" #include "util/filename.h" #include "util/random.h" -#include "util/testharness.h" #include "util/sync_point.h" -#include "blob_file_reader.h" -#include "blob_file_iterator.h" -#include "db_iter.h" +#include "util/testharness.h" namespace rocksdb { namespace titandb { @@ -113,16 +113,18 @@ class TitanDBTest : public testing::Test { } } - std::weak_ptr GetBlobStorage(ColumnFamilyHandle* cf_handle = nullptr) { - if(cf_handle == nullptr) { + std::weak_ptr GetBlobStorage( + ColumnFamilyHandle* cf_handle = nullptr) { + if (cf_handle == nullptr) { cf_handle = db_->DefaultColumnFamily(); } return db_impl_->vset_->GetBlobStorage(cf_handle->GetID()); } - void VerifyDB(const std::map& data, ReadOptions ropts = ReadOptions()) { + void VerifyDB(const std::map& data, + ReadOptions ropts = ReadOptions()) { db_impl_->PurgeObsoleteFiles(); - + for (auto& kv : data) { std::string value; ASSERT_OK(db_->Get(ropts, kv.first, &value)); @@ -156,9 +158,8 @@ class TitanDBTest : public testing::Test { } } - void VerifyBlob( - uint64_t file_number, - const std::map& data) { + void VerifyBlob(uint64_t file_number, + const std::map& data) { // Open blob file and iterate in-file records EnvOptions env_opt; uint64_t file_size = 0; @@ -166,16 +167,12 @@ class TitanDBTest : public testing::Test { std::unique_ptr readable_file; std::string file_name = BlobFileName(options_.dirname, file_number); ASSERT_OK(env_->GetFileSize(file_name, &file_size)); - NewBlobFileReader(file_number, 0, options_, env_opt, env_, - &readable_file); - BlobFileIterator iter(std::move(readable_file), - file_number, - file_size, - options_ - ); + NewBlobFileReader(file_number, 0, options_, env_opt, env_, &readable_file); + BlobFileIterator iter(std::move(readable_file), file_number, file_size, + options_); iter.SeekToFirst(); - for(auto& kv : data) { - if(kv.second.size() < options_.min_blob_size) { + for (auto& kv : data) { + if (kv.second.size() < options_.min_blob_size) { continue; } ASSERT_EQ(iter.Valid(), true); @@ -360,7 +357,7 @@ TEST_F(TitanDBTest, IngestExternalFiles) { VerifyDB(total_data); Flush(); VerifyDB(total_data); - for(auto& handle : cf_handles_) { + for (auto& handle : cf_handles_) { auto blob = GetBlobStorage(handle); ASSERT_EQ(1, blob.lock()->NumBlobFiles()); } @@ -368,7 +365,7 @@ TEST_F(TitanDBTest, IngestExternalFiles) { CompactRangeOptions copt; ASSERT_OK(db_->CompactRange(copt, nullptr, nullptr)); VerifyDB(total_data); - for(auto& handle : cf_handles_) { + for (auto& handle : cf_handles_) { auto blob = GetBlobStorage(handle); ASSERT_EQ(2, blob.lock()->NumBlobFiles()); std::map> blob_files; @@ -376,7 +373,7 @@ TEST_F(TitanDBTest, IngestExternalFiles) { ASSERT_EQ(2, blob_files.size()); auto bf = blob_files.begin(); VerifyBlob(bf->first, original_data); - bf ++; + bf++; VerifyBlob(bf->first, ingested_data); } } @@ -384,19 +381,20 @@ TEST_F(TitanDBTest, IngestExternalFiles) { TEST_F(TitanDBTest, DropColumnFamily) { Open(); const uint64_t kNumCF = 3; - for(uint64_t i = 1; i <= kNumCF; i++) { + for (uint64_t i = 1; i <= kNumCF; i++) { AddCF(std::to_string(i)); } const uint64_t kNumEntries = 100; std::map data; - for(uint64_t i = 1; i <= kNumEntries; i++) { + for 
(uint64_t i = 1; i <= kNumEntries; i++) { Put(i, &data); } VerifyDB(data); Flush(); VerifyDB(data); - // Destroy column families handle, check whether the data is preserved after a round of GC and restart. + // Destroy column families handle, check whether the data is preserved after a + // round of GC and restart. for (auto& handle : cf_handles_) { db_->DestroyColumnFamilyHandle(handle); } @@ -405,14 +403,14 @@ TEST_F(TitanDBTest, DropColumnFamily) { Reopen(); VerifyDB(data); - for(auto& handle : cf_handles_) { + for (auto& handle : cf_handles_) { // we can't drop default column family if (handle->GetName() == kDefaultColumnFamilyName) { continue; } ASSERT_OK(db_->DropColumnFamily(handle)); - // The data is actually deleted only after destroying all outstanding column family handles, - // so we can still read from the dropped column family. + // The data is actually deleted only after destroying all outstanding column + // family handles, so we can still read from the dropped column family. VerifyDB(data); } @@ -424,7 +422,7 @@ TEST_F(TitanDBTest, BlobFileIOError) { std::unique_ptr mock_env( new TitanFaultInjectionTestEnv(env_)); options_.env = mock_env.get(); - options_.disable_background_gc = true; // avoid abort by BackgroundGC + options_.disable_background_gc = true; // avoid abort by BackgroundGC Open(); std::map data; @@ -437,17 +435,13 @@ TEST_F(TitanDBTest, BlobFileIOError) { ASSERT_OK(db_->CompactRange(copts, nullptr, nullptr)); VerifyDB(data); - SyncPoint::GetInstance()->SetCallBack( - "BlobFileReader::Get", [&](void *) { - mock_env->SetFilesystemActive( - false, - Status::IOError("Injected error") - ); - }); + SyncPoint::GetInstance()->SetCallBack("BlobFileReader::Get", [&](void*) { + mock_env->SetFilesystemActive(false, Status::IOError("Injected error")); + }); SyncPoint::GetInstance()->EnableProcessing(); - for(auto& it : data) { + for (auto& it : data) { std::string value; - if(it.second.size() > options_.min_blob_size) { + if (it.second.size() > options_.min_blob_size) { ASSERT_TRUE(db_->Get(ReadOptions(), it.first, &value).IsIOError()); mock_env->SetFilesystemActive(true); } @@ -466,7 +460,7 @@ TEST_F(TitanDBTest, BlobFileIOError) { iter->SeekToFirst(); ASSERT_TRUE(iter->Valid()); SyncPoint::GetInstance()->EnableProcessing(); - iter->Next(); // second value (k=2) is inlined + iter->Next(); // second value (k=2) is inlined ASSERT_TRUE(iter->Valid()); iter->Next(); ASSERT_TRUE(iter->status().IsIOError()); @@ -477,7 +471,7 @@ TEST_F(TitanDBTest, BlobFileIOError) { SyncPoint::GetInstance()->ClearAllCallBacks(); // env must be destructed AFTER db is closed to avoid // `pure abstract method called` complaint. 
- iter.reset(nullptr); // early release to avoid outstanding reference + iter.reset(nullptr); // early release to avoid outstanding reference Close(); db_ = nullptr; } @@ -486,7 +480,7 @@ TEST_F(TitanDBTest, FlushWriteIOErrorHandling) { std::unique_ptr mock_env( new TitanFaultInjectionTestEnv(env_)); options_.env = mock_env.get(); - options_.disable_background_gc = true; // avoid abort by BackgroundGC + options_.disable_background_gc = true; // avoid abort by BackgroundGC Open(); std::map data; @@ -499,13 +493,10 @@ TEST_F(TitanDBTest, FlushWriteIOErrorHandling) { // no compaction to enable Flush VerifyDB(data); - SyncPoint::GetInstance()->SetCallBack( - "FlushJob::Start", [&](void *) { - mock_env->SetFilesystemActive( - false, - Status::IOError("FlushJob injected error") - ); - }); + SyncPoint::GetInstance()->SetCallBack("FlushJob::Start", [&](void*) { + mock_env->SetFilesystemActive(false, + Status::IOError("FlushJob injected error")); + }); SyncPoint::GetInstance()->EnableProcessing(); FlushOptions fopts; ASSERT_TRUE(db_->Flush(fopts).IsIOError()); @@ -526,10 +517,10 @@ TEST_F(TitanDBTest, FlushWriteIOErrorHandling) { } TEST_F(TitanDBTest, CompactionWriteIOErrorHandling) { - std::unique_ptr mock_env( + std::unique_ptr mock_env( new TitanFaultInjectionTestEnv(env_)); options_.env = mock_env.get(); - options_.disable_background_gc = true; // avoid abort by BackgroundGC + options_.disable_background_gc = true; // avoid abort by BackgroundGC Open(); std::map data; @@ -543,12 +534,10 @@ TEST_F(TitanDBTest, CompactionWriteIOErrorHandling) { VerifyDB(data); SyncPoint::GetInstance()->SetCallBack( - "BackgroundCallCompaction:0", [&](void *) { - mock_env->SetFilesystemActive( - false, - Status::IOError("Compaction injected error") - ); - }); + "BackgroundCallCompaction:0", [&](void*) { + mock_env->SetFilesystemActive( + false, Status::IOError("Compaction injected error")); + }); SyncPoint::GetInstance()->EnableProcessing(); ASSERT_TRUE(db_->CompactRange(copts, nullptr, nullptr).IsIOError()); SyncPoint::GetInstance()->DisableProcessing(); @@ -568,7 +557,7 @@ TEST_F(TitanDBTest, CompactionWriteIOErrorHandling) { } TEST_F(TitanDBTest, BlobFileCorruptionErrorHandling) { - options_.disable_background_gc = true; // avoid abort by BackgroundGC + options_.disable_background_gc = true; // avoid abort by BackgroundGC Open(); std::map data; const int kNumEntries = 100; @@ -582,16 +571,16 @@ TEST_F(TitanDBTest, BlobFileCorruptionErrorHandling) { // Modify the checksum data to reproduce a mismatch SyncPoint::GetInstance()->SetCallBack( - "BlobDecoder::DecodeRecord", [&](void* arg) { - auto* crc = reinterpret_cast(arg); - *crc = *crc + 1; - }); + "BlobDecoder::DecodeRecord", [&](void* arg) { + auto* crc = reinterpret_cast(arg); + *crc = *crc + 1; + }); SyncPoint::GetInstance()->EnableProcessing(); for (auto& it : data) { std::string value; - if(it.second.size() < options_.min_blob_size) { - continue; + if (it.second.size() < options_.min_blob_size) { + continue; } ASSERT_TRUE(db_->Get(ReadOptions(), it.first, &value).IsCorruption()); } @@ -607,7 +596,7 @@ TEST_F(TitanDBTest, BlobFileCorruptionErrorHandling) { iter->SeekToFirst(); ASSERT_TRUE(iter->Valid()); SyncPoint::GetInstance()->EnableProcessing(); - iter->Next(); // second value (k=2) is inlined + iter->Next(); // second value (k=2) is inlined ASSERT_TRUE(iter->Valid()); iter->Next(); ASSERT_TRUE(iter->status().IsCorruption()); @@ -615,7 +604,7 @@ TEST_F(TitanDBTest, BlobFileCorruptionErrorHandling) { SyncPoint::GetInstance()->ClearAllCallBacks(); } 
-#endif // !NDEBUG
+#endif  // !NDEBUG
 
 }  // namespace titandb
 }  // namespace rocksdb
diff --git a/src/titan_fault_injection_test_env.h b/src/titan_fault_injection_test_env.h
index a166d6a6d..90126050a 100644
--- a/src/titan_fault_injection_test_env.h
+++ b/src/titan_fault_injection_test_env.h
@@ -14,27 +14,23 @@ class TitanTestRandomAccessFile : public RandomAccessFile {
  public:
   explicit TitanTestRandomAccessFile(std::unique_ptr<RandomAccessFile>&& f,
                                      TitanFaultInjectionTestEnv* env)
-      : target_(std::move(f)),
-        env_(env) {
+      : target_(std::move(f)), env_(env) {
     assert(target_ != nullptr);
   }
-  virtual ~TitanTestRandomAccessFile() { }
+  virtual ~TitanTestRandomAccessFile() {}
   Status Read(uint64_t offset, size_t n, Slice* result,
               char* scratch) const override;
   Status Prefetch(uint64_t offset, size_t n) override;
   size_t GetUniqueId(char* id, size_t max_size) const override {
     return target_->GetUniqueId(id, max_size);
   }
-  void Hint(AccessPattern pattern) override {
-    return target_->Hint(pattern);
-  }
-  bool use_direct_io() const override {
-    return target_->use_direct_io();
-  }
+  void Hint(AccessPattern pattern) override { return target_->Hint(pattern); }
+  bool use_direct_io() const override { return target_->use_direct_io(); }
   size_t GetRequiredBufferAlignment() const override {
     return target_->GetRequiredBufferAlignment();
   }
   Status InvalidateCache(size_t offset, size_t length) override;
+
  private:
   std::unique_ptr<RandomAccessFile> target_;
   TitanFaultInjectionTestEnv* env_;
@@ -42,9 +38,8 @@ class TitanTestRandomAccessFile : public RandomAccessFile {
 
 class TitanFaultInjectionTestEnv : public FaultInjectionTestEnv {
  public:
-  TitanFaultInjectionTestEnv(Env* t)
-    : FaultInjectionTestEnv(t) { }
-  virtual ~TitanFaultInjectionTestEnv() { }
+  TitanFaultInjectionTestEnv(Env* t) : FaultInjectionTestEnv(t) {}
+  virtual ~TitanFaultInjectionTestEnv() {}
   Status NewRandomAccessFile(const std::string& fname,
                              std::unique_ptr<RandomAccessFile>* result,
                              const EnvOptions& soptions) {
@@ -59,27 +54,28 @@ class TitanFaultInjectionTestEnv : public FaultInjectionTestEnv {
   }
 };
 
-Status TitanTestRandomAccessFile::Read(uint64_t offset, size_t n,
-                                       Slice* result, char* scratch) const {
-  if(!env_->IsFilesystemActive()) {
+Status TitanTestRandomAccessFile::Read(uint64_t offset, size_t n, Slice* result,
+                                       char* scratch) const {
+  if (!env_->IsFilesystemActive()) {
     return env_->GetError();
   }
   return target_->Read(offset, n, result, scratch);
 }
 
 Status TitanTestRandomAccessFile::Prefetch(uint64_t offset, size_t n) {
-  if(!env_->IsFilesystemActive()) {
+  if (!env_->IsFilesystemActive()) {
     return env_->GetError();
   }
   return target_->Prefetch(offset, n);
 }
 
-Status TitanTestRandomAccessFile::InvalidateCache(size_t offset, size_t length) {
-  if(!env_->IsFilesystemActive()) {
+Status TitanTestRandomAccessFile::InvalidateCache(size_t offset,
+                                                  size_t length) {
+  if (!env_->IsFilesystemActive()) {
     return env_->GetError();
   }
   return target_->InvalidateCache(offset, length);
 }
 
-} // namespace titandb
-} // namespace rocksdb
\ No newline at end of file
+}  // namespace titandb
+}  // namespace rocksdb
\ No newline at end of file
diff --git a/src/version_edit.h b/src/version_edit.h
index 1001a6432..3c93ddc89 100644
--- a/src/version_edit.h
+++ b/src/version_edit.h
@@ -2,8 +2,8 @@
 
 #include
 
-#include "rocksdb/slice.h"
 #include "blob_format.h"
+#include "rocksdb/slice.h"
 
 namespace rocksdb {
 namespace titandb {
@@ -21,7 +21,8 @@ class VersionEdit {
     added_files_.push_back(file);
   }
 
-  void DeleteBlobFile(uint64_t file_number, SequenceNumber obsolete_sequence = 0) {
+  void DeleteBlobFile(uint64_t
file_number, + SequenceNumber obsolete_sequence = 0) { deleted_files_.emplace_back(std::make_pair(file_number, obsolete_sequence)); } diff --git a/src/version_set.cc b/src/version_set.cc index 87cbba193..7dcdc08df 100644 --- a/src/version_set.cc +++ b/src/version_set.cc @@ -210,10 +210,10 @@ Status VersionSet::Apply(VersionEdit* edit) { auto cf_id = edit->column_family_id_; auto it = column_families_.find(cf_id); if (it == column_families_.end()) { - // TODO: support OpenForReadOnly which doesn't open DB with all column family - // so there are maybe some invalid column family, but we can't just skip it - // otherwise blob files of the non-open column families will be regarded as - // obsolete and deleted. + // TODO: support OpenForReadOnly which doesn't open DB with all column + // family so there are maybe some invalid column family, but we can't just + // skip it otherwise blob files of the non-open column families will be + // regarded as obsolete and deleted. return Status::OK(); } auto& files = it->second->files_; @@ -225,7 +225,8 @@ Status VersionSet::Apply(VersionEdit* edit) { fprintf(stderr, "blob file %" PRIu64 " doesn't exist before\n", number); abort(); } else if (blob_it->second->is_obsolete()) { - fprintf(stderr, "blob file %" PRIu64 " has been deleted before\n", number); + fprintf(stderr, "blob file %" PRIu64 " has been deleted before\n", + number); abort(); } it->second->MarkFileObsolete(blob_it->second, file.second); @@ -236,9 +237,11 @@ Status VersionSet::Apply(VersionEdit* edit) { auto blob_it = files.find(number); if (blob_it != files.end()) { if (blob_it->second->is_obsolete()) { - fprintf(stderr, "blob file %" PRIu64 " has been deleted before\n", number); + fprintf(stderr, "blob file %" PRIu64 " has been deleted before\n", + number); } else { - fprintf(stderr, "blob file %" PRIu64 " has been added before\n", number); + fprintf(stderr, "blob file %" PRIu64 " has been added before\n", + number); } abort(); } @@ -249,37 +252,41 @@ Status VersionSet::Apply(VersionEdit* edit) { return Status::OK(); } -void VersionSet::AddColumnFamilies(const std::map& column_families) { +void VersionSet::AddColumnFamilies( + const std::map& column_families) { for (auto& cf : column_families) { auto file_cache = std::make_shared(db_options_, cf.second, file_cache_); - auto blob_storage = std::make_shared(db_options_, cf.second, file_cache); + auto blob_storage = + std::make_shared(db_options_, cf.second, file_cache); column_families_.emplace(cf.first, blob_storage); } } -Status VersionSet::DropColumnFamilies(const std::vector& column_families, SequenceNumber obsolete_sequence) { +Status VersionSet::DropColumnFamilies( + const std::vector& column_families, + SequenceNumber obsolete_sequence) { Status s; for (auto& cf_id : column_families) { auto it = column_families_.find(cf_id); if (it != column_families_.end()) { VersionEdit edit; edit.SetColumnFamilyID(it->first); - for (auto& file: it->second->files_) { + for (auto& file : it->second->files_) { ROCKS_LOG_INFO(db_options_.info_log, "Titan add obsolete file [%llu]", - file.second->file_number()); + file.second->file_number()); edit.DeleteBlobFile(file.first, obsolete_sequence); } s = LogAndApply(&edit); if (!s.ok()) return s; } else { - ROCKS_LOG_ERROR(db_options_.info_log, - "column %u not found for drop\n", cf_id); + ROCKS_LOG_ERROR(db_options_.info_log, "column %u not found for drop\n", + cf_id); return Status::NotFound("invalid column family"); } obsolete_columns_.insert(cf_id); - } - return s; + } + return s; } Status 
VersionSet::DestroyColumnFamily(uint32_t cf_id) { @@ -292,25 +299,27 @@ Status VersionSet::DestroyColumnFamily(uint32_t cf_id) { } return Status::OK(); } - ROCKS_LOG_ERROR(db_options_.info_log, - "column %u not found for destroy\n", cf_id); + ROCKS_LOG_ERROR(db_options_.info_log, "column %u not found for destroy\n", + cf_id); return Status::NotFound("invalid column family"); } -void VersionSet::GetObsoleteFiles(std::vector* obsolete_files, SequenceNumber oldest_sequence) { +void VersionSet::GetObsoleteFiles(std::vector* obsolete_files, + SequenceNumber oldest_sequence) { for (auto it = column_families_.begin(); it != column_families_.end();) { auto& cf_id = it->first; auto& blob_storage = it->second; - // In the case of dropping column family, obsolete blob files can be deleted only - // after the column family handle is destroyed. + // In the case of dropping column family, obsolete blob files can be deleted + // only after the column family handle is destroyed. if (obsolete_columns_.find(cf_id) != obsolete_columns_.end()) { ++it; continue; } blob_storage->GetObsoleteFiles(obsolete_files, oldest_sequence); - - // Cleanup obsolete column family when all the blob files for that are deleted. + + // Cleanup obsolete column family when all the blob files for that are + // deleted. if (blob_storage->MaybeRemove()) { it = column_families_.erase(it); continue; @@ -318,7 +327,8 @@ void VersionSet::GetObsoleteFiles(std::vector* obsolete_files, Sequ ++it; } - obsolete_files->insert(obsolete_files->end(), obsolete_manifests_.begin(), obsolete_manifests_.end()); + obsolete_files->insert(obsolete_files->end(), obsolete_manifests_.begin(), + obsolete_manifests_.end()); obsolete_manifests_.clear(); } diff --git a/src/version_set.h b/src/version_set.h index 6a9f44293..df161ae9b 100644 --- a/src/version_set.h +++ b/src/version_set.h @@ -5,16 +5,16 @@ #include #include +#include "blob_file_cache.h" +#include "blob_storage.h" #include "db/log_reader.h" #include "db/log_writer.h" #include "port/port_posix.h" #include "rocksdb/options.h" #include "rocksdb/status.h" -#include "util/mutexlock.h" -#include "blob_file_cache.h" #include "titan/options.h" +#include "util/mutexlock.h" #include "version_edit.h" -#include "blob_storage.h" namespace rocksdb { namespace titandb { @@ -43,7 +43,8 @@ class VersionSet { // Drops some column families. The obsolete files will be deleted in // background when they will not be accessed anymore. // REQUIRES: mutex is held - Status DropColumnFamilies(const std::vector& handles, SequenceNumber obsolete_sequence); + Status DropColumnFamilies(const std::vector& handles, + SequenceNumber obsolete_sequence); // Destroy the column family. Only after this is called, the obsolete files // of the dropped column family can be physical deleted. 
@@ -63,7 +64,8 @@ class VersionSet { } // REQUIRES: mutex is held - void GetObsoleteFiles(std::vector* obsolete_files, SequenceNumber oldest_sequence); + void GetObsoleteFiles(std::vector* obsolete_files, + SequenceNumber oldest_sequence); // REQUIRES: mutex is held void MarkAllFilesForGC() { @@ -71,6 +73,7 @@ class VersionSet { cf.second->MarkAllFilesForGC(); } } + private: friend class BlobFileSizeCollectorTest; friend class VersionTest; @@ -80,7 +83,7 @@ class VersionSet { Status OpenManifest(uint64_t number); Status WriteSnapshot(log::Writer* log); - + Status Apply(VersionEdit* edit); std::string dirname_; @@ -91,10 +94,12 @@ class VersionSet { std::vector obsolete_manifests_; - // As rocksdb described, `DropColumnFamilies()` only records the drop of the column family specified by ColumnFamilyHandle. - // The actual data is not deleted until the client calls `delete column_family`, namely `DestroyColumnFamilyHandle()`. - // We can still continue using the column family if we have outstanding ColumnFamilyHandle pointer. - // So here record the dropped column family but the handler is not destroyed. + // As rocksdb described, `DropColumnFamilies()` only records the drop of the + // column family specified by ColumnFamilyHandle. The actual data is not + // deleted until the client calls `delete column_family`, namely + // `DestroyColumnFamilyHandle()`. We can still continue using the column + // family if we have outstanding ColumnFamilyHandle pointer. So here record + // the dropped column family but the handler is not destroyed. std::unordered_set obsolete_columns_; std::unordered_map> column_families_; diff --git a/src/version_test.cc b/src/version_test.cc index c0b3e15f9..3725c2ed0 100644 --- a/src/version_test.cc +++ b/src/version_test.cc @@ -1,7 +1,7 @@ -#include "util/filename.h" -#include "util/testharness.h" #include "testutil.h" #include "util.h" +#include "util/filename.h" +#include "util/testharness.h" #include "version_edit.h" #include "version_set.h" @@ -82,7 +82,7 @@ class VersionTest : public testing::Test { auto& storage = column_families_[it.first]; // ignore obsolete file auto size = 0; - for (auto& file: it.second->files_) { + for (auto& file : it.second->files_) { if (!file.second->is_obsolete()) { size++; } @@ -198,7 +198,7 @@ TEST_F(VersionTest, ObsoleteFiles) { std::vector cfs = {1}; ASSERT_OK(vset_->DropColumnFamilies(cfs, 0)); vset_->GetObsoleteFiles(&of, kMaxSequenceNumber); - ASSERT_EQ(of.size(), 1); + ASSERT_EQ(of.size(), 1); CheckColumnFamiliesSize(10); ASSERT_OK(vset_->DestroyColumnFamily(1)); diff --git a/tools/db_bench_tool.cc b/tools/db_bench_tool.cc index d3dfaa8b9..e1fee7bf6 100644 --- a/tools/db_bench_tool.cc +++ b/tools/db_bench_tool.cc @@ -224,20 +224,24 @@ DEFINE_string(column_family_distribution, "", "and `num_hot_column_families=0`, a valid list could be " "\"10,20,30,40\"."); -DEFINE_int64(reads, -1, "Number of read operations to do. " +DEFINE_int64(reads, -1, + "Number of read operations to do. " "If negative, do FLAGS_num reads."); -DEFINE_int64(deletes, -1, "Number of delete operations to do. " +DEFINE_int64(deletes, -1, + "Number of delete operations to do. " "If negative, do FLAGS_num deletions."); DEFINE_int32(bloom_locality, 0, "Control bloom filter probes locality"); -DEFINE_int64(seed, 0, "Seed base for random number generators. " +DEFINE_int64(seed, 0, + "Seed base for random number generators. 
" "When 0 it is deterministic."); DEFINE_int32(threads, 1, "Number of concurrent threads to run."); -DEFINE_int32(duration, 0, "Time in seconds for the random-ops tests to run." +DEFINE_int32(duration, 0, + "Time in seconds for the random-ops tests to run." " When 0 then num & reads determine the test duration"); DEFINE_int32(value_size, 100, "Size of each value"); @@ -277,7 +281,8 @@ DEFINE_int32(key_size, 16, "size of each key"); DEFINE_int32(num_multi_db, 0, "Number of DBs used in the benchmark. 0 means single DB."); -DEFINE_double(compression_ratio, 0.5, "Arrange to generate values that shrink" +DEFINE_double(compression_ratio, 0.5, + "Arrange to generate values that shrink" " to this fraction of their original size after compression"); DEFINE_double(read_random_exp_range, 0.0, @@ -335,8 +340,7 @@ DEFINE_int32(max_write_buffer_number_to_maintain, "after they are flushed. If this value is set to -1, " "'max_write_buffer_number' will be used."); -DEFINE_int32(max_background_jobs, - rocksdb::Options().max_background_jobs, +DEFINE_int32(max_background_jobs, rocksdb::Options().max_background_jobs, "The maximum number of concurrent background jobs that can occur " "in parallel."); @@ -362,17 +366,15 @@ DEFINE_int32(base_background_compactions, -1, "DEPRECATED"); DEFINE_uint64(subcompactions, 1, "Maximum number of subcompactions to divide L0-L1 compactions " "into."); -static const bool FLAGS_subcompactions_dummy - __attribute__((__unused__)) = RegisterFlagValidator(&FLAGS_subcompactions, - &ValidateUint32Range); +static const bool FLAGS_subcompactions_dummy __attribute__((__unused__)) = + RegisterFlagValidator(&FLAGS_subcompactions, &ValidateUint32Range); -DEFINE_int32(max_background_flushes, - rocksdb::Options().max_background_flushes, +DEFINE_int32(max_background_flushes, rocksdb::Options().max_background_flushes, "The maximum number of concurrent background flushes" " that can occur in parallel."); static rocksdb::CompactionStyle FLAGS_compaction_style_e; -DEFINE_int32(compaction_style, (int32_t) rocksdb::Options().compaction_style, +DEFINE_int32(compaction_style, (int32_t)rocksdb::Options().compaction_style, "style of compaction: level-based, universal and fifo"); static rocksdb::CompactionPri FLAGS_compaction_pri_e; @@ -383,10 +385,12 @@ DEFINE_int32(universal_size_ratio, 0, "Percentage flexibility while comparing file size" " (for universal compaction only)."); -DEFINE_int32(universal_min_merge_width, 0, "The minimum number of files in a" +DEFINE_int32(universal_min_merge_width, 0, + "The minimum number of files in a" " single compaction run (for universal compaction only)."); -DEFINE_int32(universal_max_merge_width, 0, "The max number of files to compact" +DEFINE_int32(universal_max_merge_width, 0, + "The max number of files to compact" " in universal style compaction"); DEFINE_int32(universal_max_size_amplification_percent, 0, @@ -499,7 +503,7 @@ DEFINE_int32(file_opening_threads, rocksdb::Options().max_file_opening_threads, "threads that will be used to open files during DB::Open()"); DEFINE_bool(new_table_reader_for_compaction_inputs, true, - "If true, uses a separate file handle for compaction inputs"); + "If true, uses a separate file handle for compaction inputs"); DEFINE_int32(compaction_readahead_size, 0, "Compaction readahead size"); @@ -509,7 +513,8 @@ DEFINE_int32(random_access_max_buffer_size, 1024 * 1024, DEFINE_int32(writable_file_max_buffer_size, 1024 * 1024, "Maximum write buffer for Writable File"); -DEFINE_int32(bloom_bits, -1, "Bloom filter bits per key. 
Negative means" +DEFINE_int32(bloom_bits, -1, + "Bloom filter bits per key. Negative means" " use default settings."); DEFINE_double(memtable_bloom_size_ratio, 0, "Ratio of memtable size used for bloom filter. 0 means no bloom " @@ -517,7 +522,8 @@ DEFINE_double(memtable_bloom_size_ratio, 0, DEFINE_bool(memtable_use_huge_page, false, "Try to use huge page in memtables."); -DEFINE_bool(use_existing_db, false, "If true, do not destroy the existing" +DEFINE_bool(use_existing_db, false, + "If true, do not destroy the existing" " database. If you set this flag and also specify a benchmark that" " wants a fresh database, that benchmark will fail."); @@ -546,8 +552,8 @@ DEFINE_bool(use_keep_filter, false, "Whether to use a noop compaction filter"); static bool ValidateCacheNumshardbits(const char* flagname, int32_t value) { if (value >= 20) { - fprintf(stderr, "Invalid value for --%s: %d, must be < 20\n", - flagname, value); + fprintf(stderr, "Invalid value for --%s: %d, must be < 20\n", flagname, + value); return false; } return true; @@ -561,10 +567,12 @@ DEFINE_bool(statistics, false, "Database statistics"); DEFINE_string(statistics_string, "", "Serialized statistics string"); static class std::shared_ptr dbstats; -DEFINE_int64(writes, -1, "Number of write operations to do. If negative, do" +DEFINE_int64(writes, -1, + "Number of write operations to do. If negative, do" " --num reads."); -DEFINE_bool(finish_after_writes, false, "Write thread terminates after all writes are finished"); +DEFINE_bool(finish_after_writes, false, + "Write thread terminates after all writes are finished"); DEFINE_bool(sync, false, "Sync all writes to disk"); @@ -616,24 +624,27 @@ DEFINE_int32(level0_file_num_compaction_trigger, " when compactions start"); static bool ValidateInt32Percent(const char* flagname, int32_t value) { - if (value <= 0 || value>=100) { - fprintf(stderr, "Invalid value for --%s: %d, 0< pct <100 \n", - flagname, value); + if (value <= 0 || value >= 100) { + fprintf(stderr, "Invalid value for --%s: %d, 0< pct <100 \n", flagname, + value); return false; } return true; } -DEFINE_int32(readwritepercent, 90, "Ratio of reads to reads/writes (expressed" +DEFINE_int32(readwritepercent, 90, + "Ratio of reads to reads/writes (expressed" " as percentage) for the ReadRandomWriteRandom workload. The " "default value 90 means 90% operations out of all reads and writes" " operations are reads. In other words, 9 gets for every 1 put."); -DEFINE_int32(mergereadpercent, 70, "Ratio of merges to merges&reads (expressed" +DEFINE_int32(mergereadpercent, 70, + "Ratio of merges to merges&reads (expressed" " as percentage) for the ReadRandomMergeRandom workload. The" " default value 70 means 70% out of all read and merge operations" " are merges. In other words, 7 merges for every 3 gets."); -DEFINE_int32(deletepercent, 2, "Percentage of deletes out of reads/writes/" +DEFINE_int32(deletepercent, 2, + "Percentage of deletes out of reads/writes/" "deletes (used in RandomWithVerify only). RandomWithVerify " "calculates writepercent as (100 - FLAGS_readwritepercent - " "deletepercent), so deletepercent must be smaller than (100 - " @@ -749,7 +760,8 @@ DEFINE_bool(use_stderr_info_logger, false, DEFINE_string(trace_file, "", "Trace workload to a file. 
"); -static enum rocksdb::CompressionType StringToCompressionType(const char* ctype) { +static enum rocksdb::CompressionType StringToCompressionType( + const char* ctype) { assert(ctype); if (!strcasecmp(ctype, "none")) @@ -803,7 +815,8 @@ DEFINE_int32(compression_zstd_max_train_bytes, "Maximum size of training data passed to zstd's dictionary " "trainer."); -DEFINE_int32(min_level_to_compress, -1, "If non-negative, compression starts" +DEFINE_int32(min_level_to_compress, -1, + "If non-negative, compression starts" " from this level. Levels with number < min_level_to_compress are" " not compressed. Otherwise, apply compression_type to " "all levels."); @@ -820,20 +833,25 @@ static bool ValidateTableCacheNumshardbits(const char* flagname, DEFINE_int32(table_cache_numshardbits, 4, ""); #ifndef ROCKSDB_LITE -DEFINE_string(env_uri, "", "URI for registry Env lookup. Mutually exclusive" +DEFINE_string(env_uri, "", + "URI for registry Env lookup. Mutually exclusive" " with --hdfs."); #endif // ROCKSDB_LITE -DEFINE_string(hdfs, "", "Name of hdfs environment. Mutually exclusive with" +DEFINE_string(hdfs, "", + "Name of hdfs environment. Mutually exclusive with" " --env_uri."); static rocksdb::Env* FLAGS_env = rocksdb::Env::Default(); -DEFINE_int64(stats_interval, 0, "Stats are reported every N operations when " +DEFINE_int64(stats_interval, 0, + "Stats are reported every N operations when " "this is greater than zero. When 0 the interval grows over time."); -DEFINE_int64(stats_interval_seconds, 0, "Report stats every N seconds. This " +DEFINE_int64(stats_interval_seconds, 0, + "Report stats every N seconds. This " "overrides stats_interval when both are > 0."); -DEFINE_int32(stats_per_interval, 0, "Reports additional stats per interval when" +DEFINE_int32(stats_per_interval, 0, + "Reports additional stats per interval when" " this is greater than 0."); DEFINE_int64(report_interval_seconds, 0, @@ -848,11 +866,12 @@ DEFINE_int32(thread_status_per_interval, 0, "Takes and report a snapshot of the current status of each thread" " when this is greater than 0."); -DEFINE_int32(perf_level, rocksdb::PerfLevel::kDisable, "Level of perf collection"); +DEFINE_int32(perf_level, rocksdb::PerfLevel::kDisable, + "Level of perf collection"); static bool ValidateRateLimit(const char* flagname, double value) { const double EPSILON = 1e-10; - if ( value < -EPSILON ) { + if (value < -EPSILON) { fprintf(stderr, "Invalid value for --%s: %12.6f, must be >= 0.0\n", flagname, value); return false; @@ -907,24 +926,19 @@ DEFINE_bool(rate_limiter_auto_tuned, false, "Enable dynamic adjustment of rate limit according to demand for " "background I/O"); +DEFINE_bool(sine_write_rate, false, "Use a sine wave write_rate_limit"); -DEFINE_bool(sine_write_rate, false, - "Use a sine wave write_rate_limit"); - -DEFINE_uint64(sine_write_rate_interval_milliseconds, 10000, - "Interval of which the sine wave write_rate_limit is recalculated"); +DEFINE_uint64( + sine_write_rate_interval_milliseconds, 10000, + "Interval of which the sine wave write_rate_limit is recalculated"); -DEFINE_double(sine_a, 1, - "A in f(x) = A sin(bx + c) + d"); +DEFINE_double(sine_a, 1, "A in f(x) = A sin(bx + c) + d"); -DEFINE_double(sine_b, 1, - "B in f(x) = A sin(bx + c) + d"); +DEFINE_double(sine_b, 1, "B in f(x) = A sin(bx + c) + d"); -DEFINE_double(sine_c, 0, - "C in f(x) = A sin(bx + c) + d"); +DEFINE_double(sine_c, 0, "C in f(x) = A sin(bx + c) + d"); -DEFINE_double(sine_d, 1, - "D in f(x) = A sin(bx + c) + d"); +DEFINE_double(sine_d, 1, "D in f(x) = A sin(bx 
+ c) + d"); DEFINE_bool(rate_limit_bg_reads, false, "Use options.rate_limiter on compaction reads"); @@ -952,7 +966,8 @@ DEFINE_bool(print_malloc_stats, false, DEFINE_bool(disable_auto_compactions, false, "Do not auto trigger compactions"); DEFINE_uint64(wal_ttl_seconds, 0, "Set the TTL for the WAL Files in seconds."); -DEFINE_uint64(wal_size_limit_MB, 0, "Set the size limit for the WAL Files" +DEFINE_uint64(wal_size_limit_MB, 0, + "Set the size limit for the WAL Files" " in MB."); DEFINE_uint64(max_total_wal_size, 0, "Set total max WAL size"); @@ -975,7 +990,7 @@ DEFINE_bool(advise_random_on_open, rocksdb::Options().advise_random_on_open, DEFINE_string(compaction_fadvice, "NORMAL", "Access pattern advice when a file is compacted"); static auto FLAGS_compaction_fadvice_e = - rocksdb::Options().access_hint_on_compaction_start; + rocksdb::Options().access_hint_on_compaction_start; DEFINE_bool(use_tailing_iterator, false, "Use tailing iterator to access a series of keys instead of get"); @@ -983,12 +998,12 @@ DEFINE_bool(use_tailing_iterator, false, DEFINE_bool(use_adaptive_mutex, rocksdb::Options().use_adaptive_mutex, "Use adaptive mutex"); -DEFINE_uint64(bytes_per_sync, rocksdb::Options().bytes_per_sync, +DEFINE_uint64(bytes_per_sync, rocksdb::Options().bytes_per_sync, "Allows OS to incrementally sync SST files to disk while they are" " being written, in the background. Issue one request for every" " bytes_per_sync written. 0 turns it off."); -DEFINE_uint64(wal_bytes_per_sync, rocksdb::Options().wal_bytes_per_sync, +DEFINE_uint64(wal_bytes_per_sync, rocksdb::Options().wal_bytes_per_sync, "Allows OS to incrementally sync WAL files to disk while they are" " being written, in the background. Issue one request for every" " wal_bytes_per_sync written. 0 turns it off."); @@ -1018,30 +1033,36 @@ DEFINE_int32(num_deletion_threads, 1, "Number of threads to do deletion (used in TimeSeries and delete " "expire_style only)."); -DEFINE_int32(max_successive_merges, 0, "Maximum number of successive merge" +DEFINE_int32(max_successive_merges, 0, + "Maximum number of successive merge" " operations on a key in the memtable"); static bool ValidatePrefixSize(const char* flagname, int32_t value) { - if (value < 0 || value>=2000000000) { + if (value < 0 || value >= 2000000000) { fprintf(stderr, "Invalid value for --%s: %d. 0<= PrefixSize <=2000000000\n", flagname, value); return false; } return true; } -DEFINE_int32(prefix_size, 0, "control the prefix size for HashSkipList and " +DEFINE_int32(prefix_size, 0, + "control the prefix size for HashSkipList and " "plain table"); -DEFINE_int64(keys_per_prefix, 0, "control average number of keys generated " +DEFINE_int64(keys_per_prefix, 0, + "control average number of keys generated " "per prefix, 0 means no special handling of the prefix, " "i.e. 
use the prefix comes with the generated random number."); DEFINE_int32(memtable_insert_with_hint_prefix_size, 0, "If non-zero, enable " "memtable insert with hint with the given prefix size."); -DEFINE_bool(enable_io_prio, false, "Lower the background flush/compaction " +DEFINE_bool(enable_io_prio, false, + "Lower the background flush/compaction " "threads' IO priority"); -DEFINE_bool(enable_cpu_prio, false, "Lower the background flush/compaction " +DEFINE_bool(enable_cpu_prio, false, + "Lower the background flush/compaction " "threads' CPU priority"); -DEFINE_bool(identity_as_first_hash, false, "the first hash function of cuckoo " +DEFINE_bool(identity_as_first_hash, false, + "the first hash function of cuckoo " "table becomes an identity function. This is only valid when key " "is 8 bytes"); DEFINE_bool(dump_malloc_stats, true, "Dump malloc stats in LOG "); @@ -1077,24 +1098,30 @@ static enum RepFactory StringToRepFactory(const char* ctype) { static enum RepFactory FLAGS_rep_factory; DEFINE_string(memtablerep, "skip_list", ""); DEFINE_int64(hash_bucket_count, 1024 * 1024, "hash bucket count"); -DEFINE_bool(use_plain_table, false, "if use plain table " +DEFINE_bool(use_plain_table, false, + "if use plain table " "instead of block-based table format"); DEFINE_bool(use_cuckoo_table, false, "if use cuckoo table format"); DEFINE_double(cuckoo_hash_ratio, 0.9, "Hash ratio for Cuckoo SST table."); -DEFINE_bool(use_hash_search, false, "if use kHashSearch " +DEFINE_bool(use_hash_search, false, + "if use kHashSearch " "instead of kBinarySearch. " "This is valid if only we use BlockTable"); -DEFINE_bool(use_block_based_filter, false, "if use kBlockBasedFilter " +DEFINE_bool(use_block_based_filter, false, + "if use kBlockBasedFilter " "instead of kFullFilter for filter block. " "This is valid if only we use BlockTable"); -DEFINE_string(merge_operator, "", "The merge operator to use with the database." +DEFINE_string(merge_operator, "", + "The merge operator to use with the database." 
"If a new merge operator is specified, be sure to use fresh" " database The possible merge operators are defined in" " utilities/merge_operators.h"); -DEFINE_int32(skip_list_lookahead, 0, "Used with skip_list memtablerep; try " +DEFINE_int32(skip_list_lookahead, 0, + "Used with skip_list memtablerep; try " "linear search first for this many steps from the previous " "position"); -DEFINE_bool(report_file_operations, false, "if report number of file " +DEFINE_bool(report_file_operations, false, + "if report number of file " "operations"); DEFINE_bool(use_titan, true, "Open a Titan instance."); @@ -1123,9 +1150,9 @@ DEFINE_int32(disable_seek_compaction, false, static const bool FLAGS_deletepercent_dummy __attribute__((__unused__)) = RegisterFlagValidator(&FLAGS_deletepercent, &ValidateInt32Percent); -static const bool FLAGS_table_cache_numshardbits_dummy __attribute__((__unused__)) = - RegisterFlagValidator(&FLAGS_table_cache_numshardbits, - &ValidateTableCacheNumshardbits); +static const bool FLAGS_table_cache_numshardbits_dummy + __attribute__((__unused__)) = RegisterFlagValidator( + &FLAGS_table_cache_numshardbits, &ValidateTableCacheNumshardbits); namespace rocksdb { @@ -1233,7 +1260,9 @@ class ReportFileOpEnv : public EnvWrapper { return rv; } - Status Truncate(uint64_t size) override { return target_->Truncate(size); } + Status Truncate(uint64_t size) override { + return target_->Truncate(size); + } Status Close() override { return target_->Close(); } Status Flush() override { return target_->Flush(); } Status Sync() override { return target_->Sync(); } @@ -1310,7 +1339,7 @@ struct DBWithColumnFamilies { DB* db; #ifndef ROCKSDB_LITE OptimisticTransactionDB* opt_txn_db; -#endif // ROCKSDB_LITE +#endif // ROCKSDB_LITE std::atomic num_created; // Need to be updated after all the // new entries in cfh are set. size_t num_hot; // Number of column families to be queried at each moment. 
@@ -1323,7 +1352,8 @@ struct DBWithColumnFamilies { DBWithColumnFamilies() : db(nullptr) #ifndef ROCKSDB_LITE - , opt_txn_db(nullptr) + , + opt_txn_db(nullptr) #endif // ROCKSDB_LITE { cfh.clear(); @@ -1506,19 +1536,12 @@ enum OperationType : unsigned char { }; static std::unordered_map> - OperationTypeString = { - {kRead, "read"}, - {kWrite, "write"}, - {kDelete, "delete"}, - {kSeek, "seek"}, - {kMerge, "merge"}, - {kUpdate, "update"}, - {kCompress, "compress"}, - {kCompress, "uncompress"}, - {kCrc, "crc"}, - {kHash, "hash"}, - {kOthers, "op"} -}; + OperationTypeString = {{kRead, "read"}, {kWrite, "write"}, + {kDelete, "delete"}, {kSeek, "seek"}, + {kMerge, "merge"}, {kUpdate, "update"}, + {kCompress, "compress"}, {kCompress, "uncompress"}, + {kCrc, "crc"}, {kHash, "hash"}, + {kOthers, "op"}}; class CombinedStats; class Stats { @@ -1535,7 +1558,8 @@ class Stats { uint64_t last_op_finish_; uint64_t last_report_finish_; std::unordered_map, - std::hash> hist_; + std::hash> + hist_; std::string message_; bool exclude_from_merge_; ReporterAgent* reporter_agent_; // does not own @@ -1567,15 +1591,14 @@ class Stats { } void Merge(const Stats& other) { - if (other.exclude_from_merge_) - return; + if (other.exclude_from_merge_) return; for (auto it = other.hist_.begin(); it != other.hist_.end(); ++it) { auto this_it = hist_.find(it->first); if (this_it != hist_.end()) { this_it->second->Merge(*(other.hist_.at(it->first))); } else { - hist_.insert({ it->first, it->second }); + hist_.insert({it->first, it->second}); } } @@ -1594,9 +1617,7 @@ class Stats { seconds_ = (finish_ - start_) * 1e-6; } - void AddMessage(Slice msg) { - AppendWithSpace(&message_, msg); - } + void AddMessage(Slice msg) { AppendWithSpace(&message_, msg); } void SetId(int id) { id_ = id; } void SetExcludeFromMerge() { exclude_from_merge_ = true; } @@ -1605,43 +1626,37 @@ class Stats { std::vector thread_list; FLAGS_env->GetThreadList(&thread_list); - fprintf(stderr, "\n%18s %10s %12s %20s %13s %45s %12s %s\n", - "ThreadID", "ThreadType", "cfName", "Operation", - "ElapsedTime", "Stage", "State", "OperationProperties"); + fprintf(stderr, "\n%18s %10s %12s %20s %13s %45s %12s %s\n", "ThreadID", + "ThreadType", "cfName", "Operation", "ElapsedTime", "Stage", + "State", "OperationProperties"); int64_t current_time = 0; Env::Default()->GetCurrentTime(¤t_time); for (auto ts : thread_list) { fprintf(stderr, "%18" PRIu64 " %10s %12s %20s %13s %45s %12s", - ts.thread_id, - ThreadStatus::GetThreadTypeName(ts.thread_type).c_str(), - ts.cf_name.c_str(), - ThreadStatus::GetOperationName(ts.operation_type).c_str(), - ThreadStatus::MicrosToString(ts.op_elapsed_micros).c_str(), - ThreadStatus::GetOperationStageName(ts.operation_stage).c_str(), - ThreadStatus::GetStateName(ts.state_type).c_str()); + ts.thread_id, + ThreadStatus::GetThreadTypeName(ts.thread_type).c_str(), + ts.cf_name.c_str(), + ThreadStatus::GetOperationName(ts.operation_type).c_str(), + ThreadStatus::MicrosToString(ts.op_elapsed_micros).c_str(), + ThreadStatus::GetOperationStageName(ts.operation_stage).c_str(), + ThreadStatus::GetStateName(ts.state_type).c_str()); auto op_properties = ThreadStatus::InterpretOperationProperties( ts.operation_type, ts.op_properties); for (const auto& op_prop : op_properties) { - fprintf(stderr, " %s %" PRIu64" |", - op_prop.first.c_str(), op_prop.second); + fprintf(stderr, " %s %" PRIu64 " |", op_prop.first.c_str(), + op_prop.second); } fprintf(stderr, "\n"); } } - void ResetSineInterval() { - sine_interval_ = FLAGS_env->NowMicros(); - } + void 
ResetSineInterval() { sine_interval_ = FLAGS_env->NowMicros(); } - uint64_t GetSineInterval() { - return sine_interval_; - } + uint64_t GetSineInterval() { return sine_interval_; } - uint64_t GetStart() { - return start_; - } + uint64_t GetStart() { return start_; } void ResetLastOpTime() { // Set to now to avoid latency from calls to SleepForMicroseconds @@ -1657,8 +1672,7 @@ class Stats { uint64_t now = FLAGS_env->NowMicros(); uint64_t micros = now - last_op_finish_; - if (hist_.find(op_type) == hist_.end()) - { + if (hist_.find(op_type) == hist_.end()) { auto hist_temp = std::make_shared(); hist_.insert({op_type, std::move(hist_temp)}); } @@ -1674,13 +1688,20 @@ class Stats { done_ += num_ops; if (done_ >= next_report_) { if (!FLAGS_stats_interval) { - if (next_report_ < 1000) next_report_ += 100; - else if (next_report_ < 5000) next_report_ += 500; - else if (next_report_ < 10000) next_report_ += 1000; - else if (next_report_ < 50000) next_report_ += 5000; - else if (next_report_ < 100000) next_report_ += 10000; - else if (next_report_ < 500000) next_report_ += 50000; - else next_report_ += 100000; + if (next_report_ < 1000) + next_report_ += 100; + else if (next_report_ < 5000) + next_report_ += 500; + else if (next_report_ < 10000) + next_report_ += 1000; + else if (next_report_ < 50000) + next_report_ += 5000; + else if (next_report_ < 100000) + next_report_ += 10000; + else if (next_report_ < 500000) + next_report_ += 50000; + else + next_report_ += 100000; fprintf(stderr, "... finished %" PRIu64 " ops%30s\r", done_, ""); } else { uint64_t now = FLAGS_env->NowMicros(); @@ -1695,15 +1716,13 @@ class Stats { next_report_ += FLAGS_stats_interval; } else { - fprintf(stderr, - "%s ... thread %d: (%" PRIu64 ",%" PRIu64 ") ops and " + "%s ... thread %d: (%" PRIu64 ",%" PRIu64 + ") ops and " "(%.1f,%.1f) ops/second in (%.6f,%.6f) seconds\n", - FLAGS_env->TimeToString(now/1000000).c_str(), - id_, + FLAGS_env->TimeToString(now / 1000000).c_str(), id_, done_ - last_report_done_, done_, - (done_ - last_report_done_) / - (usecs_since_last / 1000000.0), + (done_ - last_report_done_) / (usecs_since_last / 1000000.0), done_ / ((now - start_) / 1000000.0), (now - last_report_finish_) / 1000000.0, (now - start_) / 1000000.0); @@ -1762,9 +1781,7 @@ class Stats { } } - void AddBytes(int64_t n) { - bytes_ += n; - } + void AddBytes(int64_t n) { bytes_ += n; } void Report(const Slice& name) { // Pretend at least one op was done in case we are running a benchmark @@ -1783,14 +1800,11 @@ class Stats { } AppendWithSpace(&extra, message_); double elapsed = (finish_ - start_) * 1e-6; - double throughput = (double)done_/elapsed; + double throughput = (double)done_ / elapsed; fprintf(stdout, "%-12s : %11.3f micros/op %ld ops/sec;%s%s\n", - name.ToString().c_str(), - elapsed * 1e6 / done_, - (long)throughput, - (extra.empty() ? "" : " "), - extra.c_str()); + name.ToString().c_str(), elapsed * 1e6 / done_, (long)throughput, + (extra.empty() ? "" : " "), extra.c_str()); if (FLAGS_histogram) { for (auto it = hist_.begin(); it != hist_.end(); ++it) { fprintf(stdout, "Microseconds per %s:\n%s\n", @@ -1915,27 +1929,25 @@ struct SharedState { long num_done; bool start; - SharedState() : cv(&mu), perf_level(FLAGS_perf_level) { } + SharedState() : cv(&mu), perf_level(FLAGS_perf_level) {} }; // Per-thread state for concurrent executions of the same benchmark. 
struct ThreadState { - int tid; // 0..n-1 when running in n threads - Random64 rand; // Has different seeds for different threads + int tid; // 0..n-1 when running in n threads + Random64 rand; // Has different seeds for different threads Stats stats; SharedState* shared; /* implicit */ ThreadState(int index) - : tid(index), - rand((FLAGS_seed ? FLAGS_seed : 1000) + index) { - } + : tid(index), rand((FLAGS_seed ? FLAGS_seed : 1000) + index) {} }; class Duration { public: Duration(uint64_t max_seconds, int64_t max_ops, int64_t ops_per_stage = 0) { max_seconds_ = max_seconds; - max_ops_= max_ops; + max_ops_ = max_ops; ops_per_stage_ = (ops_per_stage > 0) ? ops_per_stage : max_ops; ops_ = 0; start_at_ = FLAGS_env->NowMicros(); @@ -1944,7 +1956,7 @@ class Duration { int64_t GetStage() { return std::min(ops_, max_ops_ - 1) / ops_per_stage_; } bool Done(int64_t increment) { - if (increment <= 0) increment = 1; // avoid Done(0) and infinite loops + if (increment <= 0) increment = 1; // avoid Done(0) and infinite loops ops_ += increment; if (max_seconds_) { @@ -2081,8 +2093,7 @@ class Benchmark { compressed); break; case rocksdb::kXpressCompression: - ok = XPRESS_Compress(input.data(), - input.size(), compressed); + ok = XPRESS_Compress(input.data(), input.size(), compressed); break; case rocksdb::kZSTD: ok = ZSTD_Compress(compression_ctx, input.data(), input.size(), @@ -2104,12 +2115,12 @@ class Benchmark { fprintf(stdout, "Prefix: %d bytes\n", FLAGS_prefix_size); fprintf(stdout, "Keys per prefix: %" PRIu64 "\n", keys_per_prefix_); fprintf(stdout, "RawSize: %.1f MB (estimated)\n", - ((static_cast(FLAGS_key_size + FLAGS_value_size) * num_) - / 1048576.0)); + ((static_cast(FLAGS_key_size + FLAGS_value_size) * num_) / + 1048576.0)); fprintf(stdout, "FileSize: %.1f MB (estimated)\n", - (((FLAGS_key_size + FLAGS_value_size * FLAGS_compression_ratio) - * num_) - / 1048576.0)); + (((FLAGS_key_size + FLAGS_value_size * FLAGS_compression_ratio) * + num_) / + 1048576.0)); fprintf(stdout, "Write rate: %" PRIu64 " bytes/second\n", FLAGS_benchmark_write_rate_limit); fprintf(stdout, "Read rate: %" PRIu64 " ops/second\n", @@ -2155,9 +2166,9 @@ class Benchmark { void PrintWarnings(const char* compression) { #if defined(__GNUC__) && !defined(__OPTIMIZE__) - fprintf(stdout, - "WARNING: Optimization is disabled: benchmarks unnecessarily slow\n" - ); + fprintf( + stdout, + "WARNING: Optimization is disabled: benchmarks unnecessarily slow\n"); #endif #ifndef NDEBUG fprintf(stdout, @@ -2191,7 +2202,7 @@ class Benchmark { start++; } unsigned int limit = static_cast(s.size()); - while (limit > start && isspace(s[limit-1])) { + while (limit > start && isspace(s[limit - 1])) { limit--; } return Slice(s.data() + start, limit - start); @@ -2199,8 +2210,8 @@ class Benchmark { #endif void PrintEnvironment() { - fprintf(stderr, "RocksDB: version %d.%d\n", - kMajorVersion, kMinorVersion); + fprintf(stderr, "RocksDB: version %d.%d\n", kMajorVersion, + kMinorVersion); #if defined(__linux) time_t now = time(nullptr); @@ -2465,36 +2476,38 @@ class Benchmark { return base_name + ToString(id); } -void VerifyDBFromDB(std::string& truth_db_name) { - DBWithColumnFamilies truth_db; - auto s = DB::OpenForReadOnly(open_options_, truth_db_name, &truth_db.db); - if (!s.ok()) { - fprintf(stderr, "open error: %s\n", s.ToString().c_str()); - exit(1); - } - ReadOptions ro; - ro.total_order_seek = true; - std::unique_ptr truth_iter(truth_db.db->NewIterator(ro)); - std::unique_ptr db_iter(db_.db->NewIterator(ro)); - // Verify that all the 
key/values in truth_db are retrivable in db with ::Get - fprintf(stderr, "Verifying db >= truth_db with ::Get...\n"); - for (truth_iter->SeekToFirst(); truth_iter->Valid(); truth_iter->Next()) { + void VerifyDBFromDB(std::string& truth_db_name) { + DBWithColumnFamilies truth_db; + auto s = DB::OpenForReadOnly(open_options_, truth_db_name, &truth_db.db); + if (!s.ok()) { + fprintf(stderr, "open error: %s\n", s.ToString().c_str()); + exit(1); + } + ReadOptions ro; + ro.total_order_seek = true; + std::unique_ptr truth_iter(truth_db.db->NewIterator(ro)); + std::unique_ptr db_iter(db_.db->NewIterator(ro)); + // Verify that all the key/values in truth_db are retrivable in db with + // ::Get + fprintf(stderr, "Verifying db >= truth_db with ::Get...\n"); + for (truth_iter->SeekToFirst(); truth_iter->Valid(); truth_iter->Next()) { std::string value; s = db_.db->Get(ro, truth_iter->key(), &value); assert(s.ok()); // TODO(myabandeh): provide debugging hints assert(Slice(value) == truth_iter->value()); + } + // Verify that the db iterator does not give any extra key/value + fprintf(stderr, "Verifying db == truth_db...\n"); + for (db_iter->SeekToFirst(), truth_iter->SeekToFirst(); db_iter->Valid(); + db_iter->Next(), truth_iter->Next()) { + assert(truth_iter->Valid()); + assert(truth_iter->value() == db_iter->value()); + } + // No more key should be left unchecked in truth_db + assert(!truth_iter->Valid()); + fprintf(stderr, "...Verified\n"); } - // Verify that the db iterator does not give any extra key/value - fprintf(stderr, "Verifying db == truth_db...\n"); - for (db_iter->SeekToFirst(), truth_iter->SeekToFirst(); db_iter->Valid(); db_iter->Next(), truth_iter->Next()) { - assert(truth_iter->Valid()); - assert(truth_iter->value() == db_iter->value()); - } - // No more key should be left unchecked in truth_db - assert(!truth_iter->Valid()); - fprintf(stderr, "...Verified\n"); -} void Run() { if (!SanityCheck()) { @@ -2868,7 +2881,7 @@ void VerifyDBFromDB(std::string& truth_db_name) { } } - SetPerfLevel(static_cast (shared->perf_level)); + SetPerfLevel(static_cast(shared->perf_level)); perf_context.EnablePerLevelPerfContext(); thread->stats.Start(thread->tid); (arg->bm->*(arg->method))(thread); @@ -2964,7 +2977,7 @@ void VerifyDBFromDB(std::string& truth_db_name) { void Crc32c(ThreadState* thread) { // Checksum about 500MB of data total - const int size = FLAGS_block_size; // use --block_size option for db_bench + const int size = FLAGS_block_size; // use --block_size option for db_bench std::string labels = "(" + ToString(FLAGS_block_size) + " per op)"; const char* label = labels.c_str(); @@ -3006,7 +3019,7 @@ void VerifyDBFromDB(std::string& truth_db_name) { int dummy; std::atomic ap(&dummy); int count = 0; - void *ptr = nullptr; + void* ptr = nullptr; thread->stats.AddMessage("(each op is 1000 loads)"); while (count < 100000) { for (int i = 0; i < 1000; i++) { @@ -3018,7 +3031,7 @@ void VerifyDBFromDB(std::string& truth_db_name) { if (ptr == nullptr) exit(1); // Disable unused variable warning. 
} - void Compress(ThreadState *thread) { + void Compress(ThreadState* thread) { RandomGenerator gen; Slice input = gen.Generate(FLAGS_block_size); int64_t bytes = 0; @@ -3048,7 +3061,7 @@ void VerifyDBFromDB(std::string& truth_db_name) { } } - void Uncompress(ThreadState *thread) { + void Uncompress(ThreadState* thread) { RandomGenerator gen; Slice input = gen.Generate(FLAGS_block_size); std::string compressed; @@ -3076,38 +3089,39 @@ void VerifyDBFromDB(std::string& truth_db_name) { uncompressed.get()); break; } - case rocksdb::kZlibCompression: - uncompressed = Zlib_Uncompress(uncompression_ctx, compressed.data(), - compressed.size(), &decompress_size, 2); - ok = uncompressed.get() != nullptr; - break; - case rocksdb::kBZip2Compression: - uncompressed = BZip2_Uncompress(compressed.data(), compressed.size(), - &decompress_size, 2); - ok = uncompressed.get() != nullptr; - break; - case rocksdb::kLZ4Compression: - uncompressed = LZ4_Uncompress(uncompression_ctx, compressed.data(), - compressed.size(), &decompress_size, 2); - ok = uncompressed.get() != nullptr; - break; - case rocksdb::kLZ4HCCompression: - uncompressed = LZ4_Uncompress(uncompression_ctx, compressed.data(), - compressed.size(), &decompress_size, 2); - ok = uncompressed.get() != nullptr; - break; - case rocksdb::kXpressCompression: - uncompressed.reset(XPRESS_Uncompress( - compressed.data(), compressed.size(), &decompress_size)); - ok = uncompressed.get() != nullptr; - break; - case rocksdb::kZSTD: - uncompressed = ZSTD_Uncompress(uncompression_ctx, compressed.data(), - compressed.size(), &decompress_size); - ok = uncompressed.get() != nullptr; - break; - default: - ok = false; + case rocksdb::kZlibCompression: + uncompressed = + Zlib_Uncompress(uncompression_ctx, compressed.data(), + compressed.size(), &decompress_size, 2); + ok = uncompressed.get() != nullptr; + break; + case rocksdb::kBZip2Compression: + uncompressed = BZip2_Uncompress(compressed.data(), compressed.size(), + &decompress_size, 2); + ok = uncompressed.get() != nullptr; + break; + case rocksdb::kLZ4Compression: + uncompressed = LZ4_Uncompress(uncompression_ctx, compressed.data(), + compressed.size(), &decompress_size, 2); + ok = uncompressed.get() != nullptr; + break; + case rocksdb::kLZ4HCCompression: + uncompressed = LZ4_Uncompress(uncompression_ctx, compressed.data(), + compressed.size(), &decompress_size, 2); + ok = uncompressed.get() != nullptr; + break; + case rocksdb::kXpressCompression: + uncompressed.reset(XPRESS_Uncompress( + compressed.data(), compressed.size(), &decompress_size)); + ok = uncompressed.get() != nullptr; + break; + case rocksdb::kZSTD: + uncompressed = ZSTD_Uncompress(uncompression_ctx, compressed.data(), + compressed.size(), &decompress_size); + ok = uncompressed.get() != nullptr; + break; + default: + ok = false; } bytes += input.size(); thread->stats.FinishedOps(nullptr, nullptr, 1, kUncompress); @@ -3158,7 +3172,7 @@ void VerifyDBFromDB(std::string& truth_db_name) { options.write_buffer_size = FLAGS_write_buffer_size; options.max_write_buffer_number = FLAGS_max_write_buffer_number; options.min_write_buffer_number_to_merge = - FLAGS_min_write_buffer_number_to_merge; + FLAGS_min_write_buffer_number_to_merge; options.max_write_buffer_number_to_maintain = FLAGS_max_write_buffer_number_to_maintain; options.max_background_jobs = FLAGS_max_background_jobs; @@ -3216,14 +3230,15 @@ void VerifyDBFromDB(std::string& truth_db_name) { FLAGS_max_bytes_for_level_multiplier; if ((FLAGS_prefix_size == 0) && (FLAGS_rep_factory == kPrefixHash || 
FLAGS_rep_factory == kHashLinkedList)) { - fprintf(stderr, "prefix_size should be non-zero if PrefixHash or " - "HashLinkedList memtablerep is used\n"); + fprintf(stderr, + "prefix_size should be non-zero if PrefixHash or " + "HashLinkedList memtablerep is used\n"); exit(1); } switch (FLAGS_rep_factory) { case kSkipList: - options.memtable_factory.reset(new SkipListFactory( - FLAGS_skip_list_lookahead)); + options.memtable_factory.reset( + new SkipListFactory(FLAGS_skip_list_lookahead)); break; #ifndef ROCKSDB_LITE case kPrefixHash: @@ -3231,13 +3246,11 @@ void VerifyDBFromDB(std::string& truth_db_name) { NewHashSkipListRepFactory(FLAGS_hash_bucket_count)); break; case kHashLinkedList: - options.memtable_factory.reset(NewHashLinkListRepFactory( - FLAGS_hash_bucket_count)); + options.memtable_factory.reset( + NewHashLinkListRepFactory(FLAGS_hash_bucket_count)); break; case kVectorRep: - options.memtable_factory.reset( - new VectorRepFactory - ); + options.memtable_factory.reset(new VectorRepFactory); break; case kCuckoo: options.memtable_factory.reset(NewHashCuckooRepFactory( @@ -3286,8 +3299,8 @@ void VerifyDBFromDB(std::string& truth_db_name) { rocksdb::CuckooTableOptions table_options; table_options.hash_table_ratio = FLAGS_cuckoo_hash_ratio; table_options.identity_as_first_hash = FLAGS_identity_as_first_hash; - options.table_factory = std::shared_ptr( - NewCuckooTableFactory(table_options)); + options.table_factory = + std::shared_ptr(NewCuckooTableFactory(table_options)); #else fprintf(stderr, "Cuckoo table is not supported in lite mode\n"); exit(1); @@ -3297,7 +3310,7 @@ void VerifyDBFromDB(std::string& truth_db_name) { if (FLAGS_use_hash_search) { if (FLAGS_prefix_size == 0) { fprintf(stderr, - "prefix_size not assigned when enable use_hash_search \n"); + "prefix_size not assigned when enable use_hash_search \n"); exit(1); } block_based_options.index_type = BlockBasedTableOptions::kHashSearch; @@ -3402,13 +3415,13 @@ void VerifyDBFromDB(std::string& truth_db_name) { exit(1); } options.max_bytes_for_level_multiplier_additional = - FLAGS_max_bytes_for_level_multiplier_additional_v; + FLAGS_max_bytes_for_level_multiplier_additional_v; } options.level0_stop_writes_trigger = FLAGS_level0_stop_writes_trigger; options.level0_file_num_compaction_trigger = FLAGS_level0_file_num_compaction_trigger; options.level0_slowdown_writes_trigger = - FLAGS_level0_slowdown_writes_trigger; + FLAGS_level0_slowdown_writes_trigger; options.compression = FLAGS_compression_type_e; options.WAL_ttl_seconds = FLAGS_wal_ttl_seconds; options.WAL_size_limit_MB = FLAGS_wal_size_limit_MB; @@ -3420,8 +3433,7 @@ void VerifyDBFromDB(std::string& truth_db_name) { for (int i = 0; i < FLAGS_min_level_to_compress; i++) { options.compression_per_level[i] = kNoCompression; } - for (int i = FLAGS_min_level_to_compress; - i < FLAGS_num_levels; i++) { + for (int i = FLAGS_min_level_to_compress; i < FLAGS_num_levels; i++) { options.compression_per_level[i] = FLAGS_compression_type_e; } } @@ -3442,7 +3454,7 @@ void VerifyDBFromDB(std::string& truth_db_name) { options.write_thread_max_yield_usec = FLAGS_write_thread_max_yield_usec; options.write_thread_slow_yield_usec = FLAGS_write_thread_slow_yield_usec; options.rate_limit_delay_max_milliseconds = - FLAGS_rate_limit_delay_max_milliseconds; + FLAGS_rate_limit_delay_max_milliseconds; options.table_cache_numshardbits = FLAGS_table_cache_numshardbits; options.max_compaction_bytes = FLAGS_max_compaction_bytes; options.disable_auto_compactions = FLAGS_disable_auto_compactions; @@ -3456,8 
+3468,8 @@ void VerifyDBFromDB(std::string& truth_db_name) { options.wal_bytes_per_sync = FLAGS_wal_bytes_per_sync; // merge operator options - options.merge_operator = MergeOperators::CreateFromStringId( - FLAGS_merge_operator); + options.merge_operator = + MergeOperators::CreateFromStringId(FLAGS_merge_operator); if (options.merge_operator == nullptr && !FLAGS_merge_operator.empty()) { fprintf(stderr, "invalid merge operator: %s\n", FLAGS_merge_operator.c_str()); @@ -3469,23 +3481,23 @@ void VerifyDBFromDB(std::string& truth_db_name) { // set universal style compaction configurations, if applicable if (FLAGS_universal_size_ratio != 0) { options.compaction_options_universal.size_ratio = - FLAGS_universal_size_ratio; + FLAGS_universal_size_ratio; } if (FLAGS_universal_min_merge_width != 0) { options.compaction_options_universal.min_merge_width = - FLAGS_universal_min_merge_width; + FLAGS_universal_min_merge_width; } if (FLAGS_universal_max_merge_width != 0) { options.compaction_options_universal.max_merge_width = - FLAGS_universal_max_merge_width; + FLAGS_universal_max_merge_width; } if (FLAGS_universal_max_size_amplification_percent != 0) { options.compaction_options_universal.max_size_amplification_percent = - FLAGS_universal_max_size_amplification_percent; + FLAGS_universal_max_size_amplification_percent; } if (FLAGS_universal_compression_size_percent != -1) { options.compaction_options_universal.compression_size_percent = - FLAGS_universal_compression_size_percent; + FLAGS_universal_compression_size_percent; } options.compaction_options_universal.allow_trivial_move = FLAGS_universal_allow_trivial_move; @@ -3499,7 +3511,6 @@ void VerifyDBFromDB(std::string& truth_db_name) { exit(1); } #endif // ROCKSDB_LITE - } void InitializeOptionsGeneral(titandb::TitanOptions* opts) { @@ -3569,7 +3580,7 @@ void VerifyDBFromDB(std::string& truth_db_name) { } options.listeners.emplace_back(listener_); - + opts->min_blob_size = 0; opts->min_gc_batch_size = 128 << 20; opts->blob_file_compression = FLAGS_compression_type_e; @@ -3610,7 +3621,7 @@ void VerifyDBFromDB(std::string& truth_db_name) { } void OpenDb(titandb::TitanOptions options, const std::string& db_name, - DBWithColumnFamilies* db) { + DBWithColumnFamilies* db) { Status s; // Open with column families if necessary. 
if (FLAGS_num_column_families > 1) { @@ -3624,7 +3635,7 @@ void VerifyDBFromDB(std::string& truth_db_name) { std::vector column_families; for (size_t i = 0; i < num_hot; i++) { column_families.push_back(ColumnFamilyDescriptor( - ColumnFamilyName(i), ColumnFamilyOptions(options))); + ColumnFamilyName(i), ColumnFamilyOptions(options))); } std::vector cfh_idx_to_prob; if (!FLAGS_column_family_distribution.empty()) { @@ -3650,8 +3661,8 @@ void VerifyDBFromDB(std::string& truth_db_name) { } #ifndef ROCKSDB_LITE if (FLAGS_readonly) { - s = DB::OpenForReadOnly(options, db_name, column_families, - &db->cfh, &db->db); + s = DB::OpenForReadOnly(options, db_name, column_families, &db->cfh, + &db->db); } else if (FLAGS_optimistic_transaction_db) { s = OptimisticTransactionDB::Open(options, db_name, column_families, &db->cfh, &db->opt_txn_db); @@ -3736,9 +3747,7 @@ void VerifyDBFromDB(std::string& truth_db_name) { } } - enum WriteMode { - RANDOM, SEQUENTIAL, UNIQUE_RANDOM - }; + enum WriteMode { RANDOM, SEQUENTIAL, UNIQUE_RANDOM }; void WriteSeqDeterministic(ThreadState* thread) { DoDeterministicCompact(thread, open_options_.compaction_style, SEQUENTIAL); @@ -3749,13 +3758,9 @@ void VerifyDBFromDB(std::string& truth_db_name) { UNIQUE_RANDOM); } - void WriteSeq(ThreadState* thread) { - DoWrite(thread, SEQUENTIAL); - } + void WriteSeq(ThreadState* thread) { DoWrite(thread, SEQUENTIAL); } - void WriteRandom(ThreadState* thread) { - DoWrite(thread, RANDOM); - } + void WriteRandom(ThreadState* thread) { DoWrite(thread, RANDOM); } void WriteUniqueRandom(ThreadState* thread) { DoWrite(thread, UNIQUE_RANDOM); @@ -3803,9 +3808,7 @@ void VerifyDBFromDB(std::string& truth_db_name) { std::vector values_; }; - DB* SelectDB(ThreadState* thread) { - return SelectDBWithCfh(thread)->db; - } + DB* SelectDB(ThreadState* thread) { return SelectDBWithCfh(thread)->db; } DBWithColumnFamilies* SelectDBWithCfh(ThreadState* thread) { return SelectDBWithCfh(thread->rand.Next()); @@ -3814,13 +3817,13 @@ void VerifyDBFromDB(std::string& truth_db_name) { DBWithColumnFamilies* SelectDBWithCfh(uint64_t rand_int) { if (db_.db != nullptr) { return &db_; - } else { + } else { return &multi_dbs_[rand_int % multi_dbs_.size()]; } } double SineRate(double x) { - return FLAGS_sine_a*sin((FLAGS_sine_b*x) + FLAGS_sine_c) + FLAGS_sine_d; + return FLAGS_sine_a * sin((FLAGS_sine_b * x) + FLAGS_sine_c) + FLAGS_sine_d; } void DoWrite(ThreadState* thread, WriteMode write_mode) { @@ -3986,12 +3989,12 @@ void VerifyDBFromDB(std::string& truth_db_name) { if (usecs_since_last > (FLAGS_sine_write_rate_interval_milliseconds * uint64_t{1000})) { double usecs_since_start = - static_cast(now - thread->stats.GetStart()); + static_cast(now - thread->stats.GetStart()); thread->stats.ResetSineInterval(); uint64_t write_rate = - static_cast(SineRate(usecs_since_start / 1000000.0)); + static_cast(SineRate(usecs_since_start / 1000000.0)); thread->shared->write_rate_limiter.reset( - NewGenericRateLimiter(write_rate)); + NewGenericRateLimiter(write_rate)); } } if (!s.ok()) { @@ -4072,11 +4075,13 @@ void VerifyDBFromDB(std::string& truth_db_name) { continue; } } - writes_ /= static_cast(open_options_.max_bytes_for_level_multiplier); + writes_ /= + static_cast(open_options_.max_bytes_for_level_multiplier); } for (size_t i = 0; i < num_db; i++) { if (sorted_runs[i].size() < num_levels - 1) { - fprintf(stderr, "n is too small to fill %" ROCKSDB_PRIszt " levels\n", num_levels); + fprintf(stderr, "n is too small to fill %" ROCKSDB_PRIszt " levels\n", + num_levels); 
exit(1); } } @@ -4087,13 +4092,14 @@ void VerifyDBFromDB(std::string& truth_db_name) { auto options = db->GetOptions(); MutableCFOptions mutable_cf_options(options); for (size_t j = 0; j < sorted_runs[i].size(); j++) { - compactionOptions.output_file_size_limit = - MaxFileSizeForLevel(mutable_cf_options, - static_cast(output_level), compaction_style); + compactionOptions.output_file_size_limit = MaxFileSizeForLevel( + mutable_cf_options, static_cast(output_level), + compaction_style); std::cout << sorted_runs[i][j].size() << std::endl; - db->CompactFiles(compactionOptions, {sorted_runs[i][j].back().name, - sorted_runs[i][j].front().name}, - static_cast(output_level - j) /*level*/); + db->CompactFiles( + compactionOptions, + {sorted_runs[i][j].back().name, sorted_runs[i][j].front().name}, + static_cast(output_level - j) /*level*/); } } } else if (compaction_style == kCompactionStyleUniversal) { @@ -4124,11 +4130,13 @@ void VerifyDBFromDB(std::string& truth_db_name) { } num_files_at_level0[i] = meta.levels[0].files.size(); } - writes_ = static_cast(writes_* static_cast(100) / (ratio + 200)); + writes_ = static_cast(writes_ * static_cast(100) / + (ratio + 200)); } for (size_t i = 0; i < num_db; i++) { if (sorted_runs[i].size() < num_levels) { - fprintf(stderr, "n is too small to fill %" ROCKSDB_PRIszt " levels\n", num_levels); + fprintf(stderr, "n is too small to fill %" ROCKSDB_PRIszt " levels\n", + num_levels); exit(1); } } @@ -4139,9 +4147,9 @@ void VerifyDBFromDB(std::string& truth_db_name) { auto options = db->GetOptions(); MutableCFOptions mutable_cf_options(options); for (size_t j = 0; j < sorted_runs[i].size(); j++) { - compactionOptions.output_file_size_limit = - MaxFileSizeForLevel(mutable_cf_options, - static_cast(output_level), compaction_style); + compactionOptions.output_file_size_limit = MaxFileSizeForLevel( + mutable_cf_options, static_cast(output_level), + compaction_style); db->CompactFiles( compactionOptions, {sorted_runs[i][j].back().name, sorted_runs[i][j].front().name}, @@ -4152,7 +4160,7 @@ void VerifyDBFromDB(std::string& truth_db_name) { } else if (compaction_style == kCompactionStyleFIFO) { if (num_levels != 1) { return Status::InvalidArgument( - "num_levels should be 1 for FIFO compaction"); + "num_levels should be 1 for FIFO compaction"); } if (FLAGS_num_multi_db != 0) { return Status::InvalidArgument("Doesn't support multiDB"); @@ -4169,7 +4177,7 @@ void VerifyDBFromDB(std::string& truth_db_name) { db->GetColumnFamilyMetaData(&meta); auto total_size = meta.levels[0].size; if (total_size >= - db->GetOptions().compaction_options_fifo.max_table_files_size) { + db->GetOptions().compaction_options_fifo.max_table_files_size) { for (auto file_meta : meta.levels[0].files) { file_names.emplace_back(file_meta.name); } @@ -4206,8 +4214,8 @@ void VerifyDBFromDB(std::string& truth_db_name) { db->GetColumnFamilyMetaData(&meta); auto total_size = meta.levels[0].size; assert(total_size <= - db->GetOptions().compaction_options_fifo.max_table_files_size); - break; + db->GetOptions().compaction_options_fifo.max_table_files_size); + break; } // verify smallest/largest seqno and key range of each sorted run @@ -4273,7 +4281,9 @@ void VerifyDBFromDB(std::string& truth_db_name) { for (size_t k = 0; k < num_db; k++) { auto db = db_list[k]; fprintf(stdout, - "---------------------- DB %" ROCKSDB_PRIszt " LSM ---------------------\n", k); + "---------------------- DB %" ROCKSDB_PRIszt + " LSM ---------------------\n", + k); db->GetColumnFamilyMetaData(&meta); for (auto& levelMeta : 
meta.levels) { if (levelMeta.files.empty()) { @@ -4420,7 +4430,9 @@ void VerifyDBFromDB(std::string& truth_db_name) { } while (!duration.Done(100)); char msg[100]; - snprintf(msg, sizeof(msg), "(%" PRIu64 " of %" PRIu64 " found, " + snprintf(msg, sizeof(msg), + "(%" PRIu64 " of %" PRIu64 + " found, " "issued %" PRIu64 " non-exist keys)\n", found, read, nonexist); @@ -4499,8 +4511,8 @@ void VerifyDBFromDB(std::string& truth_db_name) { } char msg[100]; - snprintf(msg, sizeof(msg), "(%" PRIu64 " of %" PRIu64 " found)\n", - found, read); + snprintf(msg, sizeof(msg), "(%" PRIu64 " of %" PRIu64 " found)\n", found, + read); thread->stats.AddBytes(bytes); thread->stats.AddMessage(msg); @@ -4519,7 +4531,7 @@ void VerifyDBFromDB(std::string& truth_db_name) { int64_t found = 0; ReadOptions options(FLAGS_verify_checksum, true); std::vector keys; - std::vector > key_guards; + std::vector> key_guards; std::vector values(entries_per_batch_); while (static_cast(keys.size()) < entries_per_batch_) { key_guards.push_back(std::unique_ptr()); @@ -4556,8 +4568,8 @@ void VerifyDBFromDB(std::string& truth_db_name) { } char msg[100]; - snprintf(msg, sizeof(msg), "(%" PRIu64 " of %" PRIu64 " found)", - found, read); + snprintf(msg, sizeof(msg), "(%" PRIu64 " of %" PRIu64 " found)", found, + read); thread->stats.AddMessage(msg); } @@ -4680,8 +4692,8 @@ void VerifyDBFromDB(std::string& truth_db_name) { } char msg[100]; - snprintf(msg, sizeof(msg), "(%" PRIu64 " of %" PRIu64 " found)\n", - found, read); + snprintf(msg, sizeof(msg), "(%" PRIu64 " of %" PRIu64 " found)\n", found, + read); thread->stats.AddBytes(bytes); thread->stats.AddMessage(msg); if (FLAGS_perf_level > rocksdb::PerfLevel::kDisable) { @@ -4731,13 +4743,9 @@ void VerifyDBFromDB(std::string& truth_db_name) { } } - void DeleteSeq(ThreadState* thread) { - DoDelete(thread, true); - } + void DeleteSeq(ThreadState* thread) { DoDelete(thread, true); } - void DeleteRandom(ThreadState* thread) { - DoDelete(thread, false); - } + void DeleteRandom(ThreadState* thread) { DoDelete(thread, false); } void ReadWhileWriting(ThreadState* thread) { if (thread->tid > 0) { @@ -4881,7 +4889,6 @@ void VerifyDBFromDB(std::string& truth_db_name) { return s; } - // Given a key K, this deletes (K+"0", V), (K+"1", V), (K+"2", V) // in DB atomically i.e in a single batch. Also refer GetMany. 
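// (Sketch for the reader, not part of the patch: the "single batch" promised
// above is a rocksdb::WriteBatch, roughly
//
//   WriteBatch batch;
//   for (int i = 0; i < 3; i++) {
//     batch.Delete(key.ToString() + std::to_string(i));  // K+"0" .. K+"2"
//   }
//   Status s = db->Write(writeoptions, &batch);
//
// so either all three deletes land or none do; the exact suffix order used
// by db_bench may differ.)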
Status DeleteMany(DB* db, const WriteOptions& writeoptions, @@ -4972,7 +4979,7 @@ void VerifyDBFromDB(std::string& truth_db_name) { put_weight = 100 - get_weight - delete_weight; } GenerateKeyFromInt(thread->rand.Next() % FLAGS_numdistinct, - FLAGS_numdistinct, &key); + FLAGS_numdistinct, &key); if (get_weight > 0) { // do all the gets first Status s = GetMany(db, options, key, &value); @@ -5010,8 +5017,8 @@ void VerifyDBFromDB(std::string& truth_db_name) { } char msg[128]; snprintf(msg, sizeof(msg), - "( get:%" PRIu64 " put:%" PRIu64 " del:%" PRIu64 " total:%" \ - PRIu64 " found:%" PRIu64 ")", + "( get:%" PRIu64 " put:%" PRIu64 " del:%" PRIu64 " total:%" PRIu64 + " found:%" PRIu64 ")", gets_done, puts_done, deletes_done, readwrites_, found); thread->stats.AddMessage(msg); } @@ -5054,7 +5061,7 @@ void VerifyDBFromDB(std::string& truth_db_name) { get_weight--; reads_done++; thread->stats.FinishedOps(nullptr, db, 1, kRead); - } else if (put_weight > 0) { + } else if (put_weight > 0) { // then do all the corresponding number of puts // for all the gets we have done earlier Status s = db->Put(write_options_, key, gen.Generate(value_size_)); @@ -5068,8 +5075,9 @@ void VerifyDBFromDB(std::string& truth_db_name) { } } char msg[100]; - snprintf(msg, sizeof(msg), "( reads:%" PRIu64 " writes:%" PRIu64 \ - " total:%" PRIu64 " found:%" PRIu64 ")", + snprintf(msg, sizeof(msg), + "( reads:%" PRIu64 " writes:%" PRIu64 " total:%" PRIu64 + " found:%" PRIu64 ")", reads_done, writes_done, readwrites_, found); thread->stats.AddMessage(msg); } @@ -5116,8 +5124,8 @@ void VerifyDBFromDB(std::string& truth_db_name) { thread->stats.FinishedOps(nullptr, db, 1, kUpdate); } char msg[100]; - snprintf(msg, sizeof(msg), - "( updates:%" PRIu64 " found:%" PRIu64 ")", readwrites_, found); + snprintf(msg, sizeof(msg), "( updates:%" PRIu64 " found:%" PRIu64 ")", + readwrites_, found); thread->stats.AddBytes(bytes); thread->stats.AddMessage(msg); } @@ -5169,8 +5177,8 @@ void VerifyDBFromDB(std::string& truth_db_name) { thread->stats.FinishedOps(nullptr, db, 1); } char msg[100]; - snprintf(msg, sizeof(msg), - "( updates:%" PRIu64 " found:%" PRIu64 ")", readwrites_, found); + snprintf(msg, sizeof(msg), "( updates:%" PRIu64 " found:%" PRIu64 ")", + readwrites_, found); thread->stats.AddMessage(msg); } @@ -5209,7 +5217,7 @@ void VerifyDBFromDB(std::string& truth_db_name) { Slice operand = gen.Generate(value_size_); if (value.size() > 0) { // Use a delimiter to match the semantics for StringAppendOperator - value.append(1,','); + value.append(1, ','); } value.append(operand.data(), operand.size()); @@ -5225,7 +5233,7 @@ void VerifyDBFromDB(std::string& truth_db_name) { char msg[100]; snprintf(msg, sizeof(msg), "( updates:%" PRIu64 " found:%" PRIu64 ")", - readwrites_, found); + readwrites_, found); thread->stats.AddBytes(bytes); thread->stats.AddMessage(msg); } @@ -5314,8 +5322,7 @@ void VerifyDBFromDB(std::string& truth_db_name) { thread->stats.FinishedOps(nullptr, db, 1, kMerge); } else { Status s = db->Get(options, key, &value); - if (value.length() > max_length) - max_length = value.length(); + if (value.length() > max_length) max_length = value.length(); if (!s.ok() && !s.IsNotFound()) { fprintf(stderr, "get error: %s\n", s.ToString().c_str()); @@ -5345,7 +5352,7 @@ void VerifyDBFromDB(std::string& truth_db_name) { DB* db = SelectDB(thread); std::unique_ptr iter( - db->NewIterator(ReadOptions(FLAGS_verify_checksum, true))); + db->NewIterator(ReadOptions(FLAGS_verify_checksum, true))); std::unique_ptr key_guard; Slice key = 
AllocateKey(&key_guard); @@ -5462,9 +5469,8 @@ void VerifyDBFromDB(std::string& truth_db_name) { return; } - Status s = - RandomTransactionInserter::Verify(db_.db, - static_cast(FLAGS_transaction_sets)); + Status s = RandomTransactionInserter::Verify( + db_.db, static_cast(FLAGS_transaction_sets)); if (s.ok()) { fprintf(stdout, "RandomTransactionVerify Success.\n"); @@ -5774,7 +5780,7 @@ int db_bench_tool(int argc, char** argv) { initialized = true; } ParseCommandLineFlags(&argc, &argv, true); - FLAGS_compaction_style_e = (rocksdb::CompactionStyle) FLAGS_compaction_style; + FLAGS_compaction_style_e = (rocksdb::CompactionStyle)FLAGS_compaction_style; #ifndef ROCKSDB_LITE if (FLAGS_statistics && !FLAGS_statistics_string.empty()) { fprintf(stderr, @@ -5810,7 +5816,7 @@ int db_bench_tool(int argc, char** argv) { } FLAGS_compression_type_e = - StringToCompressionType(FLAGS_compression_type.c_str()); + StringToCompressionType(FLAGS_compression_type.c_str()); #ifndef ROCKSDB_LITE std::unique_ptr custom_env_guard; @@ -5826,7 +5832,7 @@ int db_bench_tool(int argc, char** argv) { } #endif // ROCKSDB_LITE if (!FLAGS_hdfs.empty()) { - FLAGS_env = new rocksdb::HdfsEnv(FLAGS_hdfs); + FLAGS_env = new rocksdb::HdfsEnv(FLAGS_hdfs); } if (!strcasecmp(FLAGS_compaction_fadvice.c_str(), "NONE")) diff --git a/tools/titandb_stress.cc b/tools/titandb_stress.cc index 6477bd31e..9ab010a0c 100644 --- a/tools/titandb_stress.cc +++ b/tools/titandb_stress.cc @@ -89,9 +89,7 @@ static const int kValueMaxLen = 1024 * 1024; static bool ValidateUint32Range(const char* flagname, uint64_t value) { if (value > std::numeric_limits::max()) { - fprintf(stderr, - "Invalid value for --%s: %lu, overflow\n", - flagname, + fprintf(stderr, "Invalid value for --%s: %lu, overflow\n", flagname, (unsigned long)value); return false; } @@ -102,7 +100,7 @@ DEFINE_uint64(seed, 2341234, "Seed for PRNG"); static const bool FLAGS_seed_dummy __attribute__((__unused__)) = RegisterFlagValidator(&FLAGS_seed, &ValidateUint32Range); -DEFINE_int64(max_key, 1 * KB* KB, +DEFINE_int64(max_key, 1 * KB * KB, "Max number of key/values to place in database"); DEFINE_int32(column_families, 10, "Number of column families"); @@ -255,13 +253,16 @@ DEFINE_int32(max_background_flushes, rocksdb::Options().max_background_flushes, "The maximum number of concurrent background flushes " "that can occur in parallel."); -DEFINE_int32(universal_size_ratio, 0, "The ratio of file sizes that trigger" +DEFINE_int32(universal_size_ratio, 0, + "The ratio of file sizes that trigger" " compaction in universal style"); -DEFINE_int32(universal_min_merge_width, 0, "The minimum number of files to " +DEFINE_int32(universal_min_merge_width, 0, + "The minimum number of files to " "compact in universal style compaction"); -DEFINE_int32(universal_max_merge_width, 0, "The max number of files to compact" +DEFINE_int32(universal_max_merge_width, 0, + "The max number of files to compact" " in universal style compaction"); DEFINE_int32(universal_max_size_amplification_percent, 0, @@ -299,8 +300,8 @@ static const bool FLAGS_subcompactions_dummy __attribute__((__unused__)) = static bool ValidateInt32Positive(const char* flagname, int32_t value) { if (value < 0) { - fprintf(stderr, "Invalid value for --%s: %d, must be >=0\n", - flagname, value); + fprintf(stderr, "Invalid value for --%s: %d, must be >=0\n", flagname, + value); return false; } return true; @@ -309,11 +310,13 @@ DEFINE_int32(reopen, 10, "Number of times database reopens"); static const bool FLAGS_reopen_dummy 
__attribute__((__unused__)) = RegisterFlagValidator(&FLAGS_reopen, &ValidateInt32Positive); -DEFINE_int32(bloom_bits, 10, "Bloom filter bits per key. " +DEFINE_int32(bloom_bits, 10, + "Bloom filter bits per key. " "Negative means use default settings."); -DEFINE_bool(use_block_based_filter, false, "use block based filter" - "instead of full filter for block based table"); +DEFINE_bool(use_block_based_filter, false, + "use block based filter" + "instead of full filter for block based table"); DEFINE_string(db, "", "Use the db with the following name."); @@ -435,9 +438,9 @@ DEFINE_int32(max_background_gc, "that can occur in parallel."); static bool ValidateInt32Percent(const char* flagname, int32_t value) { - if (value < 0 || value>100) { - fprintf(stderr, "Invalid value for --%s: %d, 0<= pct <=100 \n", - flagname, value); + if (value < 0 || value > 100) { + fprintf(stderr, "Invalid value for --%s: %d, 0<= pct <=100 \n", flagname, + value); return false; } return true; @@ -476,7 +479,8 @@ DEFINE_int32(nooverwritepercent, 60, static const bool FLAGS_nooverwritepercent_dummy __attribute__((__unused__)) = RegisterFlagValidator(&FLAGS_nooverwritepercent, &ValidateInt32Percent); -DEFINE_int32(iterpercent, 10, "Ratio of iterations to total workload" +DEFINE_int32(iterpercent, 10, + "Ratio of iterations to total workload" " (expressed as a percentage)"); static const bool FLAGS_iterpercent_dummy __attribute__((__unused__)) = RegisterFlagValidator(&FLAGS_iterpercent, &ValidateInt32Percent); @@ -507,7 +511,7 @@ enum rocksdb::CompressionType StringToCompressionType(const char* ctype) { return rocksdb::kZSTD; fprintf(stderr, "Cannot parse compression type '%s'\n", ctype); - return rocksdb::kSnappyCompression; //default value + return rocksdb::kSnappyCompression; // default value } enum rocksdb::ChecksumType StringToChecksumType(const char* ctype) { @@ -570,11 +574,7 @@ DEFINE_uint64(max_manifest_file_size, 16384, "Maximum size of a MANIFEST file"); DEFINE_bool(in_place_update, false, "On true, does inplace update in memtable"); -enum RepFactory { - kSkipList, - kHashSkipList, - kVectorRep -}; +enum RepFactory { kSkipList, kHashSkipList, kVectorRep }; namespace { enum RepFactory StringToRepFactory(const char* ctype) { @@ -607,7 +607,8 @@ DEFINE_int32(prefix_size, 7, "Control the prefix size for HashSkipListRep"); static const bool FLAGS_prefix_size_dummy __attribute__((__unused__)) = RegisterFlagValidator(&FLAGS_prefix_size, &ValidatePrefixSize); -DEFINE_bool(use_merge, false, "On true, replaces all writes with a Merge " +DEFINE_bool(use_merge, false, + "On true, replaces all writes with a Merge " "that behaves like a Put"); DEFINE_bool(use_full_merge_v1, false, @@ -623,7 +624,7 @@ static std::string Key(int64_t val) { PutFixed64(&little_endian_key, val); assert(little_endian_key.size() == sizeof(val)); big_endian_key.resize(sizeof(val)); - for (size_t i = 0 ; i < sizeof(val); ++i) { + for (size_t i = 0; i < sizeof(val); ++i) { big_endian_key[i] = little_endian_key[sizeof(val) - 1 - i]; } return big_endian_key; @@ -635,7 +636,6 @@ static std::string StringToHex(const std::string& str) { return result; } - class StressTest; namespace { @@ -643,7 +643,7 @@ class Stats { private: uint64_t start_; uint64_t finish_; - double seconds_; + double seconds_; long done_; long gets_; long prefixes_; @@ -664,7 +664,7 @@ class Stats { HistogramImpl hist_; public: - Stats() { } + Stats() {} void Start() { next_report_ = 100; @@ -728,16 +728,23 @@ class Stats { last_op_finish_ = now; } - done_++; + done_++; if 
(FLAGS_progress_reports) { if (done_ >= next_report_) { - if (next_report_ < 1000) next_report_ += 100; - else if (next_report_ < 5000) next_report_ += 500; - else if (next_report_ < 10000) next_report_ += 1000; - else if (next_report_ < 50000) next_report_ += 5000; - else if (next_report_ < 100000) next_report_ += 10000; - else if (next_report_ < 500000) next_report_ += 50000; - else next_report_ += 100000; + if (next_report_ < 1000) + next_report_ += 100; + else if (next_report_ < 5000) + next_report_ += 500; + else if (next_report_ < 10000) + next_report_ += 1000; + else if (next_report_ < 50000) + next_report_ += 5000; + else if (next_report_ < 100000) + next_report_ += 10000; + else if (next_report_ < 500000) + next_report_ += 50000; + else + next_report_ += 100000; fprintf(stdout, "... finished %ld ops%30s\r", done_, ""); } } @@ -758,27 +765,17 @@ class Stats { iterator_size_sums_ += count; } - void AddIterations(int n) { - iterations_ += n; - } + void AddIterations(int n) { iterations_ += n; } - void AddDeletes(int n) { - deletes_ += n; - } + void AddDeletes(int n) { deletes_ += n; } void AddSingleDeletes(size_t n) { single_deletes_ += n; } - void AddRangeDeletions(int n) { - range_deletions_ += n; - } + void AddRangeDeletions(int n) { range_deletions_ += n; } - void AddCoveredByRangeDeletions(int n) { - covered_by_range_deletions_ += n; - } + void AddCoveredByRangeDeletions(int n) { covered_by_range_deletions_ += n; } - void AddErrors(int n) { - errors_ += n; - } + void AddErrors(int n) { errors_ += n; } void AddNumCompactFilesSucceed(int n) { num_compact_files_succeed_ += n; } @@ -794,19 +791,19 @@ class Stats { double elapsed = (finish_ - start_) * 1e-6; double bytes_mb = bytes_ / 1048576.0; double rate = bytes_mb / elapsed; - double throughput = (double)done_/elapsed; + double throughput = (double)done_ / elapsed; fprintf(stdout, "%-12s: ", name); - fprintf(stdout, "%.3f micros/op %ld ops/sec\n", - seconds_ * 1e6 / done_, (long)throughput); + fprintf(stdout, "%.3f micros/op %ld ops/sec\n", seconds_ * 1e6 / done_, + (long)throughput); fprintf(stdout, "%-12s: Wrote %.2f MB (%.2f MB/sec) (%ld%% of %ld ops)\n", - "", bytes_mb, rate, (100*writes_)/done_, done_); + "", bytes_mb, rate, (100 * writes_) / done_, done_); fprintf(stdout, "%-12s: Wrote %ld times\n", "", writes_); fprintf(stdout, "%-12s: Deleted %ld times\n", "", deletes_); fprintf(stdout, "%-12s: Single deleted %" ROCKSDB_PRIszt " times\n", "", - single_deletes_); - fprintf(stdout, "%-12s: %ld read and %ld found the key\n", "", - gets_, founds_); + single_deletes_); + fprintf(stdout, "%-12s: %ld read and %ld found the key\n", "", gets_, + founds_); fprintf(stdout, "%-12s: Prefix scanned %ld times\n", "", prefixes_); fprintf(stdout, "%-12s: Iterator size sum is %ld\n", "", iterator_size_sums_); @@ -862,7 +859,7 @@ class SharedState { Random64 rnd(seed_); // Start with the identity permutation. 
Subsequent iterations of // for loop below will start with perm of previous for loop - int64_t *permutation = new int64_t[max_key_]; + int64_t* permutation = new int64_t[max_key_]; for (int64_t i = 0; i < max_key_; i++) { permutation[i] = i; } @@ -918,8 +915,8 @@ class SharedState { } if (status.ok()) { assert(expected_mmap_buffer_->GetLen() == expected_values_size); - values_ = - static_cast*>(expected_mmap_buffer_->GetBase()); + values_ = static_cast*>( + expected_mmap_buffer_->GetBase()); assert(values_ != nullptr); } else { fprintf(stderr, "Failed opening shared file '%s' with error: %s\n", @@ -964,73 +961,39 @@ class SharedState { ~SharedState() {} - port::Mutex* GetMutex() { - return &mu_; - } + port::Mutex* GetMutex() { return &mu_; } - port::CondVar* GetCondVar() { - return &cv_; - } + port::CondVar* GetCondVar() { return &cv_; } - StressTest* GetStressTest() const { - return stress_test_; - } + StressTest* GetStressTest() const { return stress_test_; } - int64_t GetMaxKey() const { - return max_key_; - } + int64_t GetMaxKey() const { return max_key_; } - uint32_t GetNumThreads() const { - return num_threads_; - } + uint32_t GetNumThreads() const { return num_threads_; } - void IncInitialized() { - num_initialized_++; - } + void IncInitialized() { num_initialized_++; } - void IncOperated() { - num_populated_++; - } + void IncOperated() { num_populated_++; } - void IncDone() { - num_done_++; - } + void IncDone() { num_done_++; } - void IncVotedReopen() { - vote_reopen_ = (vote_reopen_ + 1) % num_threads_; - } + void IncVotedReopen() { vote_reopen_ = (vote_reopen_ + 1) % num_threads_; } - bool AllInitialized() const { - return num_initialized_ >= num_threads_; - } + bool AllInitialized() const { return num_initialized_ >= num_threads_; } - bool AllOperated() const { - return num_populated_ >= num_threads_; - } + bool AllOperated() const { return num_populated_ >= num_threads_; } - bool AllDone() const { - return num_done_ >= num_threads_; - } + bool AllDone() const { return num_done_ >= num_threads_; } - bool AllVotedReopen() { - return (vote_reopen_ == 0); - } + bool AllVotedReopen() { return (vote_reopen_ == 0); } - void SetStart() { - start_ = true; - } + void SetStart() { start_ = true; } - void SetStartVerify() { - start_verify_ = true; - } + void SetStartVerify() { start_verify_ = true; } - bool Started() const { - return start_; - } + bool Started() const { return start_; } - bool VerifyStarted() const { - return start_verify_; - } + bool VerifyStarted() const { return start_verify_; } void SetVerificationFailure() { verification_failure_.store(true); } @@ -1197,7 +1160,8 @@ class DbStressListener : public EventListener { DbStressListener(const std::string& db_name, const std::vector& db_paths, const std::vector& column_families) - : db_name_(db_name), db_paths_(db_paths), + : db_name_(db_name), + db_paths_(db_paths), column_families_(column_families) {} virtual ~DbStressListener() {} #ifndef ROCKSDB_LITE @@ -1264,7 +1228,7 @@ class DbStressListener : public EventListener { for (auto& cf : column_families_) { for (const auto& cf_path : cf.options.cf_paths) { if (cf_path.path == file_dir) { - return; + return; } } } @@ -1422,7 +1386,9 @@ class StressTest { }}, {"target_file_size_multiplier", { - ToString(options_.target_file_size_multiplier), "1", "2", + ToString(options_.target_file_size_multiplier), + "1", + "2", }}, {"max_bytes_for_level_base", { @@ -1432,7 +1398,9 @@ class StressTest { }}, {"max_bytes_for_level_multiplier", { - 
-           ToString(options_.max_bytes_for_level_multiplier), "1", "2",
+           ToString(options_.max_bytes_for_level_multiplier),
+           "1",
+           "2",
        }},
        {"max_sequential_skip_in_iterations", {"4", "8", "12"}},
     };
@@ -1487,7 +1455,7 @@ class StressTest {

     now = FLAGS_env->NowMicros();
     fprintf(stdout, "%s Starting database operations\n",
-            FLAGS_env->TimeToString(now/1000000).c_str());
+            FLAGS_env->TimeToString(now / 1000000).c_str());

     shared.SetStart();
     shared.GetCondVar()->SignalAll();
@@ -1498,10 +1466,10 @@ class StressTest {
       now = FLAGS_env->NowMicros();
       if (FLAGS_test_batches_snapshots) {
         fprintf(stdout, "%s Limited verification already done during gets\n",
-                FLAGS_env->TimeToString((uint64_t) now/1000000).c_str());
+                FLAGS_env->TimeToString((uint64_t)now / 1000000).c_str());
       } else {
         fprintf(stdout, "%s Starting verification\n",
-                FLAGS_env->TimeToString((uint64_t) now/1000000).c_str());
+                FLAGS_env->TimeToString((uint64_t)now / 1000000).c_str());
       }

       shared.SetStartVerify();
@@ -1523,7 +1491,7 @@ class StressTest {
     now = FLAGS_env->NowMicros();
     if (!FLAGS_test_batches_snapshots && !shared.HasVerificationFailedYet()) {
       fprintf(stdout, "%s Verification successful\n",
-              FLAGS_env->TimeToString(now/1000000).c_str());
+              FLAGS_env->TimeToString(now / 1000000).c_str());
     }
     PrintStatistics();
@@ -1617,7 +1585,7 @@ class StressTest {
   }

   static void PrintKeyValue(int cf, uint64_t key, const char* value,
-      size_t sz) {
+                            size_t sz) {
     if (!FLAGS_verbose) {
       return;
     }
@@ -1628,8 +1596,8 @@ class StressTest {
       snprintf(buf, 4, "%X", value[i]);
       tmp.append(buf);
     }
-    fprintf(stdout, "[CF %d] %" PRIi64 " == > (%" ROCKSDB_PRIszt ") %s\n",
-            cf, key, sz, tmp.c_str());
+    fprintf(stdout, "[CF %d] %" PRIi64 " == > (%" ROCKSDB_PRIszt ") %s\n", cf,
+            key, sz, tmp.c_str());
   }

   static int64_t GenerateOneKey(ThreadState* thread, uint64_t iteration) {
@@ -1640,17 +1608,17 @@ class StressTest {
     return base_key + thread->rand.Next() % FLAGS_active_width;
   }

-  static size_t GenerateValue(uint32_t rand, char *v, size_t max_sz) {
+  static size_t GenerateValue(uint32_t rand, char* v, size_t max_sz) {
     size_t value_sz =
         ((rand % kRandomValueMaxFactor) + 1) * FLAGS_value_size_mult;
     assert(value_sz <= max_sz && value_sz >= sizeof(uint32_t));
-    (void) max_sz;
+    (void)max_sz;
     *((uint32_t*)v) = rand;
-    for (size_t i=sizeof(uint32_t); i < value_sz; i++) {
+    for (size_t i = sizeof(uint32_t); i < value_sz; i++) {
       v[i] = (char)(rand ^ i);
     }
     v[value_sz] = '\0';
-    return value_sz; // the size of the value set.
+    return value_sz;  // the size of the value set.
   }

   Status AssertSame(DB* db, ColumnFamilyHandle* cf,
@@ -1688,8 +1656,8 @@ class StressTest {
   Status SetOptions(ThreadState* thread) {
     assert(FLAGS_set_options_one_in > 0);
     std::unordered_map<std::string, std::string> opts;
-    std::string name = options_index_[
-        thread->rand.Next() % options_index_.size()];
+    std::string name =
+        options_index_[thread->rand.Next() % options_index_.size()];

     int value_idx = thread->rand.Next() % options_table_[name].size();
     if (name == "soft_rate_limit" || name == "hard_rate_limit") {
       opts["soft_rate_limit"] = options_table_["soft_rate_limit"][value_idx];
@@ -1698,11 +1666,11 @@ class StressTest {
                name == "level0_slowdown_writes_trigger" ||
                name == "level0_stop_writes_trigger") {
       opts["level0_file_num_compaction_trigger"] =
-        options_table_["level0_file_num_compaction_trigger"][value_idx];
+          options_table_["level0_file_num_compaction_trigger"][value_idx];
       opts["level0_slowdown_writes_trigger"] =
-        options_table_["level0_slowdown_writes_trigger"][value_idx];
+          options_table_["level0_slowdown_writes_trigger"][value_idx];
       opts["level0_stop_writes_trigger"] =
-        options_table_["level0_stop_writes_trigger"][value_idx];
+          options_table_["level0_stop_writes_trigger"][value_idx];
     } else {
       opts[name] = options_table_[name][value_idx];
     }
@@ -1771,8 +1739,7 @@ class StressTest {
       if (thread->shared->AllVotedReopen()) {
         thread->shared->GetStressTest()->Reopen();
         thread->shared->GetCondVar()->SignalAll();
-      }
-      else {
+      } else {
         thread->shared->GetCondVar()->Wait();
       }
       // Commenting this out as we don't want to reset stats on each open.
@@ -1886,7 +1853,7 @@ class StressTest {
          }
        }
      }
-#endif // !ROCKSDB_LITE
+#endif  // !ROCKSDB_LITE
      int64_t rand_key = GenerateOneKey(thread, i);
      int rand_column_family = thread->rand.Next() % FLAGS_column_families;
      std::string keystr = Key(rand_key);
@@ -1943,7 +1910,7 @@ class StressTest {
                                                  snap_state);
        }
        while (!thread->snapshot_queue.empty() &&
-           i == thread->snapshot_queue.front().first) {
+              i == thread->snapshot_queue.front().first) {
          auto snap_state = thread->snapshot_queue.front().second;
          assert(snap_state.snapshot);
          // Note: this is unsafe as the cf might be dropped concurrently. But it
@@ -1972,14 +1939,14 @@ class StressTest {
       } else if (prefixBound <= prob_op && prob_op < writeBound) {
         // OPERATION write
         TestPut(thread, write_opts, read_opts, {rand_column_family}, {rand_key},
-            value, lock);
+                value, lock);
       } else if (writeBound <= prob_op && prob_op < delBound) {
         // OPERATION delete
         TestDelete(thread, write_opts, {rand_column_family}, {rand_key}, lock);
       } else if (delBound <= prob_op && prob_op < delRangeBound) {
         // OPERATION delete range
         TestDeleteRange(thread, write_opts, {rand_column_family}, {rand_key},
-            lock);
+                        lock);
       } else {
         // OPERATION iterate
         TestIterate(thread, read_opts, {rand_column_family}, {rand_key});
@@ -1996,31 +1963,31 @@ class StressTest {

   virtual bool ShouldAcquireMutexOnKey() const { return false; }

-  virtual Status TestGet(ThreadState* thread,
-                         const ReadOptions& read_opts,
-                         const std::vector<int>& rand_column_families,
-                         const std::vector<int64_t>& rand_keys) = 0;
+  virtual Status TestGet(ThreadState* thread, const ReadOptions& read_opts,
+                         const std::vector<int>& rand_column_families,
+                         const std::vector<int64_t>& rand_keys) = 0;

   virtual Status TestPrefixScan(ThreadState* thread,
-                         const ReadOptions& read_opts,
-                         const std::vector<int>& rand_column_families,
-                         const std::vector<int64_t>& rand_keys) = 0;
+                                const ReadOptions& read_opts,
+                                const std::vector<int>& rand_column_families,
+                                const std::vector<int64_t>& rand_keys) = 0;

-  virtual Status TestPut(ThreadState* thread,
-      WriteOptions& write_opts, const ReadOptions& read_opts,
-      const std::vector<int>& cf_ids, const std::vector<int64_t>& keys,
-      char (&value)[kValueMaxLen], std::unique_ptr<MutexLock>& lock) = 0;
+  virtual Status TestPut(ThreadState* thread, WriteOptions& write_opts,
+                         const ReadOptions& read_opts,
+                         const std::vector<int>& cf_ids,
+                         const std::vector<int64_t>& keys,
+                         char (&value)[kValueMaxLen],
+                         std::unique_ptr<MutexLock>& lock) = 0;

   virtual Status TestDelete(ThreadState* thread, WriteOptions& write_opts,
-                         const std::vector<int>& rand_column_families,
-                         const std::vector<int64_t>& rand_keys,
-                         std::unique_ptr<MutexLock>& lock) = 0;
+                            const std::vector<int>& rand_column_families,
+                            const std::vector<int64_t>& rand_keys,
+                            std::unique_ptr<MutexLock>& lock) = 0;

-  virtual Status TestDeleteRange(ThreadState* thread,
-                         WriteOptions& write_opts,
-                         const std::vector<int>& rand_column_families,
-                         const std::vector<int64_t>& rand_keys,
-                         std::unique_ptr<MutexLock>& lock) = 0;
+  virtual Status TestDeleteRange(ThreadState* thread, WriteOptions& write_opts,
+                                 const std::vector<int>& rand_column_families,
+                                 const std::vector<int64_t>& rand_keys,
+                                 std::unique_ptr<MutexLock>& lock) = 0;

   virtual void TestIngestExternalFile(
       ThreadState* thread, const std::vector<int>& rand_column_families,
@@ -2029,10 +1996,9 @@ class StressTest {

   // Given a key K, this creates an iterator which scans to K and then
   // does a random sequence of Next/Prev operations.
-  virtual Status TestIterate(ThreadState* thread,
-                             const ReadOptions& read_opts,
-                             const std::vector<int>& rand_column_families,
-                             const std::vector<int64_t>& rand_keys) {
+  virtual Status TestIterate(ThreadState* thread, const ReadOptions& read_opts,
+                             const std::vector<int>& rand_column_families,
+                             const std::vector<int64_t>& rand_keys) {
     Status s;
     const Snapshot* snapshot = db_->GetSnapshot();
     ReadOptions readoptionscopy = read_opts;
@@ -2070,8 +2036,8 @@ class StressTest {

   void VerificationAbort(SharedState* shared, std::string msg, int cf,
                          int64_t key) const {
-    printf("Verification failed for column family %d key %" PRIi64 ": %s\n", cf, key,
-           msg.c_str());
+    printf("Verification failed for column family %d key %" PRIi64 ": %s\n", cf,
+           key, msg.c_str());
     shared->SetVerificationFailure();
   }
@@ -2098,7 +2064,8 @@ class StressTest {
     fprintf(stdout, "Prefix percentage         : %d%%\n", FLAGS_prefixpercent);
     fprintf(stdout, "Write percentage          : %d%%\n", FLAGS_writepercent);
     fprintf(stdout, "Delete percentage         : %d%%\n", FLAGS_delpercent);
-    fprintf(stdout, "Delete range percentage   : %d%%\n", FLAGS_delrangepercent);
+    fprintf(stdout, "Delete range percentage   : %d%%\n",
+            FLAGS_delrangepercent);
     fprintf(stdout, "No overwrite percentage   : %d%%\n",
             FLAGS_nooverwritepercent);
     fprintf(stdout, "Iterate percentage        : %d%%\n", FLAGS_iterpercent);
@@ -2424,7 +2391,7 @@ class StressTest {
     num_times_reopened_++;
     auto now = FLAGS_env->NowMicros();
     fprintf(stdout, "%s Reopening database for the %dth time\n",
-            FLAGS_env->TimeToString(now/1000000).c_str(),
+            FLAGS_env->TimeToString(now / 1000000).c_str(),
             num_times_reopened_);
     Open();
   }
@@ -2447,7 +2414,7 @@ class StressTest {
   std::vector<std::string> column_family_names_;
   std::atomic<int> new_column_family_name_;
   int num_times_reopened_;
-  std::unordered_map<std::string, std::vector<std::string>> options_table_;
+  std::unordered_map<std::string, std::vector<std::string> > options_table_;
   std::vector<std::string> options_index_;
 };
@@ -2538,8 +2505,7 @@ class NonBatchedOpsStressTest : public StressTest {
     if (thread->rand.OneIn(FLAGS_clear_column_family_one_in)) {
       // drop column family and then create it again (can't drop default)
       int cf = thread->rand.Next() % (FLAGS_column_families - 1) + 1;
-      std::string new_name =
-          ToString(new_column_family_name_.fetch_add(1));
+      std::string new_name = ToString(new_column_family_name_.fetch_add(1));
       {
         MutexLock l(thread->shared->GetMutex());
         fprintf(
@@ -2558,7 +2524,7 @@ class NonBatchedOpsStressTest : public StressTest {
       delete column_families_[cf];
       if (!s.ok()) {
         fprintf(stderr, "dropping column family error: %s\n",
-            s.ToString().c_str());
+                s.ToString().c_str());
         std::terminate();
       }
       if (FLAGS_use_titandb) {
@@ -2581,7 +2547,7 @@ class NonBatchedOpsStressTest : public StressTest {
       thread->shared->ClearColumnFamily(cf);
       if (!s.ok()) {
         fprintf(stderr, "creating column family error: %s\n",
-            s.ToString().c_str());
+                s.ToString().c_str());
         std::terminate();
       }
       thread->shared->UnlockColumnFamily(cf);
@@ -2591,10 +2557,9 @@ class NonBatchedOpsStressTest : public StressTest {

   virtual bool ShouldAcquireMutexOnKey() const { return true; }

-  virtual Status TestGet(ThreadState* thread,
-                         const ReadOptions& read_opts,
-                         const std::vector<int>& rand_column_families,
-                         const std::vector<int64_t>& rand_keys) {
+  virtual Status TestGet(ThreadState* thread, const ReadOptions& read_opts,
+                         const std::vector<int>& rand_column_families,
+                         const std::vector<int64_t>& rand_keys) {
     auto cfh = column_families_[rand_column_families[0]];
     std::string key_str = Key(rand_keys[0]);
     Slice key = key_str;
@@ -2614,21 +2579,20 @@ class NonBatchedOpsStressTest : public StressTest {
   }

   virtual Status TestPrefixScan(ThreadState* thread,
-                        const ReadOptions& read_opts,
-                        const std::vector<int>& rand_column_families,
-                        const std::vector<int64_t>& rand_keys) {
+                                const ReadOptions& read_opts,
+                                const std::vector<int>& rand_column_families,
+                                const std::vector<int64_t>& rand_keys) {
     auto cfh = column_families_[rand_column_families[0]];
     std::string key_str = Key(rand_keys[0]);
     Slice key = key_str;
     Slice prefix = Slice(key.data(), FLAGS_prefix_size);

     Iterator* iter = db_->NewIterator(read_opts, cfh);
     int64_t count = 0;
-    for (iter->Seek(prefix);
-         iter->Valid() && iter->key().starts_with(prefix); iter->Next()) {
+    for (iter->Seek(prefix); iter->Valid() && iter->key().starts_with(prefix);
+         iter->Next()) {
       ++count;
     }
-    assert(count <=
-           (static_cast<int64_t>(1) << ((8 - FLAGS_prefix_size) * 8)));
+    assert(count <= (static_cast<int64_t>(1) << ((8 - FLAGS_prefix_size) * 8)));
     Status s = iter->status();
     if (iter->status().ok()) {
       thread->stats.AddPrefixes(1, static_cast<int>(count));
@@ -2639,11 +2603,12 @@ class NonBatchedOpsStressTest : public StressTest {
     return s;
   }

-  virtual Status TestPut(ThreadState* thread,
-      WriteOptions& write_opts, const ReadOptions& read_opts,
-      const std::vector<int>& rand_column_families,
-      const std::vector<int64_t>& rand_keys,
-      char (&value) [kValueMaxLen], std::unique_ptr<MutexLock>& lock) {
+  virtual Status TestPut(ThreadState* thread, WriteOptions& write_opts,
+                         const ReadOptions& read_opts,
+                         const std::vector<int>& rand_column_families,
+                         const std::vector<int64_t>& rand_keys,
+                         char (&value)[kValueMaxLen],
+                         std::unique_ptr<MutexLock>& lock) {
     auto shared = thread->shared;
     int64_t max_key = shared->GetMaxKey();
     int64_t rand_key = rand_keys[0];
@@ -2653,8 +2618,8 @@ class NonBatchedOpsStressTest : public StressTest {
       lock.reset();
       rand_key = thread->rand.Next() % max_key;
       rand_column_family = thread->rand.Next() % FLAGS_column_families;
-      lock.reset(new MutexLock(
-          shared->GetMutexForKey(rand_column_family, rand_key)));
+      lock.reset(
+          new MutexLock(shared->GetMutexForKey(rand_column_family, rand_key)));
     }

     std::string key_str = Key(rand_key);
@@ -2666,8 +2631,8 @@ class NonBatchedOpsStressTest : public StressTest {
       Slice k = key_str2;
       std::string from_db;
       Status s = db_->Get(read_opts, cfh, k, &from_db);
-      if (!VerifyValue(rand_column_family, rand_key, read_opts, shared,
-                       from_db, s, true)) {
+      if (!VerifyValue(rand_column_family, rand_key, read_opts, shared, from_db,
+                       s, true)) {
         return s;
       }
     }
@@ -2713,15 +2678,15 @@ class NonBatchedOpsStressTest : public StressTest {
       std::terminate();
     }
     thread->stats.AddBytesForWrites(1, sz);
-    PrintKeyValue(rand_column_family, static_cast<size_t>(rand_key),
-                  value, sz);
+    PrintKeyValue(rand_column_family, static_cast<size_t>(rand_key), value,
+                  sz);
     return s;
   }

   virtual Status TestDelete(ThreadState* thread, WriteOptions& write_opts,
-                         const std::vector<int>& rand_column_families,
-                         const std::vector<int64_t>& rand_keys,
-                         std::unique_ptr<MutexLock>& lock) {
+                            const std::vector<int>& rand_column_families,
+                            const std::vector<int64_t>& rand_keys,
+                            std::unique_ptr<MutexLock>& lock) {
     int64_t rand_key = rand_keys[0];
     int rand_column_family = rand_column_families[0];
     auto shared = thread->shared;
@@ -2735,8 +2700,8 @@ class NonBatchedOpsStressTest : public StressTest {
      lock.reset();
      rand_key = thread->rand.Next() % max_key;
      rand_column_family = thread->rand.Next() % FLAGS_column_families;
-     lock.reset(new MutexLock(
-         shared->GetMutexForKey(rand_column_family, rand_key)));
+     lock.reset(
+         new MutexLock(shared->GetMutexForKey(rand_column_family, rand_key)));
    }

    std::string key_str = Key(rand_key);
@@ -2789,21 +2754,19 @@ class NonBatchedOpsStressTest : public StressTest {
       shared->SingleDelete(rand_column_family, rand_key, false /* pending */);
       thread->stats.AddSingleDeletes(1);
       if (!s.ok()) {
-        fprintf(stderr, "single delete error: %s\n",
-            s.ToString().c_str());
+        fprintf(stderr, "single delete error: %s\n", s.ToString().c_str());
         std::terminate();
       }
     }
     return s;
   }

-  virtual Status TestDeleteRange(ThreadState* thread,
-                         WriteOptions& write_opts,
-                         const std::vector<int>& rand_column_families,
-                         const std::vector<int64_t>& rand_keys,
-                         std::unique_ptr<MutexLock>& lock) {
+  virtual Status TestDeleteRange(ThreadState* thread, WriteOptions& write_opts,
+                                 const std::vector<int>& rand_column_families,
+                                 const std::vector<int64_t>& rand_keys,
+                                 std::unique_ptr<MutexLock>& lock) {
     // OPERATION delete range
-    std::vector<std::unique_ptr<MutexLock>> range_locks;
+    std::vector<std::unique_ptr<MutexLock> > range_locks;
     // delete range does not respect disallowed overwrites. the keys for
     // which overwrites are disallowed are randomly distributed so it
     // could be expensive to find a range where each key allows
@@ -2814,17 +2777,17 @@ class NonBatchedOpsStressTest : public StressTest {
     int64_t max_key = shared->GetMaxKey();
     if (rand_key > max_key - FLAGS_range_deletion_width) {
       lock.reset();
-      rand_key = thread->rand.Next() %
-                 (max_key - FLAGS_range_deletion_width + 1);
-      range_locks.emplace_back(new MutexLock(
-          shared->GetMutexForKey(rand_column_family, rand_key)));
+      rand_key =
+          thread->rand.Next() % (max_key - FLAGS_range_deletion_width + 1);
+      range_locks.emplace_back(
+          new MutexLock(shared->GetMutexForKey(rand_column_family, rand_key)));
     } else {
       range_locks.emplace_back(std::move(lock));
     }
     for (int j = 1; j < FLAGS_range_deletion_width; ++j) {
       if (((rand_key + j) & ((1 << FLAGS_log2_keys_per_lock) - 1)) == 0) {
         range_locks.emplace_back(new MutexLock(
-              shared->GetMutexForKey(rand_column_family, rand_key + j)));
+            shared->GetMutexForKey(rand_column_family, rand_key + j)));
       }
     }
     shared->DeleteRange(rand_column_family, rand_key,
@@ -2838,13 +2801,12 @@ class NonBatchedOpsStressTest : public StressTest {
     Slice end_key = end_keystr;
     Status s = db_->DeleteRange(write_opts, cfh, key, end_key);
     if (!s.ok()) {
-      fprintf(stderr, "delete range error: %s\n",
-          s.ToString().c_str());
+      fprintf(stderr, "delete range error: %s\n", s.ToString().c_str());
       std::terminate();
     }
-    int covered = shared->DeleteRange(
-        rand_column_family, rand_key,
-        rand_key + FLAGS_range_deletion_width, false /* pending */);
+    int covered = shared->DeleteRange(rand_column_family, rand_key,
+                                      rand_key + FLAGS_range_deletion_width,
+                                      false /* pending */);
     thread->stats.AddRangeDeletions(1);
     thread->stats.AddCoveredByRangeDeletions(covered);
     return s;
@@ -2975,18 +2937,18 @@ class BatchedOpsStressTest : public StressTest {

   // Given a key K and value V, this puts ("0"+K, "0"+V), ("1"+K, "1"+V), ...
   // ("9"+K, "9"+V) in DB atomically i.e in a single batch.
   // Also refer BatchedOpsStressTest::TestGet
-  virtual Status TestPut(ThreadState* thread,
-      WriteOptions& write_opts, const ReadOptions& /* read_opts */,
-      const std::vector<int>& rand_column_families, const std::vector<int64_t>& rand_keys,
-      char (&value)[kValueMaxLen], std::unique_ptr<MutexLock>& /* lock */) {
+  virtual Status TestPut(ThreadState* thread, WriteOptions& write_opts,
+                         const ReadOptions& /* read_opts */,
+                         const std::vector<int>& rand_column_families,
+                         const std::vector<int64_t>& rand_keys,
+                         char (&value)[kValueMaxLen],
+                         std::unique_ptr<MutexLock>& /* lock */) {
     uint32_t value_base =
         thread->rand.Next() % thread->shared->UNKNOWN_SENTINEL;
     size_t sz = GenerateValue(value_base, value, sizeof(value));
     Slice v(value, sz);
-    std::string keys[10] = {"9", "8", "7", "6", "5",
-                            "4", "3", "2", "1", "0"};
-    std::string values[10] = {"9", "8", "7", "6", "5",
-                              "4", "3", "2", "1", "0"};
+    std::string keys[10] = {"9", "8", "7", "6", "5", "4", "3", "2", "1", "0"};
+    std::string values[10] = {"9", "8", "7", "6", "5", "4", "3", "2", "1", "0"};
     Slice value_slices[10];
     WriteBatch batch;
     Status s;
@@ -3018,11 +2980,10 @@ class BatchedOpsStressTest : public StressTest {
   // Given a key K, this deletes ("0"+K), ("1"+K),... ("9"+K)
   // in DB atomically i.e in a single batch. Also refer MultiGet.
   virtual Status TestDelete(ThreadState* thread, WriteOptions& writeoptions,
-                         const std::vector<int>& rand_column_families,
-                         const std::vector<int64_t>& rand_keys,
-                         std::unique_ptr<MutexLock>& /* lock */) {
-    std::string keys[10] = {"9", "7", "5", "3", "1",
-                            "8", "6", "4", "2", "0"};
+                            const std::vector<int>& rand_column_families,
+                            const std::vector<int64_t>& rand_keys,
+                            std::unique_ptr<MutexLock>& /* lock */) {
+    std::string keys[10] = {"9", "7", "5", "3", "1", "8", "6", "4", "2", "0"};
     WriteBatch batch;
     Status s;
@@ -3044,13 +3005,14 @@ class BatchedOpsStressTest : public StressTest {
     return s;
   }

-  virtual Status TestDeleteRange(ThreadState* /* thread */,
-                                 WriteOptions& /* write_opts */,
+  virtual Status TestDeleteRange(
+      ThreadState* /* thread */, WriteOptions& /* write_opts */,
       const std::vector<int>& /* rand_column_families */,
       const std::vector<int64_t>& /* rand_keys */,
       std::unique_ptr<MutexLock>& /* lock */) {
     assert(false);
-    return Status::NotSupported("BatchedOpsStressTest does not support "
+    return Status::NotSupported(
+        "BatchedOpsStressTest does not support "
         "TestDeleteRange");
   }
@@ -3072,8 +3034,8 @@ class BatchedOpsStressTest : public StressTest {
   // ASSUMES that BatchedOpsStressTest::TestPut was used to put (K, V) into
   // the DB.
   virtual Status TestGet(ThreadState* thread, const ReadOptions& readoptions,
-    const std::vector<int>& rand_column_families,
-    const std::vector<int64_t>& rand_keys) {
+                         const std::vector<int>& rand_column_families,
+                         const std::vector<int64_t>& rand_keys) {
     std::string keys[10] = {"0", "1", "2", "3", "4", "5", "6", "7", "8", "9"};
     Slice key_slices[10];
     std::string values[10];
@@ -3106,7 +3068,7 @@ class BatchedOpsStressTest : public StressTest {
           fprintf(stderr, "error expected prefix = %c actual = %c\n",
                   expected_prefix, actual_prefix);
         }
-        (values[i])[0] = ' '; // blank out the differing character
+        (values[i])[0] = ' ';  // blank out the differing character
         thread->stats.AddGets(1, 1);
       }
     }
@@ -3118,8 +3080,8 @@ class BatchedOpsStressTest : public StressTest {
         fprintf(stderr, "error : inconsistent values for key %s: %s, %s\n",
                 key.ToString(true).c_str(), StringToHex(values[0]).c_str(),
                 StringToHex(values[i]).c_str());
-      // we continue after error rather than exiting so that we can
-      // find more errors if any
+        // we continue after error rather than exiting so that we can
+        // find more errors if any
       }
     }
@@ -3132,9 +3094,10 @@ class BatchedOpsStressTest : public StressTest {
   // each series should be the same length, and it is verified for each
   // index i that all the i'th values are of the form "0"+V, "1"+V,..."9"+V.
   // ASSUMES that MultiPut was used to put (K, V)
-  virtual Status TestPrefixScan(ThreadState* thread, const ReadOptions& readoptions,
-                         const std::vector<int>& rand_column_families,
-                         const std::vector<int64_t>& rand_keys) {
+  virtual Status TestPrefixScan(ThreadState* thread,
+                                const ReadOptions& readoptions,
+                                const std::vector<int>& rand_column_families,
+                                const std::vector<int64_t>& rand_keys) {
     std::string key_str = Key(rand_keys[0]);
     Slice key = key_str;
     auto cfh = column_families_[rand_column_families[0]];
@@ -3173,13 +3136,14 @@ class BatchedOpsStressTest : public StressTest {
         fprintf(stderr, "error expected first = %c actual = %c\n",
                 expected_first, actual_first);
       }
-      (values[i])[0] = ' '; // blank out the differing character
+      (values[i])[0] = ' ';  // blank out the differing character
     }

     // make sure all values are equivalent
     for (int i = 0; i < 10; i++) {
       if (values[i] != values[0]) {
-        fprintf(stderr, "error : %d, inconsistent values for prefix %s: %s, %s\n",
-                i, prefixes[i].c_str(), StringToHex(values[0]).c_str(),
+        fprintf(stderr,
+                "error : %d, inconsistent values for prefix %s: %s, %s\n", i,
+                prefixes[i].c_str(), StringToHex(values[0]).c_str(),
                 StringToHex(values[i]).c_str());
         // we continue after error rather than exiting so that we can
         // find more errors if any
@@ -3221,10 +3185,10 @@ int main(int argc, char** argv) {
     dbstats = rocksdb::CreateDBStatistics();
   }
   FLAGS_compression_type_e =
-    StringToCompressionType(FLAGS_compression_type.c_str());
+      StringToCompressionType(FLAGS_compression_type.c_str());
   FLAGS_checksum_type_e = StringToChecksumType(FLAGS_checksum_type.c_str());
   if (!FLAGS_hdfs.empty()) {
-    FLAGS_env  = new rocksdb::HdfsEnv(FLAGS_hdfs);
+    FLAGS_env = new rocksdb::HdfsEnv(FLAGS_hdfs);
   }
   FLAGS_rep_factory = StringToRepFactory(FLAGS_memtablerep.c_str());
@@ -3251,29 +3215,28 @@ int main(int argc, char** argv) {
             "memtable_prefix_bloom_size_ratio\n");
     exit(1);
   }
-  if ((FLAGS_readpercent + FLAGS_prefixpercent +
-       FLAGS_writepercent + FLAGS_delpercent + FLAGS_delrangepercent +
-       FLAGS_iterpercent) != 100) {
-      fprintf(stderr,
-              "Error: Read+Prefix+Write+Delete+DeleteRange+Iterate percents != "
-              "100!\n");
-      exit(1);
+  if ((FLAGS_readpercent + FLAGS_prefixpercent + FLAGS_writepercent +
+       FLAGS_delpercent + FLAGS_delrangepercent + FLAGS_iterpercent) != 100) {
+    fprintf(stderr,
+            "Error: Read+Prefix+Write+Delete+DeleteRange+Iterate percents != "
+            "100!\n");
+    exit(1);
   }
   if (FLAGS_disable_wal == 1 && FLAGS_reopen > 0) {
     fprintf(stderr, "Error: Db cannot reopen safely with disable_wal set!\n");
     exit(1);
   }
   if ((unsigned)FLAGS_reopen >= FLAGS_ops_per_thread) {
-      fprintf(stderr,
-              "Error: #DB-reopens should be < ops_per_thread\n"
-              "Provided reopens = %d and ops_per_thread = %lu\n",
-              FLAGS_reopen,
-              (unsigned long)FLAGS_ops_per_thread);
-      exit(1);
+    fprintf(stderr,
+            "Error: #DB-reopens should be < ops_per_thread\n"
+            "Provided reopens = %d and ops_per_thread = %lu\n",
+            FLAGS_reopen, (unsigned long)FLAGS_ops_per_thread);
+    exit(1);
   }
   if (FLAGS_test_batches_snapshots && FLAGS_delrangepercent > 0) {
-    fprintf(stderr, "Error: nonzero delrangepercent unsupported in "
-                    "test_batches_snapshots mode\n");
+    fprintf(stderr,
+            "Error: nonzero delrangepercent unsupported in "
+            "test_batches_snapshots mode\n");
     exit(1);
   }
   if (FLAGS_active_width > FLAGS_max_key) {
@@ -3301,10 +3264,10 @@ int main(int argc, char** argv) {

   // Choose a location for the test database if none given with --db=
   if (FLAGS_db.empty()) {
-      std::string default_db_path;
-      rocksdb::Env::Default()->GetTestDirectory(&default_db_path);
-      default_db_path += "/dbstress";
-      FLAGS_db = default_db_path;
+    std::string default_db_path;
+    rocksdb::Env::Default()->GetTestDirectory(&default_db_path);
+    default_db_path += "/dbstress";
+    FLAGS_db = default_db_path;
   }

   rocksdb_kill_odds = FLAGS_kill_random_test;