Commit a4020b0: handle comments
dingxiaoshuai123 committed May 6, 2024 (1 parent: a85d6cf)

Showing 8 changed files with 33 additions and 25 deletions.
6 changes: 3 additions & 3 deletions src/db.cc
@@ -40,7 +40,7 @@ DB::DB(int db_index, const std::string& db_path, int rocksdb_inst_num)
abort();
}

- opened_.store(true);
+ opened_ = true;
INFO("Open DB{} success!", db_index_);
}

@@ -83,7 +83,6 @@ void DB::CreateCheckpoint(const std::string& path, bool sync) {
}

void DB::LoadDBFromCheckpoint(const std::string& path, bool sync) {
- opened_.store(false);
auto checkpoint_path = path + '/' + std::to_string(db_index_);
if (0 != pstd::IsDir(path)) {
WARN("Checkpoint dir {} does not exist!", checkpoint_path);
@@ -97,6 +96,7 @@ void DB::LoadDBFromCheckpoint(const std::string& path, bool sync) {
}

std::lock_guard<std::shared_mutex> lock(storage_mutex_);
+ opened_ = false;
std::vector<std::future<void>> result;
result.reserve(rocksdb_inst_num_);
for (int i = 0; i < rocksdb_inst_num_; ++i) {
@@ -128,7 +128,7 @@ void DB::LoadDBFromCheckpoint(const std::string& path, bool sync) {
ERROR("Storage open failed! {}", s.ToString());
abort();
}
- opened_.store(true);
+ opened_ = true;
INFO("DB{} load a checkpoint from {} success!", db_index_, path);
}
} // namespace pikiwidb
2 changes: 1 addition & 1 deletion src/db.h
@@ -52,7 +52,7 @@ class DB {
*/
std::shared_mutex storage_mutex_;
std::unique_ptr<storage::Storage> storage_;
- std::atomic_bool opened_ = false;
+ bool opened_ = false;
};

} // namespace pikiwidb
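
Note: the db.cc and db.h hunks above replace std::atomic_bool with a plain bool; the commit relies on opened_ being written either before the DB is visible to other threads or under storage_mutex_. A minimal standalone sketch of that pattern (hypothetical class and member names, not PikiwiDB's API), assuming every concurrent access really does go through the same mutex:

#include <mutex>
#include <shared_mutex>

// Sketch: a plain bool is enough when every read and write of the flag is
// serialized by the same shared_mutex (exclusive lock for writers, shared
// lock for readers), which is the invariant the hunks above depend on.
class GuardedFlag {
 public:
  void SetOpened(bool value) {
    std::lock_guard<std::shared_mutex> lock(mutex_);  // exclusive lock, as in LoadDBFromCheckpoint
    opened_ = value;
  }

  bool IsOpened() const {
    std::shared_lock<std::shared_mutex> lock(mutex_);  // shared lock for readers
    return opened_;
  }

 private:
  mutable std::shared_mutex mutex_;
  bool opened_ = false;  // no atomic needed: never touched outside mutex_
};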
2 changes: 1 addition & 1 deletion src/praft/praft_service.h
@@ -20,7 +20,7 @@ class DummyServiceImpl : public DummyService {
::pikiwidb::DummyResponse* response, ::google::protobuf::Closure* done) override {}

private:
- PRaft* praft_;
+ PRaft* praft_ = nullptr;
};

} // namespace pikiwidb
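
Note: praft_ here and db_ in src/storage/src/batch.h below get in-class default initializers so the raw pointers can never be read uninitialized. A small standalone sketch of why that matters (hypothetical names, not taken from the repository):

#include <iostream>

// Sketch: with an in-class initializer the pointer is nullptr on every
// constructor path, so a missed wiring step shows up as a clean null check
// instead of a read of an indeterminate value (undefined behavior).
class Service {
 public:
  void Handle() {
    if (backend_ == nullptr) {  // well-defined thanks to the default initializer
      std::cerr << "backend not set\n";
      return;
    }
    std::cout << *backend_ << '\n';
  }

 private:
  const int* backend_ = nullptr;  // same pattern as praft_ and db_ in this commit
};

int main() {
  Service s;
  s.Handle();  // prints "backend not set" rather than invoking UB
  return 0;
}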
2 changes: 0 additions & 2 deletions src/storage/src/base_filter.h
@@ -16,8 +16,6 @@
#include "src/base_meta_value_format.h"
#include "src/debug.h"

#include "braft/raft.h"

namespace storage {

class BaseMetaFilter : public rocksdb::CompactionFilter {
2 changes: 1 addition & 1 deletion src/storage/src/batch.h
@@ -54,7 +54,7 @@ class RocksBatch : public Batch {

private:
rocksdb::WriteBatch batch_;
- rocksdb::DB* db_;
+ rocksdb::DB* db_ = nullptr;
const rocksdb::WriteOptions& options_;
const std::vector<rocksdb::ColumnFamilyHandle*>& handles_;
};
32 changes: 16 additions & 16 deletions src/storage/src/redis.cc
@@ -146,22 +146,6 @@ Status Redis::Open(const StorageOptions& storage_options, const std::string& db_
zset_data_cf_ops.table_factory.reset(rocksdb::NewBlockBasedTableFactory(zset_data_cf_table_ops));
zset_score_cf_ops.table_factory.reset(rocksdb::NewBlockBasedTableFactory(zset_score_cf_table_ops));

- std::vector<rocksdb::ColumnFamilyDescriptor> column_families;
- column_families.emplace_back(rocksdb::kDefaultColumnFamilyName, string_cf_ops);
- // hash CF
- column_families.emplace_back("hash_meta_cf", hash_meta_cf_ops);
- column_families.emplace_back("hash_data_cf", hash_data_cf_ops);
- // set CF
- column_families.emplace_back("set_meta_cf", set_meta_cf_ops);
- column_families.emplace_back("set_data_cf", set_data_cf_ops);
- // list CF
- column_families.emplace_back("list_meta_cf", list_meta_cf_ops);
- column_families.emplace_back("list_data_cf", list_data_cf_ops);
- // zset CF
- column_families.emplace_back("zset_meta_cf", zset_meta_cf_ops);
- column_families.emplace_back("zset_data_cf", zset_data_cf_ops);
- column_families.emplace_back("zset_score_cf", zset_score_cf_ops);

if (append_log_function_) {
// Add log index table property collector factory to each column family
ADD_TABLE_PROPERTY_COLLECTOR_FACTORY(string);
@@ -180,6 +164,22 @@ Status Redis::Open(const StorageOptions& storage_options, const std::string& db_
&handles_, &log_index_collector_, &log_index_of_all_cfs_, storage_options.do_snapshot_function));
}

+ std::vector<rocksdb::ColumnFamilyDescriptor> column_families;
+ column_families.emplace_back(rocksdb::kDefaultColumnFamilyName, string_cf_ops);
+ // hash CF
+ column_families.emplace_back("hash_meta_cf", hash_meta_cf_ops);
+ column_families.emplace_back("hash_data_cf", hash_data_cf_ops);
+ // set CF
+ column_families.emplace_back("set_meta_cf", set_meta_cf_ops);
+ column_families.emplace_back("set_data_cf", set_data_cf_ops);
+ // list CF
+ column_families.emplace_back("list_meta_cf", list_meta_cf_ops);
+ column_families.emplace_back("list_data_cf", list_data_cf_ops);
+ // zset CF
+ column_families.emplace_back("zset_meta_cf", zset_meta_cf_ops);
+ column_families.emplace_back("zset_data_cf", zset_data_cf_ops);
+ column_families.emplace_back("zset_score_cf", zset_score_cf_ops);

auto s = rocksdb::DB::Open(db_ops, db_path, column_families, &handles_, &db_);
if (!s.ok()) {
return s;
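
Note: the redis.cc hunks move the column_families setup below the ADD_TABLE_PROPERTY_COLLECTOR_FACTORY block. A rocksdb::ColumnFamilyDescriptor stores its own copy of the options taken at construction, so collector factories registered after the descriptors were built would never reach rocksdb::DB::Open. A standalone sketch of that copy-at-construction pitfall using hypothetical stand-in types (no RocksDB dependency):

#include <cassert>
#include <string>
#include <utility>
#include <vector>

// Stand-ins for rocksdb::ColumnFamilyOptions / ColumnFamilyDescriptor: the
// descriptor keeps its own copy of the options taken at construction time.
struct CfOptions {
  std::vector<std::string> collector_factories;
};

struct CfDescriptor {
  CfDescriptor(std::string n, CfOptions o) : name(std::move(n)), options(std::move(o)) {}
  std::string name;
  CfOptions options;  // value copy, later changes to the source do not propagate
};

int main() {
  CfOptions hash_meta_cf_ops;
  std::vector<CfDescriptor> column_families;

  // Old order: descriptor built first, factory registered afterwards -> lost.
  column_families.emplace_back("hash_meta_cf", hash_meta_cf_ops);
  hash_meta_cf_ops.collector_factories.push_back("log_index_collector");
  assert(column_families.back().options.collector_factories.empty());

  // New order (what this commit does): register factories, then build descriptors.
  column_families.clear();
  column_families.emplace_back("hash_meta_cf", hash_meta_cf_ops);
  assert(column_families.back().options.collector_factories.size() == 1);
  return 0;
}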
5 changes: 4 additions & 1 deletion src/storage/src/storage.cc
@@ -160,7 +160,7 @@ Status Storage::CreateCheckpoint(const std::string& dump_path, int i) {
return Status::IOError("DeleteDirIfExist() fail! dir_name : {} ", tmp_dir);
}

- // 2) Create checkpoint of this RocksDB
+ // 2) Create checkpoint object of this RocksDB
rocksdb::Checkpoint* checkpoint = nullptr;
auto db = insts_[i]->GetDB();
rocksdb::Status s = rocksdb::Checkpoint::Create(db, &checkpoint);
@@ -180,6 +180,9 @@ Status Storage::CreateCheckpoint(const std::string& dump_path, int i) {
// 4) Make sure the source directory does not exist
if (!pstd::DeleteDirIfExist(source_dir)) {
WARN("DB{}'s RocksDB {} delete directory {} fail!", db_id_, i, source_dir);
+ if (!pstd::DeleteDirIfExist(tmp_dir)) {
+   WARN("DB{}'s RocksDB {} fail to delete the temporary directory {} ", db_id_, i, tmp_dir);
+ }
return Status::IOError("DeleteDirIfExist() fail! dir_name : {} ", source_dir);
}

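
Note: the storage.cc hunks touch two pieces of Storage::CreateCheckpoint: creating the rocksdb::Checkpoint object (step 2) and cleaning up the temporary checkpoint directory when the old destination cannot be removed. A reduced sketch of the underlying RocksDB checkpoint flow, assuming only an already-open rocksdb::DB* and leaving out the tmp-dir staging and directory swap handled by the real code:

#include <memory>
#include <string>

#include <rocksdb/db.h>
#include <rocksdb/utilities/checkpoint.h>

// Sketch: create a Checkpoint object for an open DB (step 2), then materialize
// the checkpoint into a directory that must not exist yet.
rocksdb::Status MakeCheckpoint(rocksdb::DB* db, const std::string& checkpoint_dir) {
  rocksdb::Checkpoint* raw = nullptr;
  rocksdb::Status s = rocksdb::Checkpoint::Create(db, &raw);
  if (!s.ok()) {
    return s;
  }
  std::unique_ptr<rocksdb::Checkpoint> checkpoint(raw);  // ensure the object is released
  return checkpoint->CreateCheckpoint(checkpoint_dir);
}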
7 changes: 7 additions & 0 deletions tests/consistency_test.go
@@ -1,3 +1,10 @@
+ /*
+  * Copyright (c) 2024-present, Qihoo, Inc. All rights reserved.
+  * This source code is licensed under the BSD-style license found in the
+  * LICENSE file in the root directory of this source tree. An additional grant
+  * of patent rights can be found in the PATENTS file in the same directory.
+  */

package pikiwidb_test

import (
