Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account-related emails.

Already on GitHub? Sign in to your account

osd: add override in osd subsystem #13439

Merged
merged 1 commit on Feb 15, 2017
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Jump to
Jump to file
Failed to load files.
Diff view
Diff view
8 changes: 4 additions & 4 deletions src/os/bluestore/BitmapFreelistManager.cc
Expand Up @@ -19,14 +19,14 @@ void make_offset_key(uint64_t offset, std::string *key)
}

struct XorMergeOperator : public KeyValueDB::MergeOperator {
virtual void merge_nonexistent(
void merge_nonexistent(
const char *rdata, size_t rlen, std::string *new_value) override {
*new_value = std::string(rdata, rlen);
}
virtual void merge(
void merge(
const char *ldata, size_t llen,
const char *rdata, size_t rlen,
std::string *new_value) {
std::string *new_value) override {
assert(llen == rlen);
*new_value = std::string(ldata, llen);
for (size_t i = 0; i < rlen; ++i) {
Expand All @@ -35,7 +35,7 @@ struct XorMergeOperator : public KeyValueDB::MergeOperator {
}
// We use each operator name and each prefix to construct the
// overall RocksDB operator name for consistency check at open time.
virtual string name() const {
string name() const override {
return "bitwise_xor";
}
};
Expand Down
40 changes: 20 additions & 20 deletions src/os/bluestore/BlueRocksEnv.cc
Expand Up @@ -42,7 +42,7 @@ class BlueRocksSequentialFile : public rocksdb::SequentialFile {
// If an error was encountered, returns a non-OK status.
//
// REQUIRES: External synchronization
rocksdb::Status Read(size_t n, rocksdb::Slice* result, char* scratch) {
rocksdb::Status Read(size_t n, rocksdb::Slice* result, char* scratch) override {
int r = fs->read(h, &h->buf, h->buf.pos, n, NULL, scratch);
assert(r >= 0);
*result = rocksdb::Slice(scratch, r);
Expand All @@ -56,15 +56,15 @@ class BlueRocksSequentialFile : public rocksdb::SequentialFile {
// file, and Skip will return OK.
//
// REQUIRES: External synchronization
rocksdb::Status Skip(uint64_t n) {
rocksdb::Status Skip(uint64_t n) override {
h->buf.skip(n);
return rocksdb::Status::OK();
}

// Remove any kind of caching of data from the offset to offset+length
// of this file. If the length is 0, then it refers to the end of file.
// If the system is not caching the file contents, then this is a noop.
rocksdb::Status InvalidateCache(size_t offset, size_t length) {
rocksdb::Status InvalidateCache(size_t offset, size_t length) override {
fs->invalidate_cache(h->file, offset, length);
return rocksdb::Status::OK();
}
Expand All @@ -90,7 +90,7 @@ class BlueRocksRandomAccessFile : public rocksdb::RandomAccessFile {
//
// Safe for concurrent use by multiple threads.
rocksdb::Status Read(uint64_t offset, size_t n, rocksdb::Slice* result,
char* scratch) const {
char* scratch) const override {
int r = fs->read_random(h, offset, n, scratch);
assert(r >= 0);
*result = rocksdb::Slice(scratch, r);
Expand All @@ -99,13 +99,13 @@ class BlueRocksRandomAccessFile : public rocksdb::RandomAccessFile {

// Used by the file_reader_writer to decide if the ReadAhead wrapper
// should simply forward the call and do not enact buffering or locking.
bool ShouldForwardRawRequest() const {
bool ShouldForwardRawRequest() const override {
return false;
}

// For cases when read-ahead is implemented in the platform dependent
// layer
void EnableReadAhead() {}
void EnableReadAhead() override {}

// Tries to get an unique ID for this file that will be the same each time
// the file is opened (and will stay the same while the file is open).
Expand All @@ -122,14 +122,14 @@ class BlueRocksRandomAccessFile : public rocksdb::RandomAccessFile {
// a single varint.
//
// Note: these IDs are only valid for the duration of the process.
size_t GetUniqueId(char* id, size_t max_size) const {
size_t GetUniqueId(char* id, size_t max_size) const override {
return snprintf(id, max_size, "%016llx",
(unsigned long long)h->file->fnode.ino);
};

//enum AccessPattern { NORMAL, RANDOM, SEQUENTIAL, WILLNEED, DONTNEED };

void Hint(AccessPattern pattern) {
void Hint(AccessPattern pattern) override {
if (pattern == RANDOM)
h->buf.max_prefetch = 4096;
else if (pattern == SEQUENTIAL)
Expand All @@ -139,7 +139,7 @@ class BlueRocksRandomAccessFile : public rocksdb::RandomAccessFile {
// Remove any kind of caching of data from the offset to offset+length
// of this file. If the length is 0, then it refers to the end of file.
// If the system is not caching the file contents, then this is a noop.
rocksdb::Status InvalidateCache(size_t offset, size_t length) {
rocksdb::Status InvalidateCache(size_t offset, size_t length) override {
fs->invalidate_cache(h->file, offset, length);
return rocksdb::Status::OK();
}
Expand Down Expand Up @@ -170,7 +170,7 @@ class BlueRocksWritableFile : public rocksdb::WritableFile {
return c_DefaultPageSize;
}*/

rocksdb::Status Append(const rocksdb::Slice& data) {
rocksdb::Status Append(const rocksdb::Slice& data) override {
h->append(data.data(), data.size());
return rocksdb::Status::OK();
}
Expand All @@ -179,23 +179,23 @@ class BlueRocksWritableFile : public rocksdb::WritableFile {
// to simple append as most of the tests are buffered by default
rocksdb::Status PositionedAppend(
const rocksdb::Slice& /* data */,
uint64_t /* offset */) {
uint64_t /* offset */) override {
return rocksdb::Status::NotSupported();
}

// Truncate is necessary to trim the file to the correct size
// before closing. It is not always possible to keep track of the file
// size due to whole pages writes. The behavior is undefined if called
// with other writes to follow.
rocksdb::Status Truncate(uint64_t size) {
rocksdb::Status Truncate(uint64_t size) override {
// we mirror the posix env, which does nothing here; instead, it
// truncates to the final size on close. whatever!
return rocksdb::Status::OK();
//int r = fs->truncate(h, size);
// return err_to_status(r);
}

rocksdb::Status Close() {
rocksdb::Status Close() override {
Flush();

// mimic posix env, here. shrug.
Expand All @@ -211,19 +211,19 @@ class BlueRocksWritableFile : public rocksdb::WritableFile {
return rocksdb::Status::OK();
}

rocksdb::Status Flush() {
rocksdb::Status Flush() override {
fs->flush(h);
return rocksdb::Status::OK();
}

rocksdb::Status Sync() { // sync data
rocksdb::Status Sync() override { // sync data
fs->fsync(h);
return rocksdb::Status::OK();
}

// true if Sync() and Fsync() are safe to call concurrently with Append()
// and Flush().
bool IsSyncThreadSafe() const {
bool IsSyncThreadSafe() const override {
return true;
}

Expand All @@ -236,12 +236,12 @@ class BlueRocksWritableFile : public rocksdb::WritableFile {
/*
* Get the size of valid data in the file.
*/
uint64_t GetFileSize() {
uint64_t GetFileSize() override {
return h->file->fnode.size + h->buffer.length();;
}

// For documentation, refer to RandomAccessFile::GetUniqueId()
size_t GetUniqueId(char* id, size_t max_size) const {
size_t GetUniqueId(char* id, size_t max_size) const override {
return snprintf(id, max_size, "%016llx",
(unsigned long long)h->file->fnode.ino);
}
Expand All @@ -250,7 +250,7 @@ class BlueRocksWritableFile : public rocksdb::WritableFile {
// of this file. If the length is 0, then it refers to the end of file.
// If the system is not caching the file contents, then this is a noop.
// This call has no effect on dirty pages in the cache.
rocksdb::Status InvalidateCache(size_t offset, size_t length) {
rocksdb::Status InvalidateCache(size_t offset, size_t length) override {
fs->invalidate_cache(h->file, offset, length);
return rocksdb::Status::OK();
}
Expand Down Expand Up @@ -293,7 +293,7 @@ class BlueRocksDirectory : public rocksdb::Directory {
explicit BlueRocksDirectory(BlueFS *f) : fs(f) {}

// Fsync directory. Can be called concurrently from multiple threads.
rocksdb::Status Fsync() {
rocksdb::Status Fsync() override {
// it is sufficient to flush the log.
fs->sync_metadata();
return rocksdb::Status::OK();
Expand Down
8 changes: 4 additions & 4 deletions src/os/bluestore/BlueStore.cc
Expand Up @@ -494,14 +494,14 @@ static void get_wal_key(uint64_t seq, string *out)
// merge operators

struct Int64ArrayMergeOperator : public KeyValueDB::MergeOperator {
virtual void merge_nonexistent(
void merge_nonexistent(
const char *rdata, size_t rlen, std::string *new_value) override {
*new_value = std::string(rdata, rlen);
}
virtual void merge(
void merge(
const char *ldata, size_t llen,
const char *rdata, size_t rlen,
std::string *new_value) {
std::string *new_value) override {
assert(llen == rlen);
assert((rlen % 8) == 0);
new_value->resize(rlen);
Expand All @@ -514,7 +514,7 @@ struct Int64ArrayMergeOperator : public KeyValueDB::MergeOperator {
}
// We use each operator name and each prefix to construct the
// overall RocksDB operator name for consistency check at open time.
virtual string name() const {
string name() const override {
return "int64_array";
}
};
Expand Down
4 changes: 2 additions & 2 deletions src/os/filestore/FileStore.cc
Expand Up @@ -2049,7 +2049,7 @@ struct C_JournaledAhead : public Context {

C_JournaledAhead(FileStore *f, FileStore::OpSequencer *os, FileStore::Op *o, Context *ondisk):
fs(f), osr(os), o(o), ondisk(ondisk) { }
void finish(int r) {
void finish(int r) override {
fs->_journaled_ahead(osr, o, ondisk);
}
};
Expand Down Expand Up @@ -3807,7 +3807,7 @@ class SyncEntryTimeout : public Context {
{
}

void finish(int r) {
void finish(int r) override {
BackTrace *bt = new BackTrace(1);
generic_dout(-1) << "FileStore: sync_entry timed out after "
<< m_commit_timeo << " seconds.\n";
Expand Down
16 changes: 8 additions & 8 deletions src/os/memstore/MemStore.cc
Expand Up @@ -615,39 +615,39 @@ class MemStore::OmapIteratorImpl : public ObjectMap::ObjectMapIteratorImpl {
OmapIteratorImpl(CollectionRef c, ObjectRef o)
: c(c), o(o), it(o->omap.begin()) {}

int seek_to_first() {
int seek_to_first() override {
std::lock_guard<std::mutex>(o->omap_mutex);
it = o->omap.begin();
return 0;
}
int upper_bound(const string &after) {
int upper_bound(const string &after) override {
std::lock_guard<std::mutex>(o->omap_mutex);
it = o->omap.upper_bound(after);
return 0;
}
int lower_bound(const string &to) {
int lower_bound(const string &to) override {
std::lock_guard<std::mutex>(o->omap_mutex);
it = o->omap.lower_bound(to);
return 0;
}
bool valid() {
bool valid() override {
std::lock_guard<std::mutex>(o->omap_mutex);
return it != o->omap.end();
}
int next(bool validate=true) {
int next(bool validate=true) override {
std::lock_guard<std::mutex>(o->omap_mutex);
++it;
return 0;
}
string key() {
string key() override {
std::lock_guard<std::mutex>(o->omap_mutex);
return it->first;
}
bufferlist value() {
bufferlist value() override {
std::lock_guard<std::mutex>(o->omap_mutex);
return it->second;
}
int status() {
int status() override {
return 0;
}
};
Expand Down
12 changes: 6 additions & 6 deletions src/osd/ECBackend.cc
Expand Up @@ -234,7 +234,7 @@ struct OnRecoveryReadComplete :
set<int> want;
OnRecoveryReadComplete(ECBackend *pg, const hobject_t &hoid)
: pg(pg), hoid(hoid) {}
void finish(pair<RecoveryMessages *, ECBackend::read_result_t &> &in) {
void finish(pair<RecoveryMessages *, ECBackend::read_result_t &> &in) override {
ECBackend::read_result_t &res = in.second;
if (!(res.r == 0 && res.errors.empty())) {
pg->_failed_push(hoid, in);
Expand Down Expand Up @@ -450,7 +450,7 @@ struct SendPushReplies : public Context {
map<int, MOSDPGPushReply*> &in) : l(l), epoch(epoch) {
replies.swap(in);
}
void finish(int) {
void finish(int) override {
for (map<int, MOSDPGPushReply*>::iterator i = replies.begin();
i != replies.end();
++i) {
Expand Down Expand Up @@ -799,7 +799,7 @@ struct SubWriteCommitted : public Context {
eversion_t last_complete)
: pg(pg), msg(msg), tid(tid),
version(version), last_complete(last_complete) {}
void finish(int) {
void finish(int) override {
if (msg)
msg->mark_event("sub_op_committed");
pg->sub_write_committed(tid, version, last_complete);
Expand Down Expand Up @@ -842,7 +842,7 @@ struct SubWriteApplied : public Context {
ceph_tid_t tid,
eversion_t version)
: pg(pg), msg(msg), tid(tid), version(version) {}
void finish(int) {
void finish(int) override {
if (msg)
msg->mark_event("sub_op_applied");
pg->sub_write_applied(tid, version);
Expand Down Expand Up @@ -1227,7 +1227,7 @@ struct FinishReadOp : public GenContext<ThreadPool::TPHandle&> {
ECBackend *ec;
ceph_tid_t tid;
FinishReadOp(ECBackend *ec, ceph_tid_t tid) : ec(ec), tid(tid) {}
void finish(ThreadPool::TPHandle &handle) {
void finish(ThreadPool::TPHandle &handle) override {
auto ropiter = ec->tid_to_read_map.find(tid);
assert(ropiter != ec->tid_to_read_map.end());
int priority = ropiter->second.priority;
Expand Down Expand Up @@ -2138,7 +2138,7 @@ struct CallClientContexts :
ECBackend::ClientAsyncReadStatus *status,
const list<boost::tuple<uint64_t, uint64_t, uint32_t> > &to_read)
: hoid(hoid), ec(ec), status(status), to_read(to_read) {}
void finish(pair<RecoveryMessages *, ECBackend::read_result_t &> &in) {
void finish(pair<RecoveryMessages *, ECBackend::read_result_t &> &in) override {
ECBackend::read_result_t &res = in.second;
extent_map result;
if (res.r != 0)
Expand Down