Add a listener callback for end of auto error recovery (#9244)
Summary:
Previously, the OnErrorRecoveryCompleted callback was called when
RocksDB was able to recover successfully from a retryable error.
However, if the recovery failed and was eventually stopped, there was no
indication of the final status. To fix that, a new OnErrorRecoveryEnd
callback is introduced that deprecates OnErrorRecoveryCompleted. The new
callback is called with both the original error and the new error status.
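
For illustration, a minimal sketch of how an application might consume the new callback. The listener and options APIs shown are standard RocksDB; the class name, database path, and log messages are illustrative:

#include <iostream>
#include <memory>

#include "rocksdb/db.h"
#include "rocksdb/listener.h"
#include "rocksdb/options.h"

// Illustrative listener that reports how each automatic error recovery ended.
class RecoveryLogger : public rocksdb::EventListener {
 public:
  const char* Name() const override { return "RecoveryLogger"; }

  void OnErrorRecoveryEnd(
      const rocksdb::BackgroundErrorRecoveryInfo& info) override {
    if (info.new_bg_error.ok()) {
      // Recovery succeeded; normal writes can be issued again.
      std::cout << "Recovered from: " << info.old_bg_error.ToString() << "\n";
    } else {
      // Recovery ended without clearing the background error.
      std::cout << "Recovery ended with: " << info.new_bg_error.ToString()
                << "\n";
    }
  }
};

int main() {
  rocksdb::Options options;
  options.create_if_missing = true;
  options.listeners.emplace_back(std::make_shared<RecoveryLogger>());

  rocksdb::DB* db = nullptr;
  rocksdb::Status s =
      rocksdb::DB::Open(options, "/tmp/recovery_listener_demo", &db);
  if (!s.ok()) {
    std::cerr << s.ToString() << std::endl;
    return 1;
  }
  // ... workload; the callback fires whenever an automatic recovery ends.
  delete db;
  return 0;
}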

Pull Request resolved: #9244

Test Plan: Add a new unit test in error_handler_fs_test

Reviewed By: zhichao-cao

Differential Revision: D32922303

Pulled By: anand1976

fbshipit-source-id: f04e77a9cb92c5ea6385590682d3fcf559971b99
anand76 authored and facebook-github-bot committed Dec 8, 2021
1 parent 9e4d56f commit ecf2bec
Showing 6 changed files with 103 additions and 9 deletions.
1 change: 1 addition & 0 deletions HISTORY.md
@@ -9,6 +9,7 @@
* MemTableList::TrimHistory now uses allocated bytes when max_write_buffer_size_to_maintain > 0 (default in TransactionDB, introduced in PR #5022). Fixes #8371.
### Public API change
* Extend WriteBatch::AssignTimestamp and AssignTimestamps API so that both functions can accept an optional `checker` argument that performs additional checking on timestamp sizes.
* Introduce a new EventListener callback that will be called upon the end of automatic error recovery.

### Performance Improvements
* Replaced map property `TableProperties::properties_offsets` with uint64_t property `external_sst_file_global_seqno_offset` to save table properties' memory.
27 changes: 23 additions & 4 deletions db/error_handler.cc
@@ -540,15 +540,17 @@ Status ErrorHandler::ClearBGError() {
// Signal that recovery succeeded
if (recovery_error_.ok()) {
Status old_bg_error = bg_error_;
// old_bg_error is only for notifying listeners, so may not be checked
old_bg_error.PermitUncheckedError();
// Clear and check the recovery IO and BG error
bg_error_ = Status::OK();
recovery_io_error_ = IOStatus::OK();
bg_error_.PermitUncheckedError();
recovery_io_error_.PermitUncheckedError();
recovery_in_prog_ = false;
soft_error_no_bg_work_ = false;
EventHelpers::NotifyOnErrorRecoveryCompleted(db_options_.listeners,
old_bg_error, db_mutex_);
EventHelpers::NotifyOnErrorRecoveryEnd(db_options_.listeners, old_bg_error,
bg_error_, db_mutex_);
}
return recovery_error_;
#else
@@ -665,6 +667,9 @@ void ErrorHandler::RecoverFromRetryableBGIOError() {
TEST_SYNC_POINT("RecoverFromRetryableBGIOError:BeforeStart");
InstrumentedMutexLock l(db_mutex_);
if (end_recovery_) {
EventHelpers::NotifyOnErrorRecoveryEnd(db_options_.listeners, bg_error_,
Status::ShutdownInProgress(),
db_mutex_);
return;
}
DBRecoverContext context = recover_context_;
@@ -674,6 +679,9 @@ void ErrorHandler::RecoverFromRetryableBGIOError() {
// Recover from the retryable error. Create a separate thread to do it.
while (resume_count > 0) {
if (end_recovery_) {
EventHelpers::NotifyOnErrorRecoveryEnd(db_options_.listeners, bg_error_,
Status::ShutdownInProgress(),
db_mutex_);
return;
}
TEST_SYNC_POINT("RecoverFromRetryableBGIOError:BeforeResume0");
@@ -695,6 +703,8 @@ void ErrorHandler::RecoverFromRetryableBGIOError() {
RecordInHistogram(bg_error_stats_.get(),
ERROR_HANDLER_AUTORESUME_RETRY_COUNT, retry_count);
}
EventHelpers::NotifyOnErrorRecoveryEnd(db_options_.listeners, bg_error_,
bg_error_, db_mutex_);
return;
}
if (!recovery_io_error_.ok() &&
@@ -718,8 +728,8 @@
Status old_bg_error = bg_error_;
bg_error_ = Status::OK();
bg_error_.PermitUncheckedError();
EventHelpers::NotifyOnErrorRecoveryCompleted(db_options_.listeners,
old_bg_error, db_mutex_);
EventHelpers::NotifyOnErrorRecoveryEnd(
db_options_.listeners, old_bg_error, bg_error_, db_mutex_);
if (bg_error_stats_ != nullptr) {
RecordTick(bg_error_stats_.get(),
ERROR_HANDLER_AUTORESUME_SUCCESS_COUNT);
@@ -739,12 +749,21 @@ void ErrorHandler::RecoverFromRetryableBGIOError() {
RecordInHistogram(bg_error_stats_.get(),
ERROR_HANDLER_AUTORESUME_RETRY_COUNT, retry_count);
}
EventHelpers::NotifyOnErrorRecoveryEnd(
db_options_.listeners, bg_error_,
!recovery_io_error_.ok()
? recovery_io_error_
: (!recovery_error_.ok() ? recovery_error_ : s),
db_mutex_);
return;
}
}
resume_count--;
}
recovery_in_prog_ = false;
EventHelpers::NotifyOnErrorRecoveryEnd(
db_options_.listeners, bg_error_,
Status::Aborted("Exceeded resume retry count"), db_mutex_);
TEST_SYNC_POINT("RecoverFromRetryableBGIOError:LoopOut");
if (bg_error_stats_ != nullptr) {
RecordInHistogram(bg_error_stats_.get(),
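Reading the call sites above, new_bg_error encodes how the attempt ended: Status::OK() when recovery succeeded, Status::ShutdownInProgress() when recovery was cut short by shutdown, the last recovery/IO error when a non-retryable failure stopped the resume loop, and Status::Aborted("Exceeded resume retry count") when the retry budget was exhausted. For reference, a minimal sketch of classifying these outcomes on the listener side; the helper name and labels are illustrative:

#include <string>

#include "rocksdb/listener.h"
#include "rocksdb/status.h"

// Illustrative helper that maps the terminal status reported through
// OnErrorRecoveryEnd to a human-readable label, following the call sites in
// ErrorHandler::RecoverFromRetryableBGIOError() above.
std::string ClassifyRecoveryEnd(
    const rocksdb::BackgroundErrorRecoveryInfo& info) {
  const rocksdb::Status& s = info.new_bg_error;
  if (s.ok()) {
    return "recovered";                // auto resume succeeded
  }
  if (s.IsShutdownInProgress()) {
    return "abandoned (shutdown)";     // DB began closing during recovery
  }
  if (s.IsAborted()) {
    return "gave up (retry budget)";   // max_bgerror_resume_count exhausted
  }
  return "failed: " + s.ToString();    // a non-retryable error stopped resume
}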
47 changes: 47 additions & 0 deletions db/error_handler_fs_test.cc
@@ -66,6 +66,7 @@ class ErrorHandlerFSListener : public EventListener {
~ErrorHandlerFSListener() {
file_creation_error_.PermitUncheckedError();
bg_error_.PermitUncheckedError();
new_bg_error_.PermitUncheckedError();
}

void OnTableFileCreationStarted(
@@ -96,6 +97,13 @@ class ErrorHandlerFSListener : public EventListener {
old_bg_error.PermitUncheckedError();
}

void OnErrorRecoveryEnd(const BackgroundErrorRecoveryInfo& info) override {
InstrumentedMutexLock l(&mutex_);
recovery_complete_ = true;
cv_.SignalAll();
new_bg_error_ = info.new_bg_error;
}

bool WaitForRecovery(uint64_t /*abs_time_us*/) {
InstrumentedMutexLock l(&mutex_);
while (!recovery_complete_) {
@@ -138,6 +146,8 @@ class ErrorHandlerFSListener : public EventListener {
file_creation_error_ = io_s;
}

Status new_bg_error() { return new_bg_error_; }

private:
InstrumentedMutex mutex_;
InstrumentedCondVar cv_;
@@ -148,6 +158,7 @@
int file_count_;
IOStatus file_creation_error_;
Status bg_error_;
Status new_bg_error_;
FaultInjectionTestFS* fault_fs_;
};

@@ -2428,6 +2439,42 @@ TEST_F(DBErrorHandlingFSTest, WALWriteRetryableErrorAutoRecover2) {
Close();
}

// Fail auto resume from a flush retryable error and verify that the
// OnErrorRecoveryEnd listener callback is called
TEST_F(DBErrorHandlingFSTest, FLushWritRetryableErrorAbortRecovery) {
// Activate the FS before the first resume
std::shared_ptr<ErrorHandlerFSListener> listener(
new ErrorHandlerFSListener());
Options options = GetDefaultOptions();
options.env = fault_env_.get();
options.create_if_missing = true;
options.listeners.emplace_back(listener);
options.max_bgerror_resume_count = 2;
options.bgerror_resume_retry_interval = 100000; // 0.1 second
Status s;

listener->EnableAutoRecovery(false);
DestroyAndReopen(options);

IOStatus error_msg = IOStatus::IOError("Retryable IO Error");
error_msg.SetRetryable(true);

ASSERT_OK(Put(Key(1), "val1"));
SyncPoint::GetInstance()->SetCallBack(
"BuildTable:BeforeFinishBuildTable",
[&](void*) { fault_fs_->SetFilesystemActive(false, error_msg); });

SyncPoint::GetInstance()->EnableProcessing();
s = Flush();
ASSERT_EQ(s.severity(), ROCKSDB_NAMESPACE::Status::Severity::kSoftError);
ASSERT_EQ(listener->WaitForRecovery(5000000), true);
ASSERT_EQ(listener->new_bg_error(), Status::Aborted());
SyncPoint::GetInstance()->DisableProcessing();
fault_fs_->SetFilesystemActive(true);

Destroy(options);
}

class DBErrorHandlingFencingTest : public DBErrorHandlingFSTest,
public testing::WithParamInterface<bool> {};

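The test above blocks on an InstrumentedCondVar until the listener observes the end of recovery. Application code can follow the same pattern with standard library primitives; a minimal sketch, with all names illustrative:

#include <condition_variable>
#include <mutex>

#include "rocksdb/listener.h"
#include "rocksdb/status.h"

// Illustrative listener that lets a caller block until the next automatic
// recovery attempt finishes, mirroring ErrorHandlerFSListener::WaitForRecovery
// in the test above.
class RecoveryWaiter : public rocksdb::EventListener {
 public:
  const char* Name() const override { return "RecoveryWaiter"; }

  void OnErrorRecoveryEnd(
      const rocksdb::BackgroundErrorRecoveryInfo& info) override {
    std::lock_guard<std::mutex> lock(mutex_);
    done_ = true;
    final_status_ = info.new_bg_error;
    cv_.notify_all();
  }

  // Blocks until recovery ends and returns the final status (OK == success).
  rocksdb::Status WaitForRecovery() {
    std::unique_lock<std::mutex> lock(mutex_);
    cv_.wait(lock, [this] { return done_; });
    done_ = false;  // re-arm for the next recovery cycle
    return final_status_;
  }

 private:
  std::mutex mutex_;
  std::condition_variable cv_;
  bool done_ = false;
  rocksdb::Status final_status_;
};

A caller would register this listener in Options::listeners and call WaitForRecovery() after observing a background error, much as the test waits with listener->WaitForRecovery(5000000).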
13 changes: 10 additions & 3 deletions db/event_helpers.cc
@@ -232,23 +232,30 @@ void EventHelpers::LogAndNotifyTableFileDeletion(
#endif // !ROCKSDB_LITE
}

void EventHelpers::NotifyOnErrorRecoveryCompleted(
void EventHelpers::NotifyOnErrorRecoveryEnd(
const std::vector<std::shared_ptr<EventListener>>& listeners,
Status old_bg_error, InstrumentedMutex* db_mutex) {
const Status& old_bg_error, const Status& new_bg_error,
InstrumentedMutex* db_mutex) {
#ifndef ROCKSDB_LITE
if (!listeners.empty()) {
db_mutex->AssertHeld();
// release lock while notifying events
db_mutex->Unlock();
for (auto& listener : listeners) {
BackgroundErrorRecoveryInfo info;
info.old_bg_error = old_bg_error;
info.new_bg_error = new_bg_error;
listener->OnErrorRecoveryCompleted(old_bg_error);
listener->OnErrorRecoveryEnd(info);
info.old_bg_error.PermitUncheckedError();
info.new_bg_error.PermitUncheckedError();
}
db_mutex->Lock();
}
old_bg_error.PermitUncheckedError();
#else
(void)listeners;
(void)old_bg_error;
(void)new_bg_error;
(void)db_mutex;
#endif // ROCKSDB_LITE
}
5 changes: 3 additions & 2 deletions db/event_helpers.h
@@ -43,9 +43,10 @@ class EventHelpers {
uint64_t file_number, const std::string& file_path,
const Status& status, const std::string& db_name,
const std::vector<std::shared_ptr<EventListener>>& listeners);
static void NotifyOnErrorRecoveryCompleted(
static void NotifyOnErrorRecoveryEnd(
const std::vector<std::shared_ptr<EventListener>>& listeners,
Status bg_error, InstrumentedMutex* db_mutex);
const Status& old_bg_error, const Status& new_bg_error,
InstrumentedMutex* db_mutex);

#ifndef ROCKSDB_LITE
static void NotifyBlobFileCreationStarted(
19 changes: 19 additions & 0 deletions include/rocksdb/listener.h
@@ -453,6 +453,16 @@ struct ExternalFileIngestionInfo {
TableProperties table_properties;
};

// Result of auto background error recovery
struct BackgroundErrorRecoveryInfo {
// The original error that triggered the recovery
Status old_bg_error;

// The final bg_error after all recovery attempts. Status::OK() means
// the recovery was successful and the database is fully operational.
Status new_bg_error;
};

struct IOErrorInfo {
IOErrorInfo(const IOStatus& _io_status, FileOperationType _operation,
const std::string& _file_path, size_t _length, uint64_t _offset)
@@ -691,12 +701,21 @@ class EventListener : public Customizable {
Status /* bg_error */,
bool* /* auto_recovery */) {}

// DEPRECATED
// A callback function for RocksDB which will be called once the database
// is recovered from read-only mode after an error. When this is called, it
// means normal writes to the database can be issued and the user can
// initiate any further recovery actions needed
virtual void OnErrorRecoveryCompleted(Status /* old_bg_error */) {}

// A callback function for RocksDB which will be called once the recovery
// attempt from a background retryable error is completed. The recovery
// may have been successful or not. In either case, the callback is called
// with the old and new error. If info.new_bg_error is Status::OK(), that
// means the recovery succeeded.
virtual void OnErrorRecoveryEnd(const BackgroundErrorRecoveryInfo& /*info*/) {
}

// A callback function for RocksDB which will be called before
// a blob file is being created. It will follow by OnBlobFileCreated after
// the creation finishes.
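Since OnErrorRecoveryCompleted is now deprecated, an existing listener can move its logic into OnErrorRecoveryEnd: info.old_bg_error carries the same value the old callback received, and info.new_bg_error tells whether recovery actually succeeded (the deprecated callback is still invoked by NotifyOnErrorRecoveryEnd above, so the migration is not urgent). A migration sketch, with class and helper names illustrative:

#include "rocksdb/listener.h"
#include "rocksdb/status.h"

// Illustrative migration: a listener that previously overrode the deprecated
// OnErrorRecoveryCompleted(Status old_bg_error) moves its logic into
// OnErrorRecoveryEnd and additionally inspects the final status.
class MigratedListener : public rocksdb::EventListener {
 public:
  const char* Name() const override { return "MigratedListener"; }

  void OnErrorRecoveryEnd(
      const rocksdb::BackgroundErrorRecoveryInfo& info) override {
    if (info.new_bg_error.ok()) {
      // Equivalent of the old OnErrorRecoveryCompleted(info.old_bg_error):
      // recovery succeeded and normal writes can resume.
      HandleRecovered(info.old_bg_error);
    } else {
      // New in this PR: recovery ended without clearing the error.
      HandleRecoveryFailed(info.old_bg_error, info.new_bg_error);
    }
  }

 private:
  void HandleRecovered(const rocksdb::Status& /*old_bg_error*/) {}
  void HandleRecoveryFailed(const rocksdb::Status& /*old_bg_error*/,
                            const rocksdb::Status& /*new_bg_error*/) {}
};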
