librbd: cancel ops before blocking IO
Ops might have in-flight IO -- blocking IO after canceling the ops
will result in the in-flight IO being flushed.  Shutdown also requires
an intermediate state where it still acts like it owns the lock
until after all ops are canceled and all IO is flushed.

Signed-off-by: Jason Dillaman <dillaman@redhat.com>
Jason Dillaman committed Apr 12, 2016
1 parent e90e3d9 commit 8451c0f
Showing 5 changed files with 56 additions and 38 deletions.
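
For context on the ordering the commit message describes, here is a minimal, self-contained C++ sketch (not librbd code; Ops, IoQueue, and their methods are hypothetical stand-ins for the op and AIO work queues): canceling op requests can still leave in-flight IO behind, so writes are blocked, and thereby flushed, only afterwards.

// Minimal sketch of the ordering this commit enforces (hypothetical types;
// Ops and IoQueue stand in for librbd's op and AIO work queues).
#include <functional>
#include <iostream>

struct Ops {
  // Canceling op requests can still leave (or issue) in-flight IO.
  void cancel_all(std::function<void()> on_done) {
    std::cout << "op requests canceled\n";
    on_done();
  }
};

struct IoQueue {
  // Blocking writes waits for in-flight IO to drain, i.e. it gets flushed here.
  void block_writes(std::function<void()> on_blocked) {
    std::cout << "writes blocked, in-flight IO flushed\n";
    on_blocked();
  }
};

int main() {
  Ops ops;
  IoQueue io;
  // Cancel ops first, then block IO, so the IO issued by canceled ops is
  // flushed by the block step instead of being stranded behind it.
  ops.cancel_all([&] {
    io.block_writes([] { std::cout << "lock can now be released\n"; });
  });
  return 0;
}

The same cancel-then-block order is what the ReleaseRequest changes below encode in send() and in the state diagram.
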
27 changes: 21 additions & 6 deletions src/librbd/ExclusiveLock.cc
@@ -61,6 +61,7 @@ bool ExclusiveLock<I>::is_lock_owner() const {
case STATE_LOCKED:
case STATE_POST_ACQUIRING:
case STATE_PRE_RELEASING:
+ case STATE_PRE_SHUTTING_DOWN:
lock_owner = true;
break;
default:
@@ -214,6 +215,7 @@ bool ExclusiveLock<I>::is_transition_state() const {
case STATE_POST_ACQUIRING:
case STATE_PRE_RELEASING:
case STATE_RELEASING:
+ case STATE_PRE_SHUTTING_DOWN:
case STATE_SHUTTING_DOWN:
return true;
case STATE_UNINITIALIZED:
@@ -471,13 +473,13 @@ void ExclusiveLock<I>::send_shutdown() {
if (m_state == STATE_UNLOCKED) {
m_state = STATE_SHUTTING_DOWN;
m_image_ctx.op_work_queue->queue(util::create_context_callback<
- ExclusiveLock<I>, &ExclusiveLock<I>::handle_unlocked_shutdown>(this), 0);
+ ExclusiveLock<I>, &ExclusiveLock<I>::handle_shutdown>(this), 0);
return;
}

ldout(m_image_ctx.cct, 10) << this << " " << __func__ << dendl;
assert(m_state == STATE_LOCKED);
- m_state = STATE_SHUTTING_DOWN;
+ m_state = STATE_PRE_SHUTTING_DOWN;

m_lock.Unlock();
m_image_ctx.op_work_queue->queue(new C_ShutDownRelease(this), 0);
@@ -494,13 +496,26 @@ void ExclusiveLock<I>::send_shutdown_release() {

using el = ExclusiveLock<I>;
ReleaseRequest<I>* req = ReleaseRequest<I>::create(
- m_image_ctx, cookie, nullptr,
- util::create_context_callback<el, &el::handle_locked_shutdown>(this));
+ m_image_ctx, cookie,
+ util::create_context_callback<el, &el::handle_shutdown_releasing>(this),
+ util::create_context_callback<el, &el::handle_shutdown_released>(this));
req->send();
}

template <typename I>
- void ExclusiveLock<I>::handle_locked_shutdown(int r) {
+ void ExclusiveLock<I>::handle_shutdown_releasing(int r) {
+ CephContext *cct = m_image_ctx.cct;
+ ldout(cct, 10) << this << " " << __func__ << ": r=" << r << dendl;
+
+ assert(r == 0);
+ assert(m_state == STATE_PRE_SHUTTING_DOWN);
+
+ // all IO and ops should be blocked/canceled by this point
+ m_state = STATE_SHUTTING_DOWN;
+ }
+
+ template <typename I>
+ void ExclusiveLock<I>::handle_shutdown_released(int r) {
CephContext *cct = m_image_ctx.cct;
ldout(cct, 10) << this << " " << __func__ << ": r=" << r << dendl;

@@ -522,7 +537,7 @@ void ExclusiveLock<I>::handle_locked_shutdown(int r) {
}

template <typename I>
- void ExclusiveLock<I>::handle_unlocked_shutdown(int r) {
+ void ExclusiveLock<I>::handle_shutdown(int r) {
CephContext *cct = m_image_ctx.cct;
ldout(cct, 10) << this << " " << __func__ << ": r=" << r << dendl;

8 changes: 5 additions & 3 deletions src/librbd/ExclusiveLock.h
@@ -67,7 +67,7 @@ class ExclusiveLock {
* |
* |
* v
- * SHUTTING_DOWN ---> SHUTDOWN ---> <finish>
+ * PRE_SHUTTING_DOWN ---> SHUTTING_DOWN ---> SHUTDOWN ---> <finish>
*/
enum State {
STATE_UNINITIALIZED,
@@ -79,6 +79,7 @@
STATE_WAITING_FOR_PEER,
STATE_PRE_RELEASING,
STATE_RELEASING,
+ STATE_PRE_SHUTTING_DOWN,
STATE_SHUTTING_DOWN,
STATE_SHUTDOWN,
};
@@ -151,8 +152,9 @@ class ExclusiveLock {

void send_shutdown();
void send_shutdown_release();
- void handle_locked_shutdown(int r);
- void handle_unlocked_shutdown(int r);
+ void handle_shutdown_releasing(int r);
+ void handle_shutdown_released(int r);
+ void handle_shutdown(int r);
void complete_shutdown(int r);
};

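
As a rough illustration of the new intermediate state (simplified and hypothetical; the enum below is not the real librbd type), PRE_SHUTTING_DOWN still reports lock ownership so that canceling ops and flushing IO can finish while the image still acts as the lock holder:

#include <iostream>

// Simplified stand-in for ExclusiveLock's internal state machine.
enum class State { Unlocked, Locked, PreShuttingDown, ShuttingDown, Shutdown };

bool is_lock_owner(State s) {
  switch (s) {
  case State::Locked:
  case State::PreShuttingDown:  // still acts like it owns the lock
    return true;
  default:
    return false;
  }
}

int main() {
  std::cout << std::boolalpha
            << is_lock_owner(State::PreShuttingDown) << "\n"   // true
            << is_lock_owner(State::ShuttingDown) << "\n";     // false
  return 0;
}
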
44 changes: 22 additions & 22 deletions src/librbd/exclusive_lock/ReleaseRequest.cc
@@ -50,58 +50,58 @@ ReleaseRequest<I>::~ReleaseRequest() {

template <typename I>
void ReleaseRequest<I>::send() {
- send_block_writes();
+ send_cancel_op_requests();
}

template <typename I>
- void ReleaseRequest<I>::send_block_writes() {
+ void ReleaseRequest<I>::send_cancel_op_requests() {
CephContext *cct = m_image_ctx.cct;
ldout(cct, 10) << __func__ << dendl;

using klass = ReleaseRequest<I>;
Context *ctx = create_context_callback<
- klass, &klass::handle_block_writes>(this);
-
- {
- RWLock::RLocker owner_locker(m_image_ctx.owner_lock);
- if (m_image_ctx.test_features(RBD_FEATURE_JOURNALING)) {
- m_image_ctx.aio_work_queue->set_require_lock_on_read();
- }
- m_image_ctx.aio_work_queue->block_writes(ctx);
- }
+ klass, &klass::handle_cancel_op_requests>(this);
+ m_image_ctx.cancel_async_requests(ctx);
}

template <typename I>
- Context *ReleaseRequest<I>::handle_block_writes(int *ret_val) {
+ Context *ReleaseRequest<I>::handle_cancel_op_requests(int *ret_val) {
CephContext *cct = m_image_ctx.cct;
ldout(cct, 10) << __func__ << ": r=" << *ret_val << dendl;

- if (*ret_val < 0) {
- m_image_ctx.aio_work_queue->unblock_writes();
- return m_on_finish;
- }
+ assert(*ret_val == 0);

- send_cancel_op_requests();
+ send_block_writes();
return nullptr;
}

template <typename I>
- void ReleaseRequest<I>::send_cancel_op_requests() {
+ void ReleaseRequest<I>::send_block_writes() {
CephContext *cct = m_image_ctx.cct;
ldout(cct, 10) << __func__ << dendl;

using klass = ReleaseRequest<I>;
Context *ctx = create_context_callback<
- klass, &klass::handle_cancel_op_requests>(this);
- m_image_ctx.cancel_async_requests(ctx);
+ klass, &klass::handle_block_writes>(this);
+
+ {
+ RWLock::RLocker owner_locker(m_image_ctx.owner_lock);
+ if (m_image_ctx.test_features(RBD_FEATURE_JOURNALING)) {
+ m_image_ctx.aio_work_queue->set_require_lock_on_read();
+ }
+ m_image_ctx.aio_work_queue->block_writes(ctx);
+ }
}

template <typename I>
- Context *ReleaseRequest<I>::handle_cancel_op_requests(int *ret_val) {
+ Context *ReleaseRequest<I>::handle_block_writes(int *ret_val) {
CephContext *cct = m_image_ctx.cct;
ldout(cct, 10) << __func__ << ": r=" << *ret_val << dendl;

- assert(*ret_val == 0);
+ if (*ret_val < 0) {
+ m_image_ctx.aio_work_queue->unblock_writes();
+ return m_on_finish;
+ }

if (m_on_releasing != nullptr) {
// alert caller that we no longer own the exclusive lock
10 changes: 5 additions & 5 deletions src/librbd/exclusive_lock/ReleaseRequest.h
@@ -33,10 +33,10 @@ class ReleaseRequest {
* <start>
* |
* v
- * BLOCK_WRITES
+ * CANCEL_OP_REQUESTS
* |
* v
- * CANCEL_OP_REQUESTS
+ * BLOCK_WRITES
* |
* v
* FLUSH_NOTIFIES . . . . . . . . . . . . . .
@@ -67,12 +67,12 @@ class ReleaseRequest {
decltype(m_image_ctx.object_map) m_object_map;
decltype(m_image_ctx.journal) m_journal;

- void send_block_writes();
- Context *handle_block_writes(int *ret_val);
-
void send_cancel_op_requests();
Context *handle_cancel_op_requests(int *ret_val);

+ void send_block_writes();
+ Context *handle_block_writes(int *ret_val);
+
void send_flush_notifies();
Context *handle_flush_notifies(int *ret_val);

5 changes: 3 additions & 2 deletions src/test/librbd/exclusive_lock/test_mock_ReleaseRequest.cc
@@ -93,8 +93,8 @@ TEST_F(TestMockExclusiveLockReleaseRequest, Success) {
expect_op_work_queue(mock_image_ctx);

InSequence seq;
- expect_block_writes(mock_image_ctx, 0);
expect_cancel_op_requests(mock_image_ctx, 0);
+ expect_block_writes(mock_image_ctx, 0);
expect_flush_notifies(mock_image_ctx);

MockJournal *mock_journal = new MockJournal();
@@ -183,6 +183,7 @@ TEST_F(TestMockExclusiveLockReleaseRequest, BlockWritesError) {
expect_op_work_queue(mock_image_ctx);

InSequence seq;
+ expect_cancel_op_requests(mock_image_ctx, 0);
expect_block_writes(mock_image_ctx, -EINVAL);
expect_unblock_writes(mock_image_ctx);

@@ -204,8 +205,8 @@ TEST_F(TestMockExclusiveLockReleaseRequest, UnlockError) {
expect_op_work_queue(mock_image_ctx);

InSequence seq;
- expect_block_writes(mock_image_ctx, 0);
expect_cancel_op_requests(mock_image_ctx, 0);
+ expect_block_writes(mock_image_ctx, 0);
expect_flush_notifies(mock_image_ctx);

expect_unlock(mock_image_ctx, -EINVAL);
