librbd: integrate passthrough image cache
Signed-off-by: Jason Dillaman <dillaman@redhat.com>
Jason Dillaman committed May 13, 2016
1 parent 15a8f5a commit b00130f
Showing 3 changed files with 94 additions and 25 deletions.
32 changes: 26 additions & 6 deletions src/librbd/AioImageRequest.cc
@@ -10,6 +10,7 @@
#include "librbd/ImageWatcher.h"
#include "librbd/internal.h"
#include "librbd/Journal.h"
#include "librbd/cache/ImageCache.h"
#include "librbd/journal/Types.h"
#include "include/rados/librados.hpp"
#include "osdc/Striper.h"
@@ -147,8 +148,7 @@ void AioImageRequest<I>::send() {
return;
}

// TODO
if (m_bypass_image_cache || true) {
if (m_bypass_image_cache || m_image_ctx.image_cache == nullptr) {
send_request();
} else {
send_image_cache_request();
@@ -249,7 +249,14 @@ void AioImageRead::send_request() {
}

void AioImageRead::send_image_cache_request() {
// TODO
assert(m_image_ctx.image_cache != nullptr);

m_aio_comp->set_request_count(1);
C_ImageCacheRead *req_comp = new C_ImageCacheRead(m_aio_comp,
m_image_extents);
m_image_ctx.image_cache->aio_read(std::move(m_image_extents),
&req_comp->get_data(), m_op_flags,
req_comp);
}

int AbstractAioImageWrite::clip_request() {
@@ -362,7 +369,12 @@ uint64_t AioImageWrite::append_journal_event(
}

void AioImageWrite::send_image_cache_request() {
// TODO
assert(m_image_ctx.image_cache != nullptr);

m_aio_comp->set_request_count(1);
C_AioRequest *req_comp = new C_AioRequest(m_aio_comp);
m_image_ctx.image_cache->aio_write(m_off, std::move(m_bl), m_op_flags,
req_comp);
}

void AioImageWrite::send_cache_requests(const ObjectExtents &object_extents,
@@ -428,7 +440,11 @@ uint32_t AioImageDiscard::get_cache_request_count(bool journaling) const {
}

void AioImageDiscard::send_image_cache_request() {
// TODO
assert(m_image_ctx.image_cache != nullptr);

m_aio_comp->set_request_count(1);
C_AioRequest *req_comp = new C_AioRequest(m_aio_comp);
m_image_ctx.image_cache->aio_discard(m_off, m_len, req_comp);
}

void AioImageDiscard::send_cache_requests(const ObjectExtents &object_extents,
@@ -509,7 +525,11 @@ void AioImageFlush::send_request() {
}

void AioImageFlush::send_image_cache_request() {
// TODO
assert(m_image_ctx.image_cache != nullptr);

m_aio_comp->set_request_count(1);
C_AioRequest *req_comp = new C_AioRequest(m_aio_comp);
m_image_ctx.image_cache->aio_flush(req_comp);
}

} // namespace librbd
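
Note: the send_image_cache_request() handlers above drive the cache through a small asynchronous interface. The following is a hedged sketch of the interface those call sites appear to assume; the method names and parameters are inferred from the calls in this diff, not copied from src/librbd/cache/ImageCache.h, so details may differ.

// Sketch only: inferred from the aio_* calls above.
#include <utility>
#include <vector>
#include "include/buffer.h"
#include "include/Context.h"

namespace librbd {
namespace cache {

struct ImageCache {
  typedef std::vector<std::pair<uint64_t, uint64_t> > Extents;

  virtual ~ImageCache() {}

  // client AIO entry points; each completes on_finish when the I/O is done
  virtual void aio_read(Extents&& image_extents, ceph::bufferlist *bl,
                        int fadvise_flags, Context *on_finish) = 0;
  virtual void aio_write(uint64_t offset, ceph::bufferlist&& bl,
                         int fadvise_flags, Context *on_finish) = 0;
  virtual void aio_discard(uint64_t offset, uint64_t length,
                           Context *on_finish) = 0;
  virtual void aio_flush(Context *on_finish) = 0;

  // lifecycle hooks used by ImageCtx
  virtual void init(Context *on_finish) = 0;
  virtual void shut_down(Context *on_finish) = 0;
};

} // namespace cache
} // namespace librbd
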
85 changes: 66 additions & 19 deletions src/librbd/ImageCtx.cc
@@ -14,6 +14,7 @@
#include "librbd/AioCompletion.h"
#include "librbd/AsyncOperation.h"
#include "librbd/AsyncRequest.h"
#include "librbd/cache/PassthroughImageCache.h"
#include "librbd/ExclusiveLock.h"
#include "librbd/exclusive_lock/StandardPolicy.h"
#include "librbd/internal.h"
@@ -82,8 +83,14 @@ struct C_ShutDownCache : public Context {
: image_ctx(_image_ctx), on_finish(_on_finish) {
}
virtual void finish(int r) {
image_ctx->object_cacher->stop();
on_finish->complete(r);
if (image_ctx->object_cacher != nullptr) {
image_ctx->object_cacher->stop();
}

// TODO: don't drop the previous result
if (image_ctx->image_cache != nullptr) {
image_ctx->image_cache->shut_down(on_finish);
}
}
};

@@ -104,17 +111,24 @@ struct C_InvalidateCache : public Context {

if (r == -EBLACKLISTED) {
lderr(cct) << "Blacklisted during flush! Purging cache..." << dendl;
image_ctx->object_cacher->purge_set(image_ctx->object_set);
if (image_ctx->object_cacher != nullptr) {
image_ctx->object_cacher->purge_set(image_ctx->object_set);
}
} else if (r != 0 && purge_on_error) {
lderr(cct) << "invalidate cache encountered error "
<< cpp_strerror(r) << " !Purging cache..." << dendl;
image_ctx->object_cacher->purge_set(image_ctx->object_set);
if (image_ctx->object_cacher != nullptr) {
image_ctx->object_cacher->purge_set(image_ctx->object_set);
}
} else if (r != 0) {
lderr(cct) << "flush_cache returned " << r << dendl;
}

loff_t unclean = image_ctx->object_cacher->release_set(
image_ctx->object_set);
loff_t unclean = 0;
if (image_ctx->object_cacher != nullptr) {
unclean = image_ctx->object_cacher->release_set(
image_ctx->object_set);
}
if (unclean == 0) {
r = 0;
} else {
@@ -207,6 +221,10 @@ struct C_InvalidateCache : public Context {
delete object_cacher;
object_cacher = NULL;
}
if (image_cache) {
delete image_cache;
image_cache = nullptr;
}
if (writeback_handler) {
delete writeback_handler;
writeback_handler = NULL;
@@ -249,6 +267,18 @@ struct C_InvalidateCache : public Context {

perf_start(pname);

{
// TODO: dummy passthrough image cache always enabled
image_cache = new cache::PassthroughImageCache<>(*this);

// TODO: integrate into open image state machine
C_SaferCond ctx;
image_cache->init(&ctx);

int r = ctx.wait();
assert(r == 0);
}

if (cache) {
Mutex::Locker l(cache_lock);
ldout(cct, 20) << "enabling caching..." << dendl;
@@ -733,34 +763,48 @@ struct C_InvalidateCache : public Context {
}

void ImageCtx::flush_cache(Context *onfinish) {
if (object_cacher == nullptr) {
onfinish = new FunctionContext([this, onfinish](int r) {
Mutex::Locker cacher_locker(cache_lock);
onfinish->complete(r);
});
op_work_queue->queue(onfinish);
return;
}

cache_lock.Lock();
object_cacher->flush_set(object_set, onfinish);
cache_lock.Unlock();
}

void ImageCtx::shut_down_cache(Context *on_finish) {
if (object_cacher == NULL) {
if (image_cache == nullptr && object_cacher == nullptr) {
on_finish->complete(0);
return;
}

cache_lock.Lock();
object_cacher->release_set(object_set);
cache_lock.Unlock();
if (object_cacher != nullptr) {
cache_lock.Lock();
object_cacher->release_set(object_set);
cache_lock.Unlock();
}

// TODO need to invalidate persistent client cache if VM migrates
C_ShutDownCache *shut_down = new C_ShutDownCache(this, on_finish);
flush_cache(new C_InvalidateCache(this, true, false, shut_down));
}

int ImageCtx::invalidate_cache(bool purge_on_error) {
flush_async_operations();
if (object_cacher == NULL) {
if (image_cache == nullptr && object_cacher == nullptr) {
return 0;
}

cache_lock.Lock();
object_cacher->release_set(object_set);
cache_lock.Unlock();
if (object_cacher != nullptr) {
cache_lock.Lock();
object_cacher->release_set(object_set);
cache_lock.Unlock();
}

C_SaferCond ctx;
flush_cache(new C_InvalidateCache(this, purge_on_error, true, &ctx));
@@ -770,14 +814,16 @@ struct C_InvalidateCache : public Context {
}

void ImageCtx::invalidate_cache(Context *on_finish) {
if (object_cacher == NULL) {
if (image_cache == nullptr && object_cacher == nullptr) {
op_work_queue->queue(on_finish, 0);
return;
}

cache_lock.Lock();
object_cacher->release_set(object_set);
cache_lock.Unlock();
if (object_cacher != nullptr) {
cache_lock.Lock();
object_cacher->release_set(object_set);
cache_lock.Unlock();
}

flush_cache(new C_InvalidateCache(this, false, false, on_finish));
}
@@ -846,7 +892,8 @@ struct C_InvalidateCache : public Context {
// ensure no locks are held when flush is complete
on_safe = util::create_async_context_callback(*this, on_safe);

if (object_cacher != NULL) {
// TODO user vs system flush
if (image_cache != nullptr || object_cacher != nullptr) {
// flush cache after completing all in-flight AIO ops
on_safe = new C_FlushCache(this, on_safe);
}
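
Aside: the C_ShutDownCache hunk above leaves a TODO about dropping the object-cacher flush result when chaining into image_cache->shut_down(). One possible way to preserve it (illustrative only, not part of this commit) is to wrap on_finish so that a non-zero shutdown result wins and the original flush result is reported otherwise:

// Illustrative sketch only: keep the flush result 'r' unless the image
// cache shutdown itself reports an error.
virtual void finish(int r) {
  if (image_ctx->object_cacher != nullptr) {
    image_ctx->object_cacher->stop();
  }

  if (image_ctx->image_cache == nullptr) {
    on_finish->complete(r);
    return;
  }

  Context *orig_on_finish = on_finish;
  image_ctx->image_cache->shut_down(new FunctionContext(
    [r, orig_on_finish](int shut_down_r) {
      orig_on_finish->complete(shut_down_r < 0 ? shut_down_r : r);
    }));
}
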
2 changes: 2 additions & 0 deletions src/librbd/ImageCtx.h
@@ -52,6 +52,7 @@ namespace librbd {
class ObjectMap;
template <typename> class Operations;

namespace cache { struct ImageCache; }
namespace exclusive_lock { struct Policy; }
namespace journal { struct Policy; }

@@ -127,6 +128,7 @@ namespace librbd {

file_layout_t layout;

cache::ImageCache *image_cache = nullptr;
ObjectCacher *object_cacher;
LibrbdWriteback *writeback_handler;
ObjectCacher::ObjectSet *object_set;
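
For context, the "passthrough" in the commit title implies an ImageCache implementation that retains no data and forwards every request to the layer below it. A minimal self-contained sketch of that idea follows; the types and names here are illustrative stand-ins, not the real librbd classes in src/librbd/cache/PassthroughImageCache.h.

// Self-contained sketch: a "cache" layer that caches nothing and simply
// re-dispatches each asynchronous request to a lower I/O layer.
#include <cstdint>

struct Context {                      // stand-in for Ceph's Context
  virtual ~Context() {}
  virtual void complete(int r) = 0;
};

struct ImageIoLayer {                 // stand-in for the uncached I/O path
  virtual ~ImageIoLayer() {}
  virtual void read(uint64_t off, uint64_t len, Context *on_finish) = 0;
  virtual void write(uint64_t off, uint64_t len, Context *on_finish) = 0;
  virtual void discard(uint64_t off, uint64_t len, Context *on_finish) = 0;
  virtual void flush(Context *on_finish) = 0;
};

class PassthroughCacheSketch {
public:
  explicit PassthroughCacheSketch(ImageIoLayer &lower) : m_lower(lower) {}

  // lifecycle: nothing to allocate or persist for a passthrough cache
  void init(Context *on_finish) { on_finish->complete(0); }
  void shut_down(Context *on_finish) { on_finish->complete(0); }

  // I/O: forward verbatim, preserving the asynchronous completion contract
  void aio_read(uint64_t off, uint64_t len, Context *on_finish) {
    m_lower.read(off, len, on_finish);
  }
  void aio_write(uint64_t off, uint64_t len, Context *on_finish) {
    m_lower.write(off, len, on_finish);
  }
  void aio_discard(uint64_t off, uint64_t len, Context *on_finish) {
    m_lower.discard(off, len, on_finish);
  }
  void aio_flush(Context *on_finish) { m_lower.flush(on_finish); }

private:
  ImageIoLayer &m_lower;
};
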
