os/bluestore: pack a few more in-memory types #11328

Merged
merged 6 commits on Oct 6, 2016
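The title refers to shrinking BlueStore's in-memory cache structures: the diff below narrows Buffer::offset/length from uint64_t to uint32_t, narrows Blob::id from int to int16_t, and reorders SharedBlob so its small members share padding. A minimal standalone sketch of why narrowing and grouping fields shrinks sizeof (illustrative types, not the actual BlueStore definitions):

#include <cstdint>
#include <iostream>

// "Before": 64-bit offset/length even though the stored values are small.
struct BufferBefore {
  uint32_t flags;
  uint64_t seq;
  uint64_t offset, length;
};

// "After": the narrowed fields fit in the space one of them used to take.
struct BufferAfter {
  uint32_t flags;
  uint64_t seq;
  uint32_t offset, length;   // 32 bits assumed sufficient for blob-relative offsets
};

int main() {
  // On a typical LP64 target this prints 32 and 24.
  std::cout << sizeof(BufferBefore) << "\n" << sizeof(BufferAfter) << "\n";
  return 0;
}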
37 changes: 18 additions & 19 deletions src/os/bluestore/BlueStore.cc
@@ -1043,29 +1043,28 @@ void BlueStore::BufferSpace::finish_write(uint64_t seq)
 {
   std::lock_guard<std::recursive_mutex> l(cache->lock);
 
-  auto i = writing_map.begin();
-  while (i != writing_map.end()) {
-    if (i->first > seq)
+  auto i = writing.begin();
+  while (i != writing.end()) {
+    if (i->seq > seq) {
       break;
+    }
+    if (i->seq < seq) {
+      ++i;
+      continue;
+    }
 
-    auto l = i->second.begin();
-    while (l != i->second.end()) {
-      Buffer *b = &*l;
-      dout(20) << __func__ << " " << *b << dendl;
-      assert(b->is_writing());
+    Buffer *b = &*i;
+    dout(20) << __func__ << " " << *b << dendl;
+    assert(b->is_writing());
 
-      if (b->flags & Buffer::FLAG_NOCACHE) {
-        i->second.erase(l++);
-        buffer_map.erase(b->offset);
-      } else {
-        b->state = Buffer::STATE_CLEAN;
-        i->second.erase(l++);
-        cache->_add_buffer(b, 1, nullptr);
-      }
+    if (b->flags & Buffer::FLAG_NOCACHE) {
+      writing.erase(i++);
+      buffer_map.erase(b->offset);
+    } else {
+      b->state = Buffer::STATE_CLEAN;
+      writing.erase(i++);
+      cache->_add_buffer(b, 1, nullptr);
     }
-
-    assert(i->second.empty());
-    writing_map.erase(i++);
   }
 
   cache->_audit("finish_write end");
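The new finish_write walks a single seq-ordered intrusive list instead of a map of per-seq lists. A self-contained sketch of the boost::intrusive pattern it relies on, a member-hook list plus erase-while-iterating (a simplified Item type stands in for Buffer):

#include <boost/intrusive/list.hpp>
#include <cassert>
#include <cstdint>

// Illustrative element; the real Buffer also carries state, flags and data.
struct Item {
  uint64_t seq;
  boost::intrusive::list_member_hook<> state_item;  // embedded hook, no allocation
  explicit Item(uint64_t s) : seq(s) {}
};

// Member-hook list, analogous to BlueStore's state_list_t.
using state_list_t = boost::intrusive::list<
  Item,
  boost::intrusive::member_hook<Item,
                                boost::intrusive::list_member_hook<>,
                                &Item::state_item>>;

// Buffers presumably enter the writing state in non-decreasing seq order, so a
// plain push_back keeps the list "sorted by seq, ascending" (see the comment
// in BlueStore.h below) without any searching.
void append_writing(state_list_t& writing, Item& b) {
  assert(writing.empty() || writing.back().seq <= b.seq);
  writing.push_back(b);
}

// Mirror of the finish_write loop: stop at the first larger seq, skip smaller
// ones, unlink exact matches.  The list never owns its elements (BlueStore's
// buffer_map does), so erase() only unlinks them.
void finish_seq(state_list_t& writing, uint64_t seq) {
  auto i = writing.begin();
  while (i != writing.end()) {
    if (i->seq > seq)
      break;
    if (i->seq < seq) {
      ++i;
      continue;
    }
    writing.erase(i++);   // post-increment keeps a valid iterator
  }
}

int main() {
  Item a(1), b(2), c(5);
  state_list_t writing;
  append_writing(writing, a);
  append_writing(writing, b);
  append_writing(writing, c);
  finish_seq(writing, 2);
  assert(writing.size() == 2 && writing.front().seq == 1 && writing.back().seq == 5);
  writing.clear();  // unlink the rest before the Items are destroyed (safe-mode hooks)
  return 0;
}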
41 changes: 19 additions & 22 deletions src/os/bluestore/BlueStore.h
@@ -147,16 +147,16 @@ class BlueStore : public ObjectStore,
     uint16_t cache_private = 0; ///< opaque (to us) value used by Cache impl
     uint32_t flags;             ///< FLAG_*
     uint64_t seq;
-    uint64_t offset, length;
+    uint32_t offset, length;
     bufferlist data;
 
     boost::intrusive::list_member_hook<> lru_item;
     boost::intrusive::list_member_hook<> state_item;
 
-    Buffer(BufferSpace *space, unsigned s, uint64_t q, uint64_t o, uint64_t l,
+    Buffer(BufferSpace *space, unsigned s, uint64_t q, uint32_t o, uint32_t l,
            unsigned f = 0)
       : space(space), state(s), flags(f), seq(q), offset(o), length(l) {}
-    Buffer(BufferSpace *space, unsigned s, uint64_t q, uint64_t o, bufferlist& b,
+    Buffer(BufferSpace *space, unsigned s, uint64_t q, uint32_t o, bufferlist& b,
            unsigned f = 0)
       : space(space), state(s), flags(f), seq(q), offset(o),
         length(b.length()), data(b) {}
@@ -171,11 +171,11 @@ class BlueStore : public ObjectStore,
       return state == STATE_WRITING;
     }
 
-    uint64_t end() const {
+    uint32_t end() const {
       return offset + length;
     }
 
-    void truncate(uint64_t newlen) {
+    void truncate(uint32_t newlen) {
       assert(newlen < length);
       if (data.length()) {
         bufferlist t;
@@ -207,7 +207,11 @@ class BlueStore : public ObjectStore,
 
     map<uint64_t,std::unique_ptr<Buffer>> buffer_map;
     Cache *cache;
-    map<uint64_t, state_list_t> writing_map;
+
+    // we use a bare intrusive list here instead of std::map because
+    // it uses less memory and we expect this to be very small (very
+    // few IOs in flight to the same Blob at the same time).
+    state_list_t writing;   ///< writing buffers, sorted by seq, ascending
 
     BufferSpace(Cache *c) : cache(c) {
       if (cache) {
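The comment above writing claims the bare intrusive list costs less memory than the old map<uint64_t, state_list_t>. A rough, hedged illustration of just the fixed per-BufferSpace head cost (the old scheme additionally heap-allocated a tree node, containing a list head, for every distinct in-flight seq; exact sizes vary by standard library):

#include <boost/intrusive/list.hpp>
#include <cstdint>
#include <iostream>
#include <map>

struct Elem {
  boost::intrusive::list_member_hook<> hook;
};
using elem_list_t = boost::intrusive::list<
  Elem,
  boost::intrusive::member_hook<Elem,
                                boost::intrusive::list_member_hook<>,
                                &Elem::hook>>;

int main() {
  // One map head per BufferSpace (old) vs. one list head (new); the per-seq
  // tree nodes the map allocated on the heap come on top of the first number.
  std::cout << "map<uint64_t, elem_list_t> head: "
            << sizeof(std::map<uint64_t, elem_list_t>) << " bytes\n";
  std::cout << "intrusive list head:             "
            << sizeof(elem_list_t) << " bytes\n";
  return 0;
}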
@@ -216,7 +220,7 @@ class BlueStore : public ObjectStore,
     }
     ~BufferSpace() {
       assert(buffer_map.empty());
-      assert(writing_map.empty());
+      assert(writing.empty());
       if (cache) {
         cache->rm_blob();
       }
@@ -226,7 +230,7 @@ class BlueStore : public ObjectStore,
       cache->_audit("_add_buffer start");
       buffer_map[b->offset].reset(b);
       if (b->is_writing()) {
-        writing_map[b->seq].push_back(*b);
+        writing.push_back(*b);
       } else {
         cache->_add_buffer(b, level, near);
       }
@@ -238,12 +242,7 @@ class BlueStore : public ObjectStore,
     void _rm_buffer(map<uint64_t,std::unique_ptr<Buffer>>::iterator p) {
       cache->_audit("_rm_buffer start");
       if (p->second->is_writing()) {
-        uint64_t seq = (*p->second.get()).seq;
-        auto it = writing_map.find(seq);
-        assert(it != writing_map.end());
-        it->second.erase(it->second.iterator_to(*p->second));
-        if (it->second.empty())
-          writing_map.erase(it);
+        writing.erase(writing.iterator_to(*p->second));
       } else {
         cache->_rm_buffer(p->second.get());
       }
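The collapsed _rm_buffer path leans on boost::intrusive's iterator_to(), which converts a reference to an already-linked element back into an iterator in constant time, so no lookup by seq is needed before unlinking. A small standalone sketch (illustrative Node type):

#include <boost/intrusive/list.hpp>
#include <cassert>

struct Node {
  int value = 0;
  boost::intrusive::list_member_hook<> hook;
};
using node_list_t = boost::intrusive::list<
  Node,
  boost::intrusive::member_hook<Node,
                                boost::intrusive::list_member_hook<>,
                                &Node::hook>>;

int main() {
  Node a, b, c;
  a.value = 1; b.value = 2; c.value = 3;
  node_list_t l;
  l.push_back(a);
  l.push_back(b);
  l.push_back(c);

  // Given only a reference to b, recover its iterator and unlink it in O(1);
  // the same trick as writing.erase(writing.iterator_to(*p->second)) above.
  l.erase(l.iterator_to(b));
  assert(l.size() == 2);

  l.clear();  // unlink the rest before the Nodes go out of scope
  return 0;
}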
@@ -318,15 +317,15 @@ class BlueStore : public ObjectStore,
   struct SharedBlob : public boost::intrusive::unordered_set_base_hook<> {
     std::atomic_int nref = {0};          ///< reference count
 
+    // these are defined/set if the shared_blob is 'loaded'
+    bool loaded = false;                 ///< whether shared_blob_t is loaded
+    bluestore_shared_blob_t shared_blob; ///< the actual shared state
+
     // these are defined/set if the blob is marked 'shared'
     uint64_t sbid = 0;                   ///< shared blob id
     string key;                          ///< key in kv store
     SharedBlobSet *parent_set = 0;       ///< containing SharedBlobSet
 
-    // these are defined/set if the shared_blob is 'loaded'
-    bluestore_shared_blob_t shared_blob; ///< the actual shared state
-    bool loaded = false;                 ///< whether shared_blob_t is loaded
-
     BufferSpace bc;                      ///< buffer cache
 
     SharedBlob(uint64_t i, const string& k, Cache *c);
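The SharedBlob change is pure field reordering: moving the one-byte loaded flag up next to the 4-byte atomic refcount lets it occupy padding that was previously wasted, instead of claiming its own aligned slot near the end. A hedged sketch of the effect with stand-in member types (real sizes depend on the ABI and on the actual bluestore types):

#include <atomic>
#include <cstdint>
#include <iostream>
#include <string>

// Stand-in for bluestore_shared_blob_t; the real type is larger.
struct SharedState { uint64_t payload[2]; };

struct Before {            // flag trails the 8-byte-aligned members
  std::atomic_int nref;
  uint64_t sbid;
  std::string key;
  void *parent_set;
  SharedState shared_blob;
  bool loaded;             // forces a padded tail slot of its own
};

struct After {             // flag tucked into the hole after the refcount
  std::atomic_int nref;
  bool loaded;
  SharedState shared_blob;
  uint64_t sbid;
  std::string key;
  void *parent_set;
};

int main() {
  // On a typical 64-bit ABI, Before is 8 bytes larger than After.
  std::cout << sizeof(Before) << " vs " << sizeof(After) << "\n";
  return 0;
}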
@@ -407,15 +406,13 @@ class BlueStore : public ObjectStore,
   /// in-memory blob metadata and associated cached buffers (if any)
   struct Blob : public boost::intrusive::set_base_hook<> {
     std::atomic_int nref = {0};      ///< reference count
-    int id = -1;                     ///< id, for spanning blobs only, >= 0
+    int16_t id = -1;                 ///< id, for spanning blobs only, >= 0
+    int16_t last_encoded_id = -1;    ///< (ephemeral) used during encoding only
     SharedBlobRef shared_blob;       ///< shared blob state (if any)
 
     /// refs from this shard. ephemeral if id<0, persisted if spanning.
     bluestore_extent_ref_map_t ref_map;
 
-
-    int last_encoded_id = -1;        ///< (ephemeral) used during encoding only
-
   private:
     mutable bluestore_blob_t blob;   ///< decoded blob metadata
     mutable bool dirty = true;       ///< true if blob is newer than blob_bl
3 changes: 3 additions & 0 deletions src/test/objectstore/test_bluestore_types.cc
@@ -26,6 +26,9 @@ TEST(bluestore, sizeof) {
   P(BlueStore::extent_map_t);
   P(BlueStore::blob_map_t);
   P(BlueStore::BufferSpace);
+  P(BlueStore::Buffer);
+  P(bluestore_onode_t);
+  P(bluestore_blob_t);
   P(bluestore_extent_ref_map_t);
   P(bluestore_extent_ref_map_t::record_t);
   P(std::atomic_int);
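The new P() lines extend the existing sizeof dump so the packed types show up in the test output. The macro itself is defined earlier in test_bluestore_types.cc and is not part of this diff; a plausible stand-in with the same shape would be:

#include <iostream>

// Hypothetical equivalent of the test's P() helper: print a type's name and its size.
#define P(x) std::cout << #x << "\t" << sizeof(x) << std::endl

struct Example { int a; char b; };

int main() {
  P(Example);        // prints something like "Example  8"
  P(unsigned long);
  return 0;
}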