Skip to content

Commit

Permalink
Merge pull request #11896 from ifed01/wip-bluestore-cleanup
Browse files Browse the repository at this point in the history
os/bluestore: cleanup around Blob::ref_map

Reviewed-by: Sage Weil <sage@redhat.com>
  • Loading branch information
liewegas committed Nov 11, 2016
2 parents 6b2dc4a + 0edfc9e commit abf5588
Show file tree
Hide file tree
Showing 3 changed files with 56 additions and 34 deletions.
42 changes: 21 additions & 21 deletions src/os/bluestore/BlueStore.cc
Expand Up @@ -1326,7 +1326,7 @@ ostream& operator<<(ostream& out, const BlueStore::Blob& b)
if (b.is_spanning()) {
out << " spanning " << b.id;
}
out << " " << b.get_blob() << " " << b.ref_map
out << " " << b.get_blob() << " " << b.get_ref_map()
<< (b.is_dirty() ? " (dirty)" : " (clean)")
<< " " << *b.shared_blob
<< ")";
Expand Down Expand Up @@ -1495,6 +1495,12 @@ bool BlueStore::Blob::put_ref(
return false;
}

// Transfer logical references for a range from this blob's ref_map to
// another blob's ref_map: drop [src_offset, src_offset+length) here and
// add [dest_offset, dest_offset+length) on 'other'.  Used by
// ExtentMap::split_blob when extents are re-homed onto the right-hand blob.
// NOTE(review): passes nullptr as put()'s released-extents out-param, so any
// physical extents freed by dropping the refs are deliberately discarded --
// assumes the caller does not need them here; confirm put() accepts nullptr.
void BlueStore::Blob::pass_ref(Blob* other, uint64_t src_offset, uint64_t length, uint64_t dest_offset)
{
ref_map.put(src_offset, length, nullptr);
other->ref_map.get(dest_offset, length);
}

void BlueStore::Blob::split(size_t blob_offset, Blob *r)
{
dout(10) << __func__ << " 0x" << std::hex << blob_offset << std::dec
Expand Down Expand Up @@ -1797,7 +1803,7 @@ bool BlueStore::ExtentMap::encode_some(uint32_t offset, uint32_t length,
denc_varint(0, bound); // logical_offset
denc_varint(0, bound); // len
denc_varint(0, bound); // blob_offset
p->blob->bound_encode(bound);
p->blob->bound_encode(bound, false);
}

{
Expand Down Expand Up @@ -1854,7 +1860,7 @@ bool BlueStore::ExtentMap::encode_some(uint32_t offset, uint32_t length,
}
pos = p->logical_offset + p->length;
if (include_blob) {
p->blob->encode(app);
p->blob->encode(app, false);
}
}
}
Expand Down Expand Up @@ -1916,13 +1922,13 @@ void BlueStore::ExtentMap::decode_some(bufferlist& bl)
assert(le->blob);
} else {
Blob *b = new Blob();
b->decode(p);
b->decode(p, false);
blobs[n] = b;
onode->c->open_shared_blob(b);
le->assign_blob(b);
}
// we build ref_map dynamically for non-spanning blobs
le->blob->ref_map.get(le->blob_offset, le->length);
le->blob->get_ref(le->blob_offset, le->length);
}
pos += prev_len;
++n;
Expand All @@ -1939,8 +1945,7 @@ void BlueStore::ExtentMap::bound_encode_spanning_blobs(size_t& p)
denc_varint((uint32_t)0, key_size);
p += spanning_blob_map.size() * key_size;
for (const auto& i : spanning_blob_map) {
i.second->bound_encode(p);
i.second->ref_map.bound_encode(p);
i.second->bound_encode(p, true);
}
}

Expand All @@ -1950,8 +1955,7 @@ void BlueStore::ExtentMap::encode_spanning_blobs(
denc_varint(spanning_blob_map.size(), p);
for (auto& i : spanning_blob_map) {
denc_varint(i.second->id, p);
i.second->encode(p);
i.second->ref_map.encode(p);
i.second->encode(p, true);
}
}

Expand All @@ -1965,8 +1969,7 @@ void BlueStore::ExtentMap::decode_spanning_blobs(
BlobRef b(new Blob());
denc_varint(b->id, p);
spanning_blob_map[b->id] = b;
b->decode(p);
b->ref_map.decode(p);
b->decode(p, true);
c->open_shared_blob(b);
}
}
Expand Down Expand Up @@ -2217,7 +2220,7 @@ BlueStore::Extent *BlueStore::ExtentMap::set_lextent(
extent_map_t *old_extents)
{
punch_hole(logical_offset, length, old_extents);
b->ref_map.get(blob_offset, length);
b->get_ref(blob_offset, length);
Extent *le = new Extent(logical_offset, blob_offset, length, blob_depth, b);
extent_map.insert(*le);
if (!needs_reshard && spans_shard(logical_offset, length)) {
Expand All @@ -2244,24 +2247,21 @@ BlueStore::BlobRef BlueStore::ExtentMap::split_blob(
if (ep->blob != lb) {
continue;
}
vector<bluestore_pextent_t> released;
if (ep->logical_offset < pos) {
// split extent
size_t left = pos - ep->logical_offset;
Extent *ne = new Extent(pos, 0, ep->length - left, ep->blob_depth, rb);
extent_map.insert(*ne);
lb->ref_map.put(ep->blob_offset + left, ep->length - left, &released);
lb->pass_ref(rb.get(), ep->blob_offset + left, ne->length, ne->blob_offset);
ep->length = left;
rb->ref_map.get(ne->blob_offset, ne->length);
dout(30) << __func__ << " split " << *ep << dendl;
dout(30) << __func__ << " to " << *ne << dendl;
} else {
// switch blob
assert(ep->blob_offset >= blob_offset);
lb->ref_map.put(ep->blob_offset, ep->length, &released);
lb->pass_ref(rb.get(), ep->blob_offset, ep->length, ep->blob_offset - blob_offset);
ep->blob = rb;
ep->blob_offset -= blob_offset;
rb->ref_map.get(ep->blob_offset, ep->length);
dout(30) << __func__ << " adjusted " << *ep << dendl;
}
}
Expand Down Expand Up @@ -4513,15 +4513,15 @@ int BlueStore::fsck(bool deep)
}
for (auto &i : ref_map) {
++num_blobs;
if (i.first->ref_map != i.second) {
if (i.first->get_ref_map() != i.second) {
derr << __func__ << " " << oid << " blob " << *i.first
<< " doesn't match expected ref_map " << i.second << dendl;
++errors;
}
const bluestore_blob_t& blob = i.first->get_blob();
if (blob.is_compressed()) {
expected_statfs.compressed += blob.compressed_length;
for (auto& r : i.first->ref_map.ref_map) {
for (auto& r : i.first->get_ref_map().ref_map) {
expected_statfs.compressed_original +=
r.second.refs * r.second.length;
}
Expand Down Expand Up @@ -7991,7 +7991,7 @@ void BlueStore::_wctx_finish(
}
}
delete &lo;
if (b->id >= 0 && b->ref_map.empty()) {
if (b->id >= 0 && b->get_ref_map().empty()) {
dout(20) << __func__ << " spanning_blob_map removing empty " << *b
<< dendl;
auto it = o->extent_map.spanning_blob_map.find(b->id);
Expand Down Expand Up @@ -8802,7 +8802,7 @@ int BlueStore::_do_clone_range(
e.blob_offset + skip_front,
e.length - skip_front - skip_back, e.blob_depth, cb);
newo->extent_map.extent_map.insert(*ne);
ne->blob->ref_map.get(ne->blob_offset, ne->length);
ne->blob->get_ref(ne->blob_offset, ne->length);
// fixme: we may leave parts of new blob unreferenced that could
// be freed (relative to the shared_blob).
txc->statfs_delta.stored() += ne->length;
Expand Down
40 changes: 31 additions & 9 deletions src/os/bluestore/BlueStore.h
Expand Up @@ -418,13 +418,12 @@ class BlueStore : public ObjectStore,
int16_t last_encoded_id = -1; ///< (ephemeral) used during encoding only
SharedBlobRef shared_blob; ///< shared blob state (if any)

/// refs from this shard. ephemeral if id<0, persisted if spanning.
bluestore_extent_ref_map_t ref_map;

private:
mutable bluestore_blob_t blob; ///< decoded blob metadata
mutable bool dirty = true; ///< true if blob is newer than blob_bl
mutable bufferlist blob_bl; ///< cached encoded blob
/// refs from this shard. ephemeral if id<0, persisted if spanning.
bluestore_extent_ref_map_t ref_map;

public:
Blob() {}
Expand All @@ -436,6 +435,9 @@ class BlueStore : public ObjectStore,

friend ostream& operator<<(ostream& out, const Blob &b);

/// Read-only accessor for this blob's extent reference map; ref_map itself
/// is private as of this change, so external readers (operator<<, fsck,
/// _wctx_finish) go through this const getter.
const bluestore_extent_ref_map_t& get_ref_map() const {
return ref_map;
}
bool is_spanning() const {
return id >= 0;
}
Expand Down Expand Up @@ -479,6 +481,8 @@ class BlueStore : public ObjectStore,
/// put logical references, and get back any released extents
bool put_ref(uint64_t offset, uint64_t length, uint64_t min_alloc_size,
vector<bluestore_pextent_t> *r);
/// pass references for specific range to other blob
void pass_ref(Blob* other, uint64_t src_offset, uint64_t length, uint64_t dest_offset);

/// split the blob
void split(size_t blob_offset, Blob *o);
Expand All @@ -503,31 +507,49 @@ class BlueStore : public ObjectStore,
assert(blob_bl.length());
}
}
void bound_encode(size_t& p) const {
void bound_encode(size_t& p, bool include_ref_map) const {
_encode();
p += blob_bl.length();
if (include_ref_map) {
ref_map.bound_encode(p);
}
}
void encode(bufferlist::contiguous_appender& p) const {
void encode(bufferlist::contiguous_appender& p, bool include_ref_map) const {
_encode();
p.append(blob_bl);
if (include_ref_map) {
ref_map.encode(p);
}
}
void decode(bufferptr::iterator& p) {
void decode(bufferptr::iterator& p, bool include_ref_map) {
const char *start = p.get_pos();
denc(blob, p);
const char *end = p.get_pos();
blob_bl.clear();
blob_bl.append(start, end - start);
dirty = false;
if (include_ref_map) {
ref_map.decode(p);
}
}
#else
void bound_encode(size_t& p) const {
void bound_encode(size_t& p, bool include_ref_map) const {
denc(blob, p);
if (include_ref_map) {
ref_map.bound_encode(p);
}
}
void encode(bufferlist::contiguous_appender& p) const {
void encode(bufferlist::contiguous_appender& p, bool include_ref_map) const {
denc(blob, p);
if (include_ref_map) {
ref_map.encode(p);
}
}
void decode(bufferptr::iterator& p) {
void decode(bufferptr::iterator& p, bool include_ref_map) {
denc(blob, p);
if (include_ref_map) {
ref_map.decode(p);
}
}
#endif
};
Expand Down
8 changes: 4 additions & 4 deletions src/test/objectstore/test_bluestore_types.cc
Expand Up @@ -341,8 +341,8 @@ TEST(Blob, put_ref)
b.dirty_blob().extents.push_back(
bluestore_pextent_t(bluestore_pextent_t::INVALID_OFFSET, 0x8000));
b.dirty_blob().extents.push_back(bluestore_pextent_t(0x4071f000, 0x5000));
b.ref_map.get(0, 0x1200);
b.ref_map.get(0xae00, 0x4200);
b.get_ref(0, 0x1200);
b.get_ref(0xae00, 0x4200);
cout << b << std::endl;
vector<bluestore_pextent_t> r;

Expand Down Expand Up @@ -697,8 +697,8 @@ TEST(Blob, put_ref)
B.shared_blob = new BlueStore::SharedBlob(-1, string(), nullptr);
B.shared_blob->get(); // hack to avoid dtor from running
bluestore_blob_t& b = B.dirty_blob();
B.ref_map.get(0x0, 0x3800);
B.ref_map.get(0x17c00, 0x6400);
B.get_ref(0x0, 0x3800);
B.get_ref(0x17c00, 0x6400);
b.extents.push_back(bluestore_pextent_t(0x40101000, 0x4000));
b.extents.push_back(bluestore_pextent_t(bluestore_pextent_t::INVALID_OFFSET,
0x13000));
Expand Down

0 comments on commit abf5588

Please sign in to comment.