mimic: os/bluestore: backport new bitmap allocator #26983

Merged
21 commits merged on Apr 1, 2019

Changes from 1 commit

Commits (21)
480b285
test/allocator: get rid off bitmap allocator specifics and introduce new
ifed01 May 3, 2018
dd0912f
os/bluestore: add new bitmap allocator
ifed01 May 3, 2018
ba5c402
test/allocator: move bluestore allocator's benchmarks to a standalone UT
ifed01 May 4, 2018
d8d6cdb
os/bluestore: get rid off allocator's reserve method
ifed01 May 7, 2018
4773293
os/bluestore: add release(PExtentVector) helper to Allocator class to
ifed01 May 7, 2018
5525533
os/bluestore: fix improper access to a BitmapFastAllocator::last_pos
ifed01 May 10, 2018
d42bb17
os/bluestore: align BitMap allocator's init_rm_free/init_add_free par…
ifed01 May 11, 2018
5651097
os/bluestore: remove original bitmap allocator
ifed01 May 14, 2018
506dbcc
os/bluestore: more verbose logging in new bitmap allocator
ifed01 May 30, 2018
3885860
os/bluestore: cosmetic cleanup in new bitmap allocator.
ifed01 May 31, 2018
33482e0
os/bluestore: properly respect min_length when analysing partially free
ifed01 May 31, 2018
8b9fb75
os/bluestore: cosmetic new allocator internal method rename.
ifed01 May 31, 2018
ececb8f
os/bluestore: respect min_length as allocation granularity for new
ifed01 May 31, 2018
c645f2f
test/objectstore/unitetest_fastbmap_allocator: replace ASSERT_TRUE with
ifed01 Jun 1, 2018
25b7d0e
os/bluestore: perform allocations aligned with min_length in new bit…
ifed01 Jun 1, 2018
811857a
os/bluestore: rename new bitmap allocator class to BitmapAllocator.
ifed01 Jun 12, 2018
d08e2b7
os/bluestore: provide a rough estimate for bitmap allocator
ifed01 Jun 15, 2018
0bdbe66
os/bluestore: fix overlappedd allocation returned by bitmap allocator
ifed01 Jun 25, 2018
e5de298
os/bluestore: fix minor issues in bitmap logging output
ifed01 Jun 25, 2018
cf984c8
qa/objectstore: bluestore -> bluestore-{bitmap,stupid}
liewegas Nov 30, 2018
f26b219
qa/suites: fix bluestore links
liewegas Dec 5, 2018
os/bluestore: respect min_length as allocation granularity for new bitmap allocator.

Before this fix, min_length was used only as a bare minimum threshold, which allowed e.g. an allocated extent length equal to min_length + 1.
Signed-off-by: Igor Fedotov <ifedotov@suse.com>
(cherry picked from commit 8a188b2)
ifed01 committed May 31, 2018
commit ececb8f5746d80f4f6f63a82e29c3b9c8aad0a47
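
In other words, min_length is now treated as an allocation granularity rather than a bare lower bound: candidate extent lengths are rounded down to a multiple of min_length before being used. Below is a minimal standalone sketch of that rounding, assuming a power-of-two min_length; the p2align name matches the helper used in the diff, but this implementation is an illustrative stand-in, not Ceph's intarith header.

#include <cassert>
#include <cstdint>

// Illustrative align-down helper mirroring the p2align() used in the diff.
static inline uint64_t p2align(uint64_t x, uint64_t align)
{
  assert((align & (align - 1)) == 0);   // align must be a power of two
  return x & ~(align - 1);
}

int main()
{
  const uint64_t min_length = 0x10000;        // 64 KiB allocation granularity
  uint64_t candidate = min_length + 0x1000;   // 68 KiB contiguous free span

  // Old behaviour: candidate merely had to satisfy ">= min_length", so an
  // extent of min_length + 0x1000 could be handed out.
  assert(candidate >= min_length);

  // New behaviour: round down to the granularity first.
  assert(p2align(candidate, min_length) == min_length);
  return 0;
}
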
@@ -40,7 +40,13 @@ void AllocatorLevel01Loose::_analyze_partials(uint64_t pos_start,
       if (!ctx->free_count) {
         ctx->free_l1_pos = l1_pos;
       } else if (l1_pos != next_free_l1_pos){
-        break;
+        // check if already found extent fits min_length
+        if (ctx->free_count * l1_granularity >= min_length) {
+          break;
+        }
+        // if not - proceed with the next one
+        ctx->free_l1_pos = l1_pos;
+        ctx->free_count = 0;
       }
       next_free_l1_pos = l1_pos + 1;
       ++ctx->free_count;
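
The hunk above changes how _analyze_partials accumulates a run of free L1 slots: previously the scan gave up as soon as a gap appeared, whereas now it only stops once the accumulated run already covers min_length, and otherwise restarts the run at the current position. A simplified standalone model of that loop, with a fake free map and names kept close to the diff:

#include <cstdint>
#include <iostream>
#include <vector>

struct search_ctx_t {
  uint64_t free_l1_pos = 0;
  uint64_t free_count = 0;
};

int main()
{
  const uint64_t l1_granularity = 0x10000;
  const uint64_t min_length = 0x20000;               // need two contiguous slots
  std::vector<bool> l1_free = {true, false, true, true, false, true};  // fake L1 free map

  search_ctx_t ctx;
  uint64_t next_free_l1_pos = 0;
  for (uint64_t l1_pos = 0; l1_pos < l1_free.size(); ++l1_pos) {
    if (!l1_free[l1_pos])
      continue;
    if (!ctx.free_count) {
      ctx.free_l1_pos = l1_pos;
    } else if (l1_pos != next_free_l1_pos) {
      // stop only if the run found so far already fits min_length
      if (ctx.free_count * l1_granularity >= min_length)
        break;
      // otherwise restart the run at the current position
      ctx.free_l1_pos = l1_pos;
      ctx.free_count = 0;
    }
    next_free_l1_pos = l1_pos + 1;
    ++ctx.free_count;
  }
  // The old unconditional break would stop at slot 0 with a one-slot run;
  // with the fix the scan settles on the two-slot run starting at slot 2.
  std::cout << "run at slot " << ctx.free_l1_pos
            << ", slots = " << ctx.free_count << std::endl;
  return 0;
}
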
@@ -73,7 +79,8 @@ void AllocatorLevel01Loose::_analyze_partials(uint64_t pos_start,
       if (l >= min_length &&
         (ctx->min_affordable_len == 0 ||
           (l < ctx->min_affordable_len))) {
-        ctx->min_affordable_len = l;
+
+        ctx->min_affordable_len = p2align(l, min_length);
         ctx->min_affordable_l0_pos_start = p0;
       }
       if (mode == STOP_ON_PARTIAL) {
@@ -184,7 +191,7 @@ interval_t AllocatorLevel01Loose::_allocate_l1_contiguous(uint64_t length,
   interval_t res = { 0, 0 };
   uint64_t l0_w = slotset_width * CHILD_PER_SLOT_L0;
 
-  if (length <= l0_granularity) {
+  if (unlikely(length <= l0_granularity)) {
     search_ctx_t ctx;
     _analyze_partials(pos_start, pos_end, l0_granularity, l0_granularity,
       STOP_ON_PARTIAL, &ctx);
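
common/likely.h supplies the usual __builtin_expect wrappers; tagging the small-allocation branches with unlikely() tells the compiler to lay out the common, larger-allocation path as the fall-through case. A minimal illustration using a local stand-in for the macro rather than the Ceph header:

#include <cstdint>
#include <cstdio>

// Local stand-in for the unlikely() macro that common/likely.h provides.
#if defined(__GNUC__) || defined(__clang__)
#define unlikely(x) __builtin_expect(!!(x), 0)
#else
#define unlikely(x) (x)
#endif

static uint64_t pick_path(uint64_t length, uint64_t l0_granularity)
{
  if (unlikely(length <= l0_granularity)) {
    return 1;  // rare: tiny request served by the partial-slot scan
  }
  return 2;    // common case: regular allocation path
}

int main()
{
  printf("path %llu\n", (unsigned long long)pick_path(0x100000, 0x1000));
  return 0;
}
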
@@ -210,12 +217,13 @@ interval_t AllocatorLevel01Loose::_allocate_l1_contiguous(uint64_t length,
       res = interval_t(ctx.free_l1_pos * l1_granularity, l);
       return res;
     }
-  } else if (length == l1_granularity) {
+  } else if (unlikely(length == l1_granularity)) {
     search_ctx_t ctx;
     _analyze_partials(pos_start, pos_end, length, min_length, STOP_ON_EMPTY, &ctx);
 
-    // allocate exactly matched entry if any
+    // allocate using contiguous extent found at l1 if any
     if (ctx.free_count) {
+
       auto l = std::min(length, ctx.free_count * l1_granularity);
       assert((l % l0_granularity) == 0);
       auto pos_end = ctx.free_l1_pos * l0_w + l / l0_granularity;
@@ -240,14 +248,9 @@ interval_t AllocatorLevel01Loose::_allocate_l1_contiguous(uint64_t length,
       return res;
     }
     if (ctx.min_affordable_len) {
-      assert(ctx.min_affordable_len >= min_length);
-      assert((ctx.min_affordable_len % l0_granularity) == 0);
-      auto pos_end = ctx.min_affordable_l0_pos_start +
-        ctx.min_affordable_len / l0_granularity;
-      _mark_alloc_l1_l0(ctx.min_affordable_l0_pos_start, pos_end);
-      res = interval_t(ctx.min_affordable_l0_pos_start * l0_granularity,
-        ctx.min_affordable_len);
-      return res;
+      auto pos0 = ctx.min_affordable_l0_pos_start;
+      _mark_alloc_l1_l0(pos0, pos0 + ctx.min_affordable_len / l0_granularity);
+      return interval_t(pos0 * l0_granularity, ctx.min_affordable_len);
     }
   } else {
     search_ctx_t ctx;
@@ -263,10 +266,11 @@ interval_t AllocatorLevel01Loose::_allocate_l1_contiguous(uint64_t length,
       res = interval_t(ctx.affordable_l0_pos_start * l0_granularity, length);
       return res;
     }
-    // allocate exactly matched entry if any
-    if (ctx.free_count) {
+    // allocate using contiguous extent found at l1 if affordable
+    if (ctx.free_count && ctx.free_count * l1_granularity >= min_length) {
 
-      auto l = std::min(length, ctx.free_count * l1_granularity);
+      auto l = p2align(std::min(length, ctx.free_count * l1_granularity),
+        min_length);
       assert((l % l0_granularity) == 0);
       auto pos_end = ctx.free_l1_pos * l0_w + l / l0_granularity;
 
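
Rounding the candidate length down to min_length can produce zero when the contiguous run found at L1 is shorter than min_length, which is why the condition above now also requires ctx.free_count * l1_granularity >= min_length. A small numeric check of that interaction, standalone and with made-up granularities:

#include <algorithm>
#include <cassert>
#include <cstdint>

static inline uint64_t p2align(uint64_t x, uint64_t align)
{
  return x & ~(align - 1);  // align assumed to be a power of two
}

int main()
{
  const uint64_t l1_granularity = 0x10000;  // 64 KiB per L1 slot
  const uint64_t min_length = 0x40000;      // 256 KiB allocation granularity
  const uint64_t length = 0x100000;         // caller asks for 1 MiB

  uint64_t free_count = 2;                  // only 128 KiB contiguous at L1
  uint64_t l = p2align(std::min(length, free_count * l1_granularity), min_length);
  assert(l == 0);                           // run too short: rounds down to nothing
  assert(!(free_count * l1_granularity >= min_length));  // new guard rejects it

  free_count = 5;                           // 320 KiB contiguous at L1
  l = p2align(std::min(length, free_count * l1_granularity), min_length);
  assert(l == 0x40000);                     // usable portion is one min_length
  return 0;
}
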
@@ -275,14 +279,9 @@ interval_t AllocatorLevel01Loose::_allocate_l1_contiguous(uint64_t length,
       return res;
     }
     if (ctx.min_affordable_len) {
-      assert(ctx.min_affordable_len >= min_length);
-      assert((ctx.min_affordable_len % l0_granularity) == 0);
-      auto pos_end = ctx.min_affordable_l0_pos_start +
-        ctx.min_affordable_len / l0_granularity;
-      _mark_alloc_l1_l0(ctx.min_affordable_l0_pos_start, pos_end);
-      res = interval_t(ctx.min_affordable_l0_pos_start * l0_granularity,
-        ctx.min_affordable_len);
-      return res;
+      auto pos0 = ctx.min_affordable_l0_pos_start;
+      _mark_alloc_l1_l0(pos0, pos0 + ctx.min_affordable_len / l0_granularity);
+      return interval_t(pos0 * l0_granularity, ctx.min_affordable_len);
     }
   }
   return res;
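
Both min_affordable_len branches above are simplifications rather than behaviour changes: the dropped asserts become redundant once min_affordable_len is already aligned by p2align(l, min_length), and the pos0 temporary just shortens the offset arithmetic. A tiny standalone check that the old and new forms compute the same interval, using made-up values:

#include <cassert>
#include <cstdint>

struct interval_t { uint64_t offset; uint64_t length; };

int main()
{
  const uint64_t l0_granularity = 0x1000;
  const uint64_t min_affordable_l0_pos_start = 42;
  const uint64_t min_affordable_len = 0x8000;

  // old form
  uint64_t pos_end = min_affordable_l0_pos_start +
    min_affordable_len / l0_granularity;
  interval_t old_res = { min_affordable_l0_pos_start * l0_granularity,
                         min_affordable_len };

  // new form
  auto pos0 = min_affordable_l0_pos_start;
  interval_t new_res = { pos0 * l0_granularity, min_affordable_len };

  assert(pos_end == pos0 + min_affordable_len / l0_granularity);
  assert(old_res.offset == new_res.offset && old_res.length == new_res.length);
  return 0;
}
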
@@ -32,6 +32,7 @@ typedef std::vector<interval_t> interval_vector_t;
 typedef std::vector<slot_t> slot_vector_t;
 #else
 #include "include/assert.h"
+#include "common/likely.h"
 #include "os/bluestore/bluestore_types.h"
 #include "include/mempool.h"
 
@@ -213,14 +214,35 @@ class AllocatorLevel01Loose : public AllocatorLevel01
     uint64_t len,
     interval_vector_t* res)
   {
     auto it = res->rbegin();
+
+    if (max_length) {
+      if (it != res->rend() && it->offset + it->length == offset) {
+        auto l = max_length - it->length;
+        if (l >= len) {
+          it->length += len;
+          return;
+        } else {
+          offset += l;
+          len -= l;
+          it->length += l;
+        }
+      }
+
+      while (len > max_length) {
+        res->emplace_back(offset, max_length);
+        offset += max_length;
+        len -= max_length;
+      }
+      res->emplace_back(offset, len);
+      return;
+    }
 
     if (it != res->rend() && it->offset + it->length == offset) {
       it->length += len;
     } else {
       res->emplace_back(offset, len);
     }
-    res->emplace_back(offset, len);
   }
 
   bool _allocate_l0(uint64_t length,
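
The new max_length branch in the helper above both tops up the previous interval (up to max_length) and chops the remainder into max_length-sized pieces. Here is a standalone copy of that emplace logic with a small usage check; interval_t is a local stand-in for the allocator's interval type:

#include <cassert>
#include <cstdint>
#include <vector>

struct interval_t {
  uint64_t offset;
  uint64_t length;
  interval_t(uint64_t o, uint64_t l) : offset(o), length(l) {}
};
using interval_vector_t = std::vector<interval_t>;

// Standalone copy of the fragmenting emplace logic shown in the hunk above.
void fragment_and_emplace(uint64_t max_length, uint64_t offset,
                          uint64_t len, interval_vector_t* res)
{
  auto it = res->rbegin();
  if (max_length) {
    if (it != res->rend() && it->offset + it->length == offset) {
      auto l = max_length - it->length;
      if (l >= len) {
        it->length += len;
        return;
      } else {
        offset += l;
        len -= l;
        it->length += l;
      }
    }
    while (len > max_length) {
      res->emplace_back(offset, max_length);
      offset += max_length;
      len -= max_length;
    }
    res->emplace_back(offset, len);
    return;
  }
  if (it != res->rend() && it->offset + it->length == offset) {
    it->length += len;
  } else {
    res->emplace_back(offset, len);
  }
}

int main()
{
  interval_vector_t res;
  res.emplace_back(0x0, 0x3000);
  // Append 0x9000 bytes adjacent to the last interval, capping every piece at
  // 0x4000: the tail interval grows to 0x4000, then two 0x4000 pieces follow.
  fragment_and_emplace(0x4000, 0x3000, 0x9000, &res);
  assert(res.size() == 3);
  assert(res[0].length == 0x4000);
  assert(res[1].offset == 0x4000 && res[1].length == 0x4000);
  assert(res[2].offset == 0x8000 && res[2].length == 0x4000);
  return 0;
}
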
@@ -564,6 +586,7 @@ class AllocatorLevel02 : public AllocatorLevel
 
   void _init(uint64_t capacity, uint64_t _alloc_unit, bool mark_as_free = true)
   {
+    assert(isp2(_alloc_unit));
     l1._init(capacity, _alloc_unit, mark_as_free);
 
     l2_granularity =
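
isp2() is the usual power-of-two test from Ceph's arithmetic helpers; asserting it in _init() above makes explicit that the granularities derived from _alloc_unit rely on power-of-two mask arithmetic such as p2align(). An illustrative one-liner equivalent, not the Ceph header:

#include <cassert>
#include <cstdint>

// Illustrative equivalent of the isp2() check asserted in _init() above.
constexpr bool isp2(uint64_t v)
{
  return v != 0 && (v & (v - 1)) == 0;
}

int main()
{
  static_assert(isp2(0x1000), "4 KiB alloc unit is a power of two");
  static_assert(!isp2(0x3000), "12 KiB is not");
  assert(isp2(UINT64_C(1) << 20));
  return 0;
}
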
@@ -651,7 +674,8 @@ class AllocatorLevel02 : public AllocatorLevel
   {
     uint64_t prev_allocated = *allocated;
     uint64_t d = CHILD_PER_SLOT;
-    assert(min_length <= l2_granularity);
+    assert(isp2(min_length));
+    assert(min_length <= l1._level_granularity());
     assert(max_length == 0 || max_length >= min_length);
     assert(max_length == 0 || (max_length % min_length) == 0);
     assert(length >= min_length);
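
With the change, allocate_l2 no longer compares min_length against l2_granularity; instead it requires a power-of-two min_length that does not exceed the L1 granularity, alongside the existing max_length and length constraints. A compact restatement of those checks as a standalone predicate, with parameter names mirroring the asserts above; this is only an illustration, not Ceph API:

#include <cassert>
#include <cstdint>

// Restates the precondition set asserted at the top of allocate_l2 above.
bool valid_allocate_l2_args(uint64_t length, uint64_t min_length,
                            uint64_t max_length, uint64_t l1_granularity)
{
  bool p2 = min_length != 0 && (min_length & (min_length - 1)) == 0;
  return p2 &&
         min_length <= l1_granularity &&
         (max_length == 0 || max_length >= min_length) &&
         (max_length == 0 || (max_length % min_length) == 0) &&
         length >= min_length;
}

int main()
{
  // 1 MiB request, 64 KiB granularity, unlimited max, 64 KiB L1 slots: fine
  assert(valid_allocate_l2_args(0x100000, 0x10000, 0, 0x10000));
  // non power-of-two min_length is rejected by the new isp2() assert
  assert(!valid_allocate_l2_args(0x100000, 0x18000, 0, 0x10000));
  return 0;
}
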
@@ -421,18 +421,18 @@ TEST(TestAllocatorLevel01, test_l2_huge)
       uint64_t allocated4 = 0;
       interval_vector_t a4;
       al2.allocate_l2(0x1000, 0x1000, &allocated4, &a4);
-      ASSERT_TRUE(a4.size() == 1);
-      ASSERT_TRUE(allocated4 == 0x1000);
-      ASSERT_TRUE(a4[0].offset == i);
-      ASSERT_TRUE(a4[0].length == 0x1000);
+      ASSERT_EQ(a4.size(), 1);
+      ASSERT_EQ(allocated4, 0x1000);
+      ASSERT_EQ(a4[0].offset, i);
+      ASSERT_EQ(a4[0].length, 0x1000);
 
       allocated4 = 0;
       a4.clear();
-      al2.allocate_l2(_1m - 0x1000, _1m - 0x1000, &allocated4, &a4);
-      ASSERT_TRUE(a4.size() == 1);
-      ASSERT_TRUE(allocated4 == _1m - 0x1000);
-      ASSERT_TRUE(a4[0].offset == i + 0x1000);
-      ASSERT_TRUE(a4[0].length == _1m - 0x1000);
+      al2.allocate_l2(_1m - 0x1000, 0x1000, &allocated4, &a4);
+      ASSERT_EQ(a4.size(), 1);
+      ASSERT_EQ(allocated4, _1m - 0x1000);
+      ASSERT_EQ(a4[0].offset, i + 0x1000);
+      ASSERT_EQ(a4[0].length, _1m - 0x1000);
       if (0 == (i % (1 * 1024 * _1m))) {
         std::cout << "allocH " << i / 1024 / 1024 << " mb of "
           << capacity / 1024 / 1024 << std::endl;
@@ -454,7 +454,7 @@ TEST(TestAllocatorLevel01, test_l2_huge)
       uint64_t allocated = 0;
       interval_vector_t a;
       al2.allocate_l2(0x2000, 0x2000, &allocated, &a);
-      ASSERT_TRUE(a.size() == 0);
+      ASSERT_EQ(a.size(), 0);
     }
     std::cout << "End try in " << time(NULL) - t << " seconds" << std::endl;
   }
@@ -465,12 +465,12 @@ TEST(TestAllocatorLevel01, test_l2_huge)
       uint64_t allocated = 0;
       interval_vector_t a;
       al2.allocate_l2(_2m, _2m, &allocated, &a);
-      ASSERT_TRUE(a.size() == 0);
+      ASSERT_EQ(a.size(), 0);
     }
     std::cout << "End try in " << time(NULL) - t << " seconds" << std::endl;
   }
 
-  ASSERT_TRUE((capacity / _1m) * 0x1000 == al2.debug_get_free());
+  ASSERT_EQ((capacity / _1m) * 0x1000, al2.debug_get_free());
 
   std::cout << "Done L2 Huge" << std::endl;
 }
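
The test changes are mechanical apart from one call: ASSERT_TRUE(x == y) becomes ASSERT_EQ(x, y), which checks the same condition but prints both operands on failure, and the second allocate_l2 call passes 0x1000 as min_length instead of repeating the full length, presumably because min_length must now be a power of two. A tiny hypothetical gtest snippet showing why ASSERT_EQ is preferred; it is not part of the actual suite:

#include <cstdint>
#include <gtest/gtest.h>

// Hypothetical test: both assertions check the same thing, but on failure
// ASSERT_EQ reports the expected and actual values, while ASSERT_TRUE only
// reports that the expression evaluated to false.
TEST(AssertStyle, EqShowsBothValues)
{
  uint64_t allocated = 0x1000;
  ASSERT_TRUE(allocated == 0x1000);
  ASSERT_EQ(allocated, 0x1000u);
}
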