os/bluestore: respect min_length as allocation granularity for new
bitmap allocator.

Before this fix min_length was used as a mere minimum threshold, which
allowed e.g. an allocated extent length equal to min_length + 1.

Signed-off-by: Igor Fedotov <ifedotov@suse.com>
(cherry picked from commit 8a188b2)
ifed01 committed Mar 17, 2019
1 parent 617a1f9 commit e3f9dec
Showing 3 changed files with 61 additions and 38 deletions.
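The gist of the fix, before the diffs: min_length is now treated as an allocation granularity, not just a lower bound. A minimal standalone sketch of the before/after behavior (the p2align helper and all values are illustrative only, mirroring what Ceph's P2ALIGN macro computes):

    #include <cassert>
    #include <cstdint>

    // Round x down to a multiple of align; align must be a power of two.
    static uint64_t p2align(uint64_t x, uint64_t align) {
      return x & ~(align - 1);
    }

    int main() {
      const uint64_t min_length = 0x8000; // hypothetical granularity
      const uint64_t found = 0x9000;      // free run located by the scan

      // Pre-fix: any length >= min_length was accepted verbatim, so an
      // extent such as 0x9000 (not a multiple of min_length) got returned.
      assert(found >= min_length);

      // Post-fix: candidate lengths are aligned down to the granularity.
      assert(p2align(found, min_length) == 0x8000);
      return 0;
    }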
47 changes: 23 additions & 24 deletions src/os/bluestore/fastbmap_allocator_impl.cc
@@ -40,7 +40,13 @@ void AllocatorLevel01Loose::_analyze_partials(uint64_t pos_start,
       if (!ctx->free_count) {
         ctx->free_l1_pos = l1_pos;
       } else if (l1_pos != next_free_l1_pos){
-        break;
+        // check if already found extent fits min_length
+        if (ctx->free_count * l1_granularity >= min_length) {
+          break;
+        }
+        // if not - proceed with the next one
+        ctx->free_l1_pos = l1_pos;
+        ctx->free_count = 0;
       }
       next_free_l1_pos = l1_pos + 1;
       ++ctx->free_count;
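The loop above used to bail out at the first discontinuity. With the fix it gives up on a run only once the accumulated run already satisfies min_length; otherwise it restarts counting at the new position. A simplified standalone rendering of that scan policy (the types and names here are made up for illustration):

    #include <cstdint>
    #include <vector>

    struct Run { uint64_t pos = 0; uint64_t count = 0; };

    // Scan a free/used map for a run of free L1 slots covering at least
    // min_length bytes. The caller must still verify the final run length.
    Run find_free_run(const std::vector<bool>& is_free,
                      uint64_t l1_granularity, uint64_t min_length) {
      Run run;
      for (uint64_t pos = 0; pos < is_free.size(); ++pos) {
        if (!is_free[pos]) {
          // Run interrupted: keep it only if it is already long enough,
          // otherwise drop it and keep scanning (the fixed behavior).
          if (run.count * l1_granularity >= min_length)
            break;
          run.count = 0;
          continue;
        }
        if (run.count == 0)
          run.pos = pos;
        ++run.count;
      }
      return run;
    }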
@@ -73,7 +79,8 @@ void AllocatorLevel01Loose::_analyze_partials(uint64_t pos_start,
         if (l >= min_length &&
             (ctx->min_affordable_len == 0 ||
              (l < ctx->min_affordable_len))) {
-          ctx->min_affordable_len = l;
+
+          ctx->min_affordable_len = P2ALIGN(l, min_length);
           ctx->min_affordable_l0_pos_start = p0;
         }
         if (mode == STOP_ON_PARTIAL) {
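P2ALIGN(l, min_length) rounds the candidate length down to a multiple of min_length; in Ceph it comes from the intarith macros and is, as far as I recall, defined along the lines of ((x) & -(align)). A quick self-contained check of that identity (values are arbitrary):

    #include <cassert>
    #include <cstdint>

    int main() {
      const uint64_t min_length = 0x10000;
      // (x & -align) clears the low bits for a power-of-two align.
      assert((0x13000ULL & -min_length) == 0x10000); // rounded down
      assert((0x20000ULL & -min_length) == 0x20000); // already aligned
      return 0;
    }

Note that the enclosing check l >= min_length guarantees the aligned value never drops to zero here.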
@@ -184,7 +191,7 @@ interval_t AllocatorLevel01Loose::_allocate_l1_contiguous(uint64_t length,
   interval_t res = { 0, 0 };
   uint64_t l0_w = slotset_width * CHILD_PER_SLOT_L0;
 
-  if (length <= l0_granularity) {
+  if (unlikely(length <= l0_granularity)) {
     search_ctx_t ctx;
     _analyze_partials(pos_start, pos_end, l0_granularity, l0_granularity,
       STOP_ON_PARTIAL, &ctx);
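The unlikely() hint (hence the new common/likely.h include in the header file below) marks the small-allocation branches as cold; requests at or below l0/l1 granularity are presumed rare relative to the general path. The Ceph macros wrap the GCC/Clang builtin, roughly:

    // Approximately what common/likely.h provides (GCC/Clang builtins):
    #ifndef likely
    #define likely(x)   __builtin_expect((x), 1)
    #endif
    #ifndef unlikely
    #define unlikely(x) __builtin_expect((x), 0)
    #endif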
@@ -210,12 +217,13 @@ interval_t AllocatorLevel01Loose::_allocate_l1_contiguous(uint64_t length,
       res = interval_t(ctx.free_l1_pos * l1_granularity, l);
       return res;
     }
-  } else if (length == l1_granularity) {
+  } else if (unlikely(length == l1_granularity)) {
     search_ctx_t ctx;
     _analyze_partials(pos_start, pos_end, length, min_length, STOP_ON_EMPTY, &ctx);
 
-    // allocate exactly matched entry if any
+    // allocate using contiguous extent found at l1 if any
     if (ctx.free_count) {
+
       auto l = std::min(length, ctx.free_count * l1_granularity);
       assert((l % l0_granularity) == 0);
       auto pos_end = ctx.free_l1_pos * l0_w + l / l0_granularity;
@@ -240,14 +248,9 @@ interval_t AllocatorLevel01Loose::_allocate_l1_contiguous(uint64_t length,
       return res;
     }
     if (ctx.min_affordable_len) {
-      assert(ctx.min_affordable_len >= min_length);
-      assert((ctx.min_affordable_len % l0_granularity) == 0);
-      auto pos_end = ctx.min_affordable_l0_pos_start +
-        ctx.min_affordable_len / l0_granularity;
-      _mark_alloc_l1_l0(ctx.min_affordable_l0_pos_start, pos_end);
-      res = interval_t(ctx.min_affordable_l0_pos_start * l0_granularity,
-        ctx.min_affordable_len);
-      return res;
+      auto pos0 = ctx.min_affordable_l0_pos_start;
+      _mark_alloc_l1_l0(pos0, pos0 + ctx.min_affordable_len / l0_granularity);
+      return interval_t(pos0 * l0_granularity, ctx.min_affordable_len);
     }
   } else {
     search_ctx_t ctx;
@@ -263,10 +266,11 @@ interval_t AllocatorLevel01Loose::_allocate_l1_contiguous(uint64_t length,
       res = interval_t(ctx.affordable_l0_pos_start * l0_granularity, length);
       return res;
     }
-    // allocate exactly matched entry if any
-    if (ctx.free_count) {
-      auto l = std::min(length, ctx.free_count * l1_granularity);
+    // allocate using contiguous extent found at l1 if affordable
+    if (ctx.free_count && ctx.free_count * l1_granularity >= min_length) {
+
+      auto l = P2ALIGN(std::min(length, ctx.free_count * l1_granularity),
+        min_length);
       assert((l % l0_granularity) == 0);
       auto pos_end = ctx.free_l1_pos * l0_w + l / l0_granularity;
 
@@ -275,14 +279,9 @@ interval_t AllocatorLevel01Loose::_allocate_l1_contiguous(uint64_t length,
       return res;
     }
     if (ctx.min_affordable_len) {
-      assert(ctx.min_affordable_len >= min_length);
-      assert((ctx.min_affordable_len % l0_granularity) == 0);
-      auto pos_end = ctx.min_affordable_l0_pos_start +
-        ctx.min_affordable_len / l0_granularity;
-      _mark_alloc_l1_l0(ctx.min_affordable_l0_pos_start, pos_end);
-      res = interval_t(ctx.min_affordable_l0_pos_start * l0_granularity,
-        ctx.min_affordable_len);
-      return res;
+      auto pos0 = ctx.min_affordable_l0_pos_start;
+      _mark_alloc_l1_l0(pos0, pos0 + ctx.min_affordable_len / l0_granularity);
+      return interval_t(pos0 * l0_granularity, ctx.min_affordable_len);
     }
   }
   return res;
28 changes: 26 additions & 2 deletions src/os/bluestore/fastbmap_allocator_impl.h
@@ -33,6 +33,7 @@ typedef std::vector<interval_t> interval_vector_t;
 typedef std::vector<slot_t> slot_vector_t;
 #else
 #include "include/assert.h"
+#include "common/likely.h"
 #include "os/bluestore/bluestore_types.h"
 #include "include/mempool.h"
 
@@ -214,14 +215,35 @@ class AllocatorLevel01Loose : public AllocatorLevel01
     uint64_t len,
     interval_vector_t* res)
   {
+    auto it = res->rbegin();
+
     if (max_length) {
+      if (it != res->rend() && it->offset + it->length == offset) {
+        auto l = max_length - it->length;
+        if (l >= len) {
+          it->length += len;
+          return;
+        } else {
+          offset += l;
+          len -= l;
+          it->length += l;
+        }
+      }
+
       while (len > max_length) {
         res->emplace_back(offset, max_length);
         offset += max_length;
         len -= max_length;
       }
+      res->emplace_back(offset, len);
+      return;
     }
-    res->emplace_back(offset, len);
+
+    if (it != res->rend() && it->offset + it->length == offset) {
+      it->length += len;
+    } else {
+      res->emplace_back(offset, len);
+    }
   }
 
   bool _allocate_l0(uint64_t length,
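The reworked _fragment_and_emplace above now tops up the last emitted interval before splitting the remainder into max_length-sized pieces, and merges contiguous intervals in the uncapped case. A self-contained rendering of the same logic for experimentation (interval_t is simplified here):

    #include <cstdint>
    #include <vector>

    struct interval_t {
      uint64_t offset;
      uint64_t length;
      interval_t(uint64_t o, uint64_t l) : offset(o), length(l) {}
    };
    using interval_vector_t = std::vector<interval_t>;

    // Standalone rendering of the logic above. Assumes every interval
    // already in res was emitted under the same max_length cap.
    void fragment_and_emplace(uint64_t max_length, uint64_t offset,
                              uint64_t len, interval_vector_t* res) {
      auto it = res->rbegin();
      if (max_length) {
        // Top up the previous interval if it abuts this extent and
        // still has headroom below the cap.
        if (it != res->rend() && it->offset + it->length == offset) {
          auto l = max_length - it->length;
          if (l >= len) {
            it->length += len;
            return;
          }
          offset += l;
          len -= l;
          it->length += l;
        }
        // Split whatever remains into max_length-sized pieces.
        while (len > max_length) {
          res->emplace_back(offset, max_length);
          offset += max_length;
          len -= max_length;
        }
        res->emplace_back(offset, len);
        return;
      }
      // No cap: merge with the previous interval when contiguous.
      if (it != res->rend() && it->offset + it->length == offset) {
        it->length += len;
      } else {
        res->emplace_back(offset, len);
      }
    }

For example, with max_length = 0x4000 and a trailing interval {0, 0x2000}, emplacing offset 0x2000 / len 0x9000 yields {0, 0x4000}, {0x4000, 0x4000}, {0x8000, 0x3000}.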
@@ -565,6 +587,7 @@ class AllocatorLevel02 : public AllocatorLevel
 
   void _init(uint64_t capacity, uint64_t _alloc_unit, bool mark_as_free = true)
   {
+    assert(ISP2(_alloc_unit));
     l1._init(capacity, _alloc_unit, mark_as_free);
 
     l2_granularity =
@@ -652,7 +675,8 @@ class AllocatorLevel02 : public AllocatorLevel
   {
     uint64_t prev_allocated = *allocated;
     uint64_t d = CHILD_PER_SLOT;
-    assert(min_length <= l2_granularity);
+    assert(ISP2(min_length));
+    assert(min_length <= l1._level_granularity());
     assert(max_length == 0 || max_length >= min_length);
     assert(max_length == 0 || (max_length % min_length) == 0);
     assert(length >= min_length);
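ISP2 comes from the same intarith macro family as P2ALIGN and tests for a power of two, essentially (((x) & ((x) - 1)) == 0). min_length must now be a power of two because P2ALIGN's bitmask trick is only valid for power-of-two alignments. A small check under those assumptions:

    #include <cassert>
    #include <cstdint>

    constexpr bool isp2(uint64_t x) { return (x & (x - 1)) == 0; }

    int main() {
      assert(isp2(0x1000));             // 4 KiB allocation unit: fine
      assert(!isp2(0x100000 - 0x1000)); // _1m - 0x1000: now rejected
      return 0;
    }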
24 changes: 12 additions & 12 deletions src/test/objectstore/fastbmap_allocator_test.cc
@@ -421,18 +421,18 @@ TEST(TestAllocatorLevel01, test_l2_huge)
     uint64_t allocated4 = 0;
     interval_vector_t a4;
     al2.allocate_l2(0x1000, 0x1000, &allocated4, &a4);
-    ASSERT_TRUE(a4.size() == 1);
-    ASSERT_TRUE(allocated4 == 0x1000);
-    ASSERT_TRUE(a4[0].offset == i);
-    ASSERT_TRUE(a4[0].length == 0x1000);
+    ASSERT_EQ(a4.size(), 1);
+    ASSERT_EQ(allocated4, 0x1000);
+    ASSERT_EQ(a4[0].offset, i);
+    ASSERT_EQ(a4[0].length, 0x1000);
 
     allocated4 = 0;
     a4.clear();
-    al2.allocate_l2(_1m - 0x1000, _1m - 0x1000, &allocated4, &a4);
-    ASSERT_TRUE(a4.size() == 1);
-    ASSERT_TRUE(allocated4 == _1m - 0x1000);
-    ASSERT_TRUE(a4[0].offset == i + 0x1000);
-    ASSERT_TRUE(a4[0].length == _1m - 0x1000);
+    al2.allocate_l2(_1m - 0x1000, 0x1000, &allocated4, &a4);
+    ASSERT_EQ(a4.size(), 1);
+    ASSERT_EQ(allocated4, _1m - 0x1000);
+    ASSERT_EQ(a4[0].offset, i + 0x1000);
+    ASSERT_EQ(a4[0].length, _1m - 0x1000);
     if (0 == (i % (1 * 1024 * _1m))) {
       std::cout << "allocH " << i / 1024 / 1024 << " mb of "
         << capacity / 1024 / 1024 << std::endl;
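Two things change in this test. ASSERT_TRUE(a == b) becomes ASSERT_EQ(a, b), which prints both operand values on failure instead of just "false". And the second allocate_l2 call now passes 0x1000 as min_length; this is forced by the new assertions, since _1m - 0x1000 (0xff000) is not a power of two and would trip assert(ISP2(min_length)). A compile-time illustration:

    // Why 0x1000 is a valid min_length under assert(ISP2(min_length)):
    static_assert((0x1000 & (0x1000 - 1)) == 0, "0x1000 is a power of two");
    // 0xff000 & (0xff000 - 1) == 0xfe000 != 0, so _1m - 0x1000 is not.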
@@ -454,7 +454,7 @@ TEST(TestAllocatorLevel01, test_l2_huge)
       uint64_t allocated = 0;
       interval_vector_t a;
       al2.allocate_l2(0x2000, 0x2000, &allocated, &a);
-      ASSERT_TRUE(a.size() == 0);
+      ASSERT_EQ(a.size(), 0);
     }
     std::cout << "End try in " << time(NULL) - t << " seconds" << std::endl;
   }
@@ -465,12 +465,12 @@ TEST(TestAllocatorLevel01, test_l2_huge)
       uint64_t allocated = 0;
       interval_vector_t a;
       al2.allocate_l2(_2m, _2m, &allocated, &a);
-      ASSERT_TRUE(a.size() == 0);
+      ASSERT_EQ(a.size(), 0);
     }
     std::cout << "End try in " << time(NULL) - t << " seconds" << std::endl;
   }
 
-  ASSERT_TRUE((capacity / _1m) * 0x1000 == al2.debug_get_free());
+  ASSERT_EQ((capacity / _1m) * 0x1000, al2.debug_get_free());
 
   std::cout << "Done L2 Huge" << std::endl;
 }
