mimic: os/bluestore: backport new bitmap allocator #26983

Merged
21 commits, merged on Apr 1, 2019
Changes from 1 commit

Commits (21):
480b285  test/allocator: get rid off bitmap allocator specifics and introduce new (ifed01, May 3, 2018)
dd0912f  os/bluestore: add new bitmap allocator (ifed01, May 3, 2018)
ba5c402  test/allocator: move bluestore allocator's benchmarks to a standalone UT (ifed01, May 4, 2018)
d8d6cdb  os/bluestore: get rid off allocator's reserve method (ifed01, May 7, 2018)
4773293  os/bluestore: add release(PExtentVector) helper to Allocator class to (ifed01, May 7, 2018)
5525533  os/bluestore: fix improper access to a BitmapFastAllocator::last_pos (ifed01, May 10, 2018)
d42bb17  os/bluestore: align BitMap allocator's init_rm_free/init_add_free par… (ifed01, May 11, 2018)
5651097  os/bluestore: remove original bitmap allocator (ifed01, May 14, 2018)
506dbcc  os/bluestore: more verbose logging in new bitmap allocator (ifed01, May 30, 2018)
3885860  os/bluestore: cosmetic cleanup in new bitmap allocator. (ifed01, May 31, 2018)
33482e0  os/bluestore: properly respect min_length when analysing partially free (ifed01, May 31, 2018)
8b9fb75  os/bluestore: cosmetic new allocator internal method rename. (ifed01, May 31, 2018)
ececb8f  os/bluestore: respect min_length as allocation granularity for new (ifed01, May 31, 2018)
c645f2f  test/objectstore/unitetest_fastbmap_allocator: replace ASSERT_TRUE with (ifed01, Jun 1, 2018)
25b7d0e  os/bluestore: perform allocations aligned with min_length in new bit… (ifed01, Jun 1, 2018)
811857a  os/bluestore: rename new bitmap allocator class to BitmapAllocator. (ifed01, Jun 12, 2018)
d08e2b7  os/bluestore: provide a rough estimate for bitmap allocator (ifed01, Jun 15, 2018)
0bdbe66  os/bluestore: fix overlappedd allocation returned by bitmap allocator (ifed01, Jun 25, 2018)
e5de298  os/bluestore: fix minor issues in bitmap logging output (ifed01, Jun 25, 2018)
cf984c8  qa/objectstore: bluestore -> bluestore-{bitmap,stupid} (liewegas, Nov 30, 2018)
f26b219  qa/suites: fix bluestore links (liewegas, Dec 5, 2018)

os/bluestore: provide a rough estimate for bitmap allocator fragmentation.

The approach is to count 'partial' and 'free' slots at L1 on the fly and use
the partial / (partial + free) ratio as a rough fragmentation estimate.

Signed-off-by: Igor Fedotov <ifedotov@suse.com>
(cherry picked from commit dbbe12f)
ifed01 committed Jun 15, 2018
commit d08e2b798186d6d54d71c91dbfc14912fec445bf
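Before the hunks, here is the idea in isolation: a minimal, standalone C++ sketch of the estimate (illustrative only; in the patch the counters live in AllocatorLevel01 as partial_l1_count and unalloc_l1_count and are maintained by _mark_l1_on_l0() and _allocate_l1(), as the hunks below show).

```cpp
// Minimal sketch of the fragmentation estimate, assuming only what the
// commit message states: count partially-free and fully-free L1 entries
// and report partial / (partial + free). Not the patch itself.
#include <cstddef>

struct L1FragEstimate {
  size_t partial_l1_count = 0;  // L1 entries with some, but not all, space free
  size_t unalloc_l1_count = 0;  // L1 entries that are completely free

  double get_fragmentation() const {
    double res = 0.0;
    auto total = unalloc_l1_count + partial_l1_count;
    if (total) {
      res = double(partial_l1_count) / double(total);
    }
    return res;  // 0.0 == no fragmentation, 1.0 == every free entry is partial
  }
};
```
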
@@ -38,6 +38,10 @@ class BitmapAllocator : public Allocator,
void dump() override
{
}
double get_fragmentation(uint64_t) override
{
return _get_fragmentation();
}

void init_add_free(uint64_t offset, uint64_t length) override;
void init_rm_free(uint64_t offset, uint64_t length) override;
@@ -218,51 +218,63 @@ void AllocatorLevel01Loose::_mark_l1_on_l0(int64_t l0_pos, int64_t l0_pos_end)

int64_t idx = l0_pos / bits_per_slot;
int64_t idx_end = l0_pos_end / bits_per_slot;
bool was_all_free = true;
bool was_all_allocated = true;
slot_t mask_to_apply = L1_ENTRY_NOT_USED;

auto l1_pos = l0_pos / d0;

while (idx < idx_end) {
if (l0[idx] == all_slot_clear) {
was_all_free = false;

// if not all prev slots are allocated then no need to check the
// current slot set, it's partial
++idx;
idx =
was_all_allocated ? idx : p2roundup(idx, int64_t(slotset_width));
if (mask_to_apply == L1_ENTRY_NOT_USED) {
mask_to_apply = L1_ENTRY_FULL;
} else if (mask_to_apply != L1_ENTRY_FULL) {
idx = p2roundup(idx, int64_t(slotset_width));
mask_to_apply = L1_ENTRY_PARTIAL;
}
} else if (l0[idx] == all_slot_set) {
// all free
was_all_allocated = false;
// if not all prev slots are free then no need to check the
// current slot set, it's partial
++idx;
idx = was_all_free ? idx : p2roundup(idx, int64_t(slotset_width));
if (mask_to_apply == L1_ENTRY_NOT_USED) {
mask_to_apply = L1_ENTRY_FREE;
} else if (mask_to_apply != L1_ENTRY_FREE) {
idx = p2roundup(idx, int64_t(slotset_width));
mask_to_apply = L1_ENTRY_PARTIAL;
}
} else {
// no need to check the current slot set, it's partial
was_all_free = false;
was_all_allocated = false;
mask_to_apply = L1_ENTRY_PARTIAL;
++idx;
idx = p2roundup(idx, int64_t(slotset_width));
}
if ((idx % slotset_width) == 0) {

assert(mask_to_apply != L1_ENTRY_NOT_USED);
uint64_t shift = (l1_pos % l1_w) * L1_ENTRY_WIDTH;
slot_t& slot_val = l1[l1_pos / l1_w];
slot_val &= ~(uint64_t(L1_ENTRY_MASK) << shift);

if (was_all_allocated) {
assert(!was_all_free);
slot_val |= uint64_t(L1_ENTRY_FULL) << shift;
} else if (was_all_free) {
assert(!was_all_allocated);
slot_val |= uint64_t(L1_ENTRY_FREE) << shift;
} else {
slot_val |= uint64_t(L1_ENTRY_PARTIAL) << shift;
auto mask = slot_t(L1_ENTRY_MASK) << shift;

slot_t old_mask = (slot_val & mask) >> shift;
switch(old_mask) {
case L1_ENTRY_FREE:
unalloc_l1_count--;
break;
case L1_ENTRY_PARTIAL:
partial_l1_count--;
break;
}
slot_val &= ~mask;
slot_val |= slot_t(mask_to_apply) << shift;
switch(mask_to_apply) {
case L1_ENTRY_FREE:
unalloc_l1_count++;
break;
case L1_ENTRY_PARTIAL:
partial_l1_count++;
break;
}
was_all_free = true;
was_all_allocated = true;
mask_to_apply = L1_ENTRY_NOT_USED;
++l1_pos;
}
}
@@ -465,14 +477,27 @@ bool AllocatorLevel01Loose::_allocate_l1(uint64_t length,
(idx * d1 + free_pos / L1_ENTRY_WIDTH + 1) * l0_w,
allocated,
res);
slot_val &= (~slot_t(L1_ENTRY_MASK)) << free_pos;

auto mask = slot_t(L1_ENTRY_MASK) << free_pos;

slot_t old_mask = (slot_val & mask) >> free_pos;
switch(old_mask) {
case L1_ENTRY_FREE:
unalloc_l1_count--;
break;
case L1_ENTRY_PARTIAL:
partial_l1_count--;
break;
}
slot_val &= ~mask;
if (empty) {
// the next line is no op with the current L1_ENTRY_FULL but left
// as-is for the sake of uniformity and to avoid potential errors
// in future
slot_val |= slot_t(L1_ENTRY_FULL) << free_pos;
} else {
slot_val |= slot_t(L1_ENTRY_PARTIAL) << free_pos;
partial_l1_count++;
}
if (length <= *allocated || slot_val == all_slot_clear) {
break;
@@ -96,6 +96,18 @@ class AllocatorLevel01 : public AllocatorLevel
uint64_t l0_granularity = 0; // space per entry
uint64_t l1_granularity = 0; // space per entry

size_t partial_l1_count = 0;
size_t unalloc_l1_count = 0;

double get_fragmentation() const {
double res = 0.0;
auto total = unalloc_l1_count + partial_l1_count;
if (total) {
res = double(partial_l1_count) / double(total);
}
return res;
}

uint64_t _level_granularity() const override
{
return l1_granularity;
@@ -122,6 +134,7 @@ class AllocatorLevel01Loose : public AllocatorLevel01
L1_ENTRY_MASK = (1 << L1_ENTRY_WIDTH) - 1,
L1_ENTRY_FULL = 0x00,
L1_ENTRY_PARTIAL = 0x01,
L1_ENTRY_NOT_USED = 0x02,
L1_ENTRY_FREE = 0x03,
CHILD_PER_SLOT = bits_per_slot / L1_ENTRY_WIDTH, // 32
CHILD_PER_SLOT_L0 = bits_per_slot, // 64
@@ -265,11 +278,13 @@ class AllocatorLevel01Loose : public AllocatorLevel01
l1.resize(slot_count, mark_as_free ? all_slot_set : all_slot_clear);

// l0 slot count
slot_count = aligned_capacity / _alloc_unit / bits_per_slot;
size_t slot_count_l0 = aligned_capacity / _alloc_unit / bits_per_slot;
// we use set bit(s) as a marker for (partially) free entry
l0.resize(slot_count, mark_as_free ? all_slot_set : all_slot_clear);
l0.resize(slot_count_l0, mark_as_free ? all_slot_set : all_slot_clear);

partial_l1_count = unalloc_l1_count = 0;
if (mark_as_free) {
unalloc_l1_count = slot_count * _children_per_slot();
auto l0_pos_no_use = p2roundup((int64_t)capacity, (int64_t)l0_granularity) / l0_granularity;
_mark_alloc_l1_l0(l0_pos_no_use, aligned_capacity / l0_granularity);
}
@@ -729,6 +744,10 @@ class AllocatorLevel02 : public AllocatorLevel
{
last_pos = 0;
}
double _get_fragmentation() {
std::lock_guard<std::mutex> l(lock);
return l1.get_fragmentation();
}
};

#endif
@@ -227,24 +227,28 @@ TEST_P(AllocTest, test_alloc_non_aligned_len)

TEST_P(AllocTest, test_alloc_fragmentation)
{
if (GetParam() == std::string("bitmap")) {
return;
}
uint64_t capacity = 4 * 1024 * 1024;
uint64_t alloc_unit = 4096;
uint64_t want_size = alloc_unit;
PExtentVector allocated, tmp;

init_alloc(capacity, alloc_unit);
alloc->init_add_free(0, capacity);
bool bitmap_alloc = GetParam() == std::string("bitmap");

EXPECT_EQ(0.0, alloc->get_fragmentation(alloc_unit));

for (size_t i = 0; i < capacity / alloc_unit; ++i)
{
tmp.clear();
EXPECT_EQ(want_size, alloc->allocate(want_size, alloc_unit, 0, 0, &tmp));
allocated.insert(allocated.end(), tmp.begin(), tmp.end());
EXPECT_EQ(0.0, alloc->get_fragmentation(alloc_unit));

// bitmap fragmentation calculation doesn't provide such a constant
// estimate
if (!bitmap_alloc) {
EXPECT_EQ(0.0, alloc->get_fragmentation(alloc_unit));
}
}
EXPECT_EQ(-ENOSPC, alloc->allocate(want_size, alloc_unit, 0, 0, &tmp));

@@ -261,8 +265,13 @@ TEST_P(AllocTest, test_alloc_fragmentation)
release_set.insert(allocated[i].offset, allocated[i].length);
alloc->release(release_set);
}
// fragmentation approx = 257 intervals / 768 max intervals
EXPECT_EQ(33, uint64_t(alloc->get_fragmentation(alloc_unit) * 100));
if (bitmap_alloc) {
// fragmentation = one l1 slot is free + one l1 slot is partial
EXPECT_EQ(50, uint64_t(alloc->get_fragmentation(alloc_unit) * 100));
} else {
// fragmentation approx = 257 intervals / 768 max intervals
EXPECT_EQ(33, uint64_t(alloc->get_fragmentation(alloc_unit) * 100));
}

for (size_t i = allocated.size() / 2 + 1; i < allocated.size(); i += 2)
{
@@ -276,7 +285,6 @@ TEST_P(AllocTest, test_alloc_fragmentation)
// digits after decimal point due to this.
EXPECT_EQ(0, uint64_t(alloc->get_fragmentation(alloc_unit) * 100));
}

INSTANTIATE_TEST_CASE_P(
Allocator,
AllocTest,
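
As a quick sanity check of the expected values asserted in test_alloc_fragmentation above, the arithmetic behind the 33 and 50 figures (a standalone sketch, not part of the patch; the numbers are taken directly from the test comments):

```cpp
// Standalone arithmetic check of the expectations in test_alloc_fragmentation.
#include <cstdint>
#include <iostream>

int main() {
  // Interval-based estimate used by the non-bitmap allocators:
  // roughly 257 free intervals out of a maximum of 768 intervals.
  double interval_based = 257.0 / 768.0;
  std::cout << uint64_t(interval_based * 100) << std::endl;  // prints 33

  // L1-slot-based estimate of the new bitmap allocator: after the releases,
  // one L1 entry is completely free and one is partially free.
  double partial = 1.0, unallocated = 1.0;
  double bitmap_based = partial / (partial + unallocated);
  std::cout << uint64_t(bitmap_based * 100) << std::endl;    // prints 50
  return 0;
}
```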