mimic: os/bluestore: backport new bitmap allocator #26983

Merged: 21 commits merged on Apr 1, 2019
Changes from 1 commit

Commits (21):
480b285  test/allocator: get rid off bitmap allocator specifics and introduce new  (ifed01, May 3, 2018)
dd0912f  os/bluestore: add new bitmap allocator  (ifed01, May 3, 2018)
ba5c402  test/allocator: move bluestore allocator's benchmarks to a standalone UT  (ifed01, May 4, 2018)
d8d6cdb  os/bluestore: get rid off allocator's reserve method  (ifed01, May 7, 2018)
4773293  os/bluestore: add release(PExtentVector) helper to Allocator class to  (ifed01, May 7, 2018)
5525533  os/bluestore: fix improper access to a BitmapFastAllocator::last_pos  (ifed01, May 10, 2018)
d42bb17  os/bluestore: align BitMap allocator's init_rm_free/init_add_free par…  (ifed01, May 11, 2018)
5651097  os/bluestore: remove original bitmap allocator  (ifed01, May 14, 2018)
506dbcc  os/bluestore: more verbose logging in new bitmap allocator  (ifed01, May 30, 2018)
3885860  os/bluestore: cosmetic cleanup in new bitmap allocator.  (ifed01, May 31, 2018)
33482e0  os/bluestore: properly respect min_length when analysing partially free  (ifed01, May 31, 2018)
8b9fb75  os/bluestore: cosmetic new allocator internal method rename.  (ifed01, May 31, 2018)
ececb8f  os/bluestore: respect min_length as allocation granularity for new  (ifed01, May 31, 2018)
c645f2f  test/objectstore/unitetest_fastbmap_allocator: replace ASSERT_TRUE with  (ifed01, Jun 1, 2018)
25b7d0e  os/bluestore: perform allocations aligned with min_length in new bit…  (ifed01, Jun 1, 2018)
811857a  os/bluestore: rename new bitmap allocator class to BitmapAllocator.  (ifed01, Jun 12, 2018)
d08e2b7  os/bluestore: provide a rough estimate for bitmap allocator  (ifed01, Jun 15, 2018)
0bdbe66  os/bluestore: fix overlappedd allocation returned by bitmap allocator  (ifed01, Jun 25, 2018)
e5de298  os/bluestore: fix minor issues in bitmap logging output  (ifed01, Jun 25, 2018)
cf984c8  qa/objectstore: bluestore -> bluestore-{bitmap,stupid}  (liewegas, Nov 30, 2018)
f26b219  qa/suites: fix bluestore links  (liewegas, Dec 5, 2018)

test/allocator: get rid off bitmap allocator specifics and introduce new performance test cases

Signed-off-by: Igor Fedotov <ifedotov@suse.com>
(cherry picked from commit dcd3061)
ifed01 committed May 3, 2018
commit 480b28549556e8079082500b86b12e11657d0a05
@@ -14,8 +14,9 @@
#include "include/stringify.h"
#include "include/Context.h"
#include "os/bluestore/Allocator.h"
#include "os/bluestore/BitAllocator.h"

#include <boost/random/uniform_int.hpp>
typedef boost::mt11213b gen_type;

#if GTEST_HAS_PARAM_TEST

@@ -36,26 +37,27 @@ class AllocTest : public ::testing::TestWithParam<const char*> {

TEST_P(AllocTest, test_alloc_init)
{
int64_t blocks = BmapEntry::size();
int64_t blocks = 64;
init_alloc(blocks, 1);
ASSERT_EQ(0U, alloc->get_free());
alloc->shutdown();
blocks = BitMapZone::get_total_blocks() * 2 + 16;
blocks = 1024 * 2 + 16;
init_alloc(blocks, 1);
ASSERT_EQ(0U, alloc->get_free());
alloc->shutdown();
blocks = BitMapZone::get_total_blocks() * 2;
blocks = 1024 * 2;
init_alloc(blocks, 1);
ASSERT_EQ(alloc->get_free(), (uint64_t) 0);
}

TEST_P(AllocTest, test_alloc_min_alloc)
{
int64_t block_size = 1024;
int64_t blocks = BitMapZone::get_total_blocks() * 2 * block_size;
int64_t capacity = 4 * 1024 * block_size;

{
init_alloc(blocks, block_size);
init_alloc(capacity, block_size);

alloc->init_add_free(block_size, block_size);
PExtentVector extents;
EXPECT_EQ(block_size, alloc->allocate(block_size, block_size,
@@ -96,9 +98,9 @@ TEST_P(AllocTest, test_alloc_min_alloc)
TEST_P(AllocTest, test_alloc_min_max_alloc)
{
int64_t block_size = 1024;
int64_t blocks = BitMapZone::get_total_blocks() * 2 * block_size;

init_alloc(blocks, block_size);
int64_t capacity = 4 * 1024 * block_size;
init_alloc(capacity, block_size);

/*
* Make sure we get all extents different when
@@ -169,9 +171,9 @@ TEST_P(AllocTest, test_alloc_min_max_alloc)
TEST_P(AllocTest, test_alloc_failure)
{
int64_t block_size = 1024;
int64_t blocks = BitMapZone::get_total_blocks() * block_size;
int64_t capacity = 4 * 1024 * block_size;

init_alloc(blocks, block_size);
init_alloc(capacity, block_size);
{
alloc->init_add_free(0, block_size * 256);
alloc->init_add_free(block_size * 512, block_size * 256);
@@ -206,62 +208,6 @@ TEST_P(AllocTest, test_alloc_big)
}
}

TEST_P(AllocTest, test_alloc_hint_bmap)
{
if (GetParam() == std::string("stupid")) {
return;
}
int64_t blocks = BitMapArea::get_level_factor(g_ceph_context, 2) * 4;
int64_t allocated = 0;
int64_t zone_size = 1024;
g_conf->set_val("bluestore_bitmapallocator_blocks_per_zone",
std::to_string(zone_size));

init_alloc(blocks, 1);
alloc->init_add_free(0, blocks);

PExtentVector extents;

allocated = alloc->allocate(1, 1, 1, zone_size, &extents);
ASSERT_EQ(1, allocated);
ASSERT_EQ(1u, extents.size());
ASSERT_EQ(extents[0].offset, (uint64_t) zone_size);

extents.clear();
allocated = alloc->allocate(1, 1, 1, zone_size * 2 - 1, &extents);
EXPECT_EQ(1, allocated);
ASSERT_EQ(1u, extents.size());
EXPECT_EQ((int64_t) extents[0].offset, zone_size * 2 - 1);

/*
* Wrap around with hint
*/
extents.clear();
allocated = alloc->allocate(zone_size * 2, 1, 1, blocks - zone_size * 2,
&extents);
ASSERT_EQ(zone_size * 2, allocated);
EXPECT_EQ(zone_size * 2, (int)extents.size());
EXPECT_EQ((int64_t)extents[0].offset, blocks - zone_size * 2);

extents.clear();
allocated = alloc->allocate(zone_size, 1, 1, blocks - zone_size, &extents);
ASSERT_EQ(zone_size, allocated);
EXPECT_EQ(zone_size, (int)extents.size());
EXPECT_EQ(extents[0].offset, (uint64_t) 0);
/*
* Verify out-of-bound hint
*/
extents.clear();
allocated = alloc->allocate(1, 1, 1, blocks, &extents);
ASSERT_EQ(1, allocated);
EXPECT_EQ(1, (int)extents.size());

extents.clear();
allocated = alloc->allocate(1, 1, 1, blocks * 3 + 1 , &extents);
ASSERT_EQ(1, allocated);
EXPECT_EQ(1, (int)extents.size());
}

TEST_P(AllocTest, test_alloc_non_aligned_len)
{
int64_t block_size = 1 << 12;
@@ -330,6 +276,176 @@ TEST_P(AllocTest, test_alloc_fragmentation)
EXPECT_EQ(0, uint64_t(alloc->get_fragmentation(alloc_unit) * 100));
}

const uint64_t _1m = 1024 * 1024;
const uint64_t _2m = 2 * 1024 * 1024;

TEST_P(AllocTest, test_alloc_bench_seq)
{
uint64_t capacity = uint64_t(1024) * 1024 * 1024 * 1024;
uint64_t alloc_unit = 4096;
uint64_t want_size = alloc_unit;
PExtentVector allocated, tmp;

init_alloc(capacity, alloc_unit);
alloc->init_add_free(0, capacity);

utime_t start = ceph_clock_now();
for (uint64_t i = 0; i < capacity; i += want_size)
{
tmp.clear();
EXPECT_EQ(0, alloc->reserve(want_size));
EXPECT_EQ(want_size, alloc->allocate(want_size, alloc_unit, 0, 0, &tmp));
if (0 == (i % (1 * 1024 * _1m))) {
std::cout << "alloc " << i / 1024 / 1024 << " mb of "
<< capacity / 1024 / 1024 << std::endl;
}
}

std::cout << "releasing..." << std::endl;
for (size_t i = 0; i < capacity; i += want_size)
{
interval_set<uint64_t> release_set;
release_set.insert(i, want_size);
alloc->release(release_set);
if (0 == (i % (1 * 1024 * _1m))) {
std::cout << "release " << i / 1024 / 1024 << " mb of "
<< capacity / 1024 / 1024 << std::endl;
}
}
std::cout<<"Executed in "<< ceph_clock_now() - start << std::endl;
}

class AllocTracker
{
std::vector<uint64_t> allocations;
uint64_t head = 0;
uint64_t tail = 0;
uint64_t size = 0;
boost::uniform_int<> u1;

public:
AllocTracker(uint64_t capacity, uint64_t alloc_unit)
: u1(capacity, alloc_unit)
{
assert(alloc_unit >= 0x100);
assert(capacity <= (uint64_t(1) << 48)); // we use 5 octets (bytes 1 - 5) to store
// offset to save the required space.
// This supports capacity up to 281 TB

allocations.resize(capacity / alloc_unit);
}
inline uint64_t get_head() const
{
return head;
}

inline uint64_t get_tail() const
{
return tail;
}

bool push(uint64_t offs, uint32_t len)
{
assert((len & 0xff) == 0);
assert((offs & 0xff) == 0);
assert((offs & 0xffff000000000000) == 0);

if (head + 1 == tail)
return false;
uint64_t val = (offs << 16) | (len >> 8);
allocations[head++] = val;
head %= allocations.size();
++size;
return true;
}
bool pop(uint64_t* offs, uint32_t* len)
{
if (size == 0)
return false;
uint64_t val = allocations[tail++];
*len = uint64_t((val & 0xffffff) << 8);
*offs = (val >> 16) & ~uint64_t(0xff);
tail %= allocations.size();
--size;
return true;
}
bool pop_random(gen_type& rng, uint64_t* offs, uint32_t* len,
uint32_t max_len = 0)
{
if (size == 0)
return false;

uint64_t pos = (u1(rng) % size) + tail;
pos %= allocations.size();
uint64_t val = allocations[pos];
*len = uint64_t((val & 0xffffff) << 8);
*offs = (val >> 16) & ~uint64_t(0xff);
if (max_len && *len > max_len) {
val = ((*offs + max_len) << 16) | ((*len - max_len) >> 8);
allocations[pos] = val;
*len = max_len;
} else {
allocations[pos] = allocations[tail++];
tail %= allocations.size();
--size;
}
return true;
}
};
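
The AllocTracker class above records every allocation as a single 64-bit word: the byte offset (a multiple of 256, below 2^48, i.e. up to roughly 281 TB of capacity) is shifted left by 16 bits, and the length divided by 256 fills the low 24 bits. The following standalone sketch is not part of the commit; the pack_alloc/unpack_alloc names are illustrative only, showing the round trip that push/pop perform:

#include <cassert>
#include <cstdint>

// Pack an (offset, length) pair the way AllocTracker::push does:
// both values are multiples of 256 and the offset fits in 48 bits.
static uint64_t pack_alloc(uint64_t offs, uint32_t len) {
  assert((offs & 0xff) == 0 && (len & 0xff) == 0);
  assert((offs & 0xffff000000000000ull) == 0);  // below 2^48, ~281 TB
  return (offs << 16) | (len >> 8);
}

// Reverse the packing, as AllocTracker::pop does.
static void unpack_alloc(uint64_t val, uint64_t* offs, uint32_t* len) {
  *len = uint32_t((val & 0xffffff) << 8);   // low 24 bits hold len / 256
  *offs = (val >> 16) & ~uint64_t(0xff);    // mask off the bits borrowed by len
}

int main() {
  uint64_t offs = 0;
  uint32_t len = 0;
  unpack_alloc(pack_alloc(0x1234500, 0x40000), &offs, &len);
  assert(offs == 0x1234500 && len == 0x40000);
  return 0;
}
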

TEST_P(AllocTest, test_alloc_bench)
{
uint64_t capacity = uint64_t(1024) * 1024 * 1024 * 1024;
uint64_t alloc_unit = 4096;
PExtentVector allocated, tmp;
AllocTracker at(capacity, alloc_unit);

init_alloc(capacity, alloc_unit);
alloc->init_add_free(0, capacity);

gen_type rng(time(NULL));
boost::uniform_int<> u1(0, 9); // 4K-2M
boost::uniform_int<> u2(0, 7); // 4K-512K

utime_t start = ceph_clock_now();
for (uint64_t i = 0; i < capacity * 2; )
{
uint32_t want = alloc_unit << u1(rng);
auto r = alloc->reserve(want);
if (r != 0) {
break;
}
i += want;
tmp.clear();

EXPECT_EQ(want, alloc->allocate(want, alloc_unit, 0, 0, &tmp));
for(auto a : tmp) {
bool full = !at.push(a.offset, a.length);
EXPECT_EQ(full, false);
}
uint64_t want_release = alloc_unit << u2(rng);
uint64_t released = 0;
do {
uint64_t o = 0;
uint32_t l = 0;
interval_set<uint64_t> release_set;
if (!at.pop_random(rng, &o, &l, want_release - released)) {
break;
}
release_set.insert(o, l);
alloc->release(release_set);
released += l;
} while (released < want_release);

if (0 == (i % (1 * 1024 * _1m))) {
std::cout << "alloc " << i / 1024 / 1024 << " mb of "
<< capacity / 1024 / 1024 << std::endl;
}
}
std::cout<<"Executed in "<< ceph_clock_now() - start << std::endl;
std::cout<<"Avail "<< alloc->get_free() / _1m << " MB" << std::endl;
}
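
A side note on the size distributions in test_alloc_bench above: request and release sizes are powers of two obtained by shifting alloc_unit (4096) left by a uniformly drawn exponent, so u1 in [0, 9] gives allocations of 4 KiB to 2 MiB and u2 in [0, 7] gives release targets of 4 KiB to 512 KiB, matching the "4K-2M" and "4K-512K" comments. A tiny standalone check of that arithmetic (not part of the commit):

#include <cstdint>
#include <iostream>

int main() {
  const uint64_t alloc_unit = 4096;  // 4 KiB block size, as in the benchmark
  // largest allocation request: exponent 9 -> 4096 << 9 == 2 MiB
  std::cout << (alloc_unit << 9) / (1024 * 1024) << " MiB max alloc\n";
  // largest release target: exponent 7 -> 4096 << 7 == 512 KiB
  std::cout << (alloc_unit << 7) / 1024 << " KiB max release\n";
  return 0;
}
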

INSTANTIATE_TEST_CASE_P(
Allocator,
AllocTest,