Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

os/bluestore: leverage the type knowledge in BitMapAreaLeaf. #13736

Merged
merged 3 commits into from Mar 6, 2017
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Jump to
Jump to file
Failed to load files.
Diff view
Diff view
83 changes: 18 additions & 65 deletions src/os/bluestore/BitAllocator.cc
Expand Up @@ -44,60 +44,6 @@ int64_t BitMapZone::total_blocks = 0;
bool BmapEntry::m_bit_mask_init = false;
bmap_mask_vec_t BmapEntry::m_bit_to_mask;

/*
* BmapEntityList functions.
*/
/*
 * Reset this iterator to walk `list` starting at `start_idx`.
 * When `wrap` is true the traversal continues from index 0 after
 * reaching the end, stopping once the start index is seen again.
 */
void BmapEntityListIter::init(BitMapAreaList *list, int64_t start_idx, bool wrap)
{
  m_list = list;
  m_wrap = wrap;
  m_start_idx = start_idx;
  m_cur_idx = start_idx;
  /* Fresh traversal: nothing wrapped, nothing exhausted yet. */
  m_wrapped = false;
  m_end = false;
}

/* Construct a non-wrapping iterator over `list` beginning at `start_idx`. */
BmapEntityListIter::BmapEntityListIter(BitMapAreaList *list, int64_t start_idx)
{
  init(list, start_idx, /* wrap = */ false);
}

/* Construct an iterator over `list` beginning at `start_idx`; `wrap`
 * selects whether the traversal cycles past the end of the list. */
BmapEntityListIter::BmapEntityListIter(BitMapAreaList *list, int64_t start_idx, bool wrap)
{
  init(list, start_idx, wrap);
}

/*
 * Return the element at the current position and advance the cursor,
 * or NULL once the traversal is exhausted.  A wrapping iterator yields
 * every element exactly once, ending after the start index comes back
 * around; a non-wrapping iterator simply stops at the end of the list.
 */
BitMapArea* BmapEntityListIter::next()
{
  const int64_t idx = m_cur_idx;

  if (m_wrapped && idx == m_start_idx) {
    /* We came all the way around to the starting element: hand it out
     * exactly once, then report exhaustion on the following call. */
    if (m_end) {
      return NULL;
    }
    m_end = true;
    return m_list->get_nth_item(idx);
  }

  m_cur_idx++;
  if (m_wrap && m_cur_idx == m_list->size()) {
    /* Cursor ran off the tail of a wrapping traversal: restart at 0. */
    m_cur_idx = 0;
    m_wrapped = true;
  }

  if (idx == m_list->size()) {
    /* End of a non-wrapping traversal. */
    return NULL;
  }

  alloc_assert(idx < m_list->size());
  return m_list->get_nth_item(idx);
}

int64_t BmapEntityListIter::index()
{
Expand Down Expand Up @@ -370,7 +316,8 @@ int64_t BitMapZone::add_used_blocks(int64_t num_blocks)
return std::atomic_fetch_add(&m_used_blocks, (int32_t)num_blocks) + num_blocks;
}

int64_t BitMapZone::get_used_blocks()
/* Intensionally hinted because BitMapAreaLeaf::child_check_n_lock. */
inline int64_t BitMapZone::get_used_blocks()
{
return std::atomic_load(&m_used_blocks);
}
Expand Down Expand Up @@ -417,8 +364,13 @@ BitMapZone::~BitMapZone()

/*
* Check if some search took zone marker to end.
*
* The inline hint has been added intentionally because of the importance of
* this method to BitMapAreaLeaf::child_check_n_lock, and thus to the overall
* allocator's performance. Examination of the disassembly produced by
* GCC 5.4.0 showed that the compiler really needs that hint.
*/
bool BitMapZone::is_exhausted()
inline bool BitMapZone::is_exhausted()
{
/* BitMapZone::get_used_blocks operates atomically. No need for lock. */
return get_used_blocks() == size();
Expand Down Expand Up @@ -1095,7 +1047,10 @@ BitMapAreaLeaf::~BitMapAreaLeaf()
unlock();
}

bool BitMapAreaLeaf::child_check_n_lock(BitMapArea *child, int64_t required, bool lock)
/* Intentionally hinted because of BitMapAreaLeaf::alloc_blocks_dis_int. */
inline bool BitMapAreaLeaf::child_check_n_lock(BitMapZone* const child,
const int64_t required,
const bool lock)
{
/* The exhausted check can be performed without acquiring the lock. This
* is because 1) BitMapZone::is_exhausted() actually operates atomically
Expand All @@ -1120,22 +1075,20 @@ bool BitMapAreaLeaf::child_check_n_lock(BitMapArea *child, int64_t required, boo
return true;
}

/* Release the lock held on the given child area. */
void BitMapAreaLeaf::child_unlock(BitMapArea *child)
{
  child->unlock();
}

int64_t BitMapAreaLeaf::alloc_blocks_dis_int(int64_t num_blocks, int64_t min_alloc,
int64_t hint, int64_t area_blk_off, ExtentList *block_list)
{
BitMapArea *child = NULL;
BitMapZone* child = nullptr;
int64_t allocated = 0;
int64_t blk_off = 0;

BmapEntityListIter iter = BmapEntityListIter(
m_child_list, hint / m_child_size_blocks, false);

while ((child = (BitMapArea *) iter.next())) {
/* We're sure the only element type we aggregate is BitMapZone,
 * so there is no reason to go through the vptr and thereby prevent
 * the compiler from inlining the calls. Consult BitMapAreaLeaf::init. */
while ((child = static_cast<BitMapZone*>(iter.next()))) {
if (!child_check_n_lock(child, 1, false)) {
hint = 0;
continue;
Expand All @@ -1144,7 +1097,7 @@ int64_t BitMapAreaLeaf::alloc_blocks_dis_int(int64_t num_blocks, int64_t min_all
blk_off = child->get_index() * m_child_size_blocks + area_blk_off;
allocated += child->alloc_blocks_dis(num_blocks - allocated, min_alloc,
hint % m_child_size_blocks, blk_off, block_list);
child_unlock(child);
child->unlock();
if (allocated == num_blocks) {
break;
}
Expand Down
55 changes: 45 additions & 10 deletions src/os/bluestore/BitAllocator.h
Expand Up @@ -283,25 +283,61 @@ class BitMapAreaList {
}
};

/* Intentionally inlined for the sake of BitMapAreaLeaf::alloc_blocks_dis_int. */
class BmapEntityListIter {
  BitMapAreaList* m_list;   // list being traversed (not owned)
  int64_t m_start_idx;      // index the traversal began at
  int64_t m_cur_idx;        // next index to hand out
  bool m_wrap;              // cycle past the end of the list?
  bool m_wrapped;           // has the cursor already wrapped to 0?
  bool m_end;               // wrapping traversal fully exhausted?

public:
  /* Iterate over `list` starting at `start_idx`.  When `wrap` is true
   * the traversal continues from index 0 after the end of the list and
   * terminates once the start index has been yielded again. */
  BmapEntityListIter(BitMapAreaList* const list,
                     const int64_t start_idx,
                     const bool wrap = false)
    : m_list(list),
      m_start_idx(start_idx),
      m_cur_idx(start_idx),
      m_wrap(wrap),
      m_wrapped(false),
      m_end(false) {
  }

  /* Return the element at the current position and advance, or NULL
   * once the traversal is exhausted. */
  BitMapArea* next() {
    int64_t cur_idx = m_cur_idx;

    if (m_wrapped &&
        cur_idx == m_start_idx) {
      /*
       * End of wrap cycle + 1
       */
      if (!m_end) {
        m_end = true;
        return m_list->get_nth_item(cur_idx);
      }
      return NULL;
    }
    m_cur_idx++;

    if (m_cur_idx == m_list->size() &&
        m_wrap) {
      m_cur_idx = 0;
      m_wrapped = true;
    }
    if (cur_idx == m_list->size()) {
      /*
       * End of list
       */
      return NULL;
    }

    /* This method should be *really* fast as it's being executed over
     * and over during traversal of allocators indexes. */
    alloc_dbg_assert(cur_idx < m_list->size());
    return m_list->get_nth_item(cur_idx);
  }

  int64_t index();
};

Expand Down Expand Up @@ -450,8 +486,7 @@ class BitMapAreaLeaf: public BitMapAreaIN{
return false;
}

bool child_check_n_lock(BitMapArea *child, int64_t required, bool lock);
void child_unlock(BitMapArea *child);
bool child_check_n_lock(BitMapZone* child, int64_t required, bool lock);

int64_t alloc_blocks_int(int64_t num_blocks, int64_t hint, int64_t *start_block);
int64_t alloc_blocks_dis_int(int64_t num_blocks, int64_t min_alloc, int64_t hint,
Expand Down