Move log entry allocation to allocator
ShawnZhong committed Dec 26, 2021
1 parent a85a1a7 commit 422c3a0
Showing 8 changed files with 159 additions and 143 deletions.
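In short, this commit moves the per-thread log-entry allocation state (log_blocks, curr_log_block, free_log_local_idx) out of LogMgr and into the thread-local Allocator, so a single LogMgr per File now suffices. A minimal sketch of the call-pattern change, as inferred from this diff (not verbatim repository code):

    // before: each thread fetched its own LogMgr
    LogMgr* log_mgr = file->get_local_log_mgr();   // removed by this commit

    // after: one shared LogMgr per File; callers pass their thread-local
    // Allocator, which now owns the log-entry slots
    Allocator* allocator = file->get_local_allocator();
    pmem::LogHeadEntry* head = allocator->alloc_head_entry();
    pmem::LogBodyEntry* body = allocator->alloc_body_entry();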
25 changes: 25 additions & 0 deletions src/alloc.cpp
@@ -5,6 +5,7 @@
#include <utility>

#include "block.h"
#include "file.h"
#include "idx.h"

namespace ulayfs::dram {
@@ -110,4 +111,28 @@ void Allocator::free(const LogicalBlockIdx recycle_image[],
std::sort(free_list.begin(), free_list.end());
}

pmem::LogEntry* Allocator::alloc_log_entry(
bool pack_align, pmem::LogHeadEntry* prev_head_entry) {
  // if 16-byte alignment is needed, we may skip one 8-byte slot
if (pack_align) free_log_local_idx = ALIGN_UP(free_log_local_idx, 2);

if (free_log_local_idx == NUM_LOG_ENTRY) {
LogicalBlockIdx idx = alloc(1);
log_blocks.push_back(idx);
curr_log_block = &file->lidx_to_addr_rw(idx)->log_entry_block;
free_log_local_idx = 0;
if (prev_head_entry) prev_head_entry->next.next_block_idx = idx;
} else {
if (prev_head_entry)
prev_head_entry->next.next_local_idx = free_log_local_idx;
}

assert(curr_log_block != nullptr);
pmem::LogEntry* entry = curr_log_block->get(free_log_local_idx);
memset(entry, 0, sizeof(pmem::LogEntry)); // zero-out at alloc

free_log_local_idx++;
return entry;
}

} // namespace ulayfs::dram
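The pack_align branch above rounds free_log_local_idx up to an even slot. Assuming the conventional definition ALIGN_UP(x, n) == (x + n - 1) & ~(n - 1) (the macro itself is defined elsewhere in the repository), two consecutive 8-byte LogEntry slots give the 16-byte alignment a LogHeadEntry needs:

    // hypothetical walkthrough, assuming the ALIGN_UP definition above
    // free_log_local_idx == 5: odd slot, so not 16-byte aligned
    ALIGN_UP(5, 2);  // == 6, slot 5 is skipped as padding
    // free_log_local_idx == 6: already even, no slot is wasted
    ALIGN_UP(6, 2);  // == 6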
125 changes: 95 additions & 30 deletions src/alloc.h
@@ -9,13 +9,96 @@
#include "posix.h"

namespace ulayfs::dram {
class File;

// per-thread data structure
// TODO: change allocator to track dram bitmap
class Allocator {
int fd;
public:
Allocator(File* file, pmem::MetaBlock* meta, Bitmap* bitmap)
: file(file),
meta(meta),
bitmap(bitmap),
recent_bitmap_local_idx(),
log_blocks(),
curr_log_block(nullptr),
free_log_local_idx(NUM_LOG_ENTRY) {
free_list.reserve(64);
}

~Allocator() {
for (const auto& [len, begin] : free_list) Bitmap::free(bitmap, begin, len);
};

/**
   * allocate contiguous blocks (num_blocks must be <= 64);
   * if a large number of blocks is required, break it into multiple allocs
   * and use log entries to chain them together
*
* @param num_blocks number of blocks to allocate
* @return the logical block id of the first block
*/
[[nodiscard]] LogicalBlockIdx alloc(uint32_t num_blocks);

/**
* Free the blocks in the range [block_idx, block_idx + num_blocks)
*/
void free(LogicalBlockIdx block_idx, uint32_t num_blocks = 1);

/**
   * Free an array of blocks, but the logical block indexes are not
   * necessarily contiguous
*/
void free(const LogicalBlockIdx recycle_image[], uint32_t image_size);

/*
* LogEntry allocations
*/

/**
* allocate a log entry, possibly triggering allocating a new LogBlock
*/
pmem::LogEntry* alloc_log_entry(
bool pack_align = false, pmem::LogHeadEntry* prev_head_entry = nullptr);

  // syntactic sugar for union dispatching
pmem::LogHeadEntry* alloc_head_entry(
pmem::LogHeadEntry* prev_head_entry = nullptr) {
return &alloc_log_entry(/*pack_align*/ true, prev_head_entry)->head_entry;
}

pmem::LogBodyEntry* alloc_body_entry() {
return &alloc_log_entry()->body_entry;
}

/**
* get the number of free entries in the current LogBlock
*/
[[nodiscard]] uint16_t num_free_log_entries() const {
return NUM_LOG_ENTRY - free_log_local_idx;
}

/**
* get the last allocated entry's local index
*/
[[nodiscard]] LogLocalUnpackIdx last_log_local_idx() const {
return free_log_local_idx - 1;
}

[[nodiscard]] pmem::LogEntryBlock* get_curr_log_block() const {
return curr_log_block;
}

[[nodiscard]] LogicalBlockIdx get_curr_log_block_idx() const {
return log_blocks.back();
}

[[nodiscard]] LogLocalUnpackIdx get_free_log_local_idx() const {
return free_log_local_idx;
}

private:
File* file;
pmem::MetaBlock* meta;
MemTable* mem_table;

// dram bitmap
Bitmap* bitmap;
@@ -36,35 +119,17 @@ class Allocator {
// NOTE: this is the index within recent_bitmap_block
BitmapLocalIdx recent_bitmap_local_idx;

public:
Allocator(int fd, pmem::MetaBlock* meta, MemTable* mem_table, Bitmap* bitmap)
: fd(fd),
meta(meta),
mem_table(mem_table),
bitmap(bitmap),
recent_bitmap_local_idx() {
free_list.reserve(64);
}

~Allocator() {
for (const auto& [len, begin] : free_list) Bitmap::free(bitmap, begin, len);
};

  // allocate contiguous blocks (num_blocks must be <= 64)
  // if a large number of blocks is required, break it into multiple allocs
  // and use log entries to chain them together
[[nodiscard]] LogicalBlockIdx alloc(uint32_t num_blocks);

/**
* Free the blocks in the range [block_idx, block_idx + num_blocks)
/*
* LogEntry allocations
*/
void free(LogicalBlockIdx block_idx, uint32_t num_blocks = 1);

/**
   * Free an array of blocks, but the logical block indexes are not
   * necessarily contiguous
*/
void free(const LogicalBlockIdx recycle_image[], uint32_t image_size);
// blocks for storing log entries, max 512 entries per block
std::vector<LogicalBlockIdx> log_blocks;
// pointer to current LogBlock == the one identified by log_blocks.back()
pmem::LogEntryBlock* curr_log_block;
// local index of the first free entry slot in the last block
  // might equal NUM_LOG_ENTRY when a new log block is not allocated yet
LogLocalUnpackIdx free_log_local_idx;
};

} // namespace ulayfs::dram
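A hedged sketch of how these accessors compose when a single log record overflows one LogEntryBlock, paraphrasing the control flow of LogMgr::append further below (not a verbatim excerpt):

    pmem::LogHeadEntry* head = allocator->alloc_head_entry();
    if (total_blocks > allocator->num_free_log_entries() * MAX_BLOCKS_PER_BODY)
      head->overflow = true;           // record continues in another block
    // ... fill body entries via allocator->alloc_body_entry() ...
    if (head->overflow)
      head = allocator->alloc_head_entry(head);  // wires prev_head_entry->next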
3 changes: 1 addition & 2 deletions src/btable.cpp
@@ -18,7 +18,6 @@ void BlkTable::update(TxEntryIdx& tx_idx, pmem::TxBlock*& tx_block,
return;
}

auto log_mgr = file->get_local_log_mgr();
LogicalBlockIdx prev_tx_block_idx = 0;

while (true) {
@@ -29,7 +28,7 @@
if (tx_entry.is_inline())
apply_tx(tx_entry.commit_inline_entry);
else
apply_tx(tx_entry.commit_entry, log_mgr, init_bitmap);
apply_tx(tx_entry.commit_entry, &file->log_mgr, init_bitmap);
prev_tx_block_idx = tail_tx_idx.block_idx;
if (!tx_mgr->advance_tx_idx(tail_tx_idx, tail_tx_block, do_alloc)) break;
}
15 changes: 2 additions & 13 deletions src/file.cpp
@@ -7,6 +7,7 @@ File::File(int fd, const struct stat& stat, int flags)
mem_table(fd, stat.st_size, (flags & O_ACCMODE) == O_RDONLY),
meta(mem_table.get_meta()),
tx_mgr(this, meta),
log_mgr(this, meta),
blk_table(this, &tx_mgr),
file_offset(0),
flags(flags) {
@@ -52,7 +53,6 @@ File::~File() {
posix::close(fd);
posix::close(shm_fd);
allocators.clear();
log_mgrs.clear();
}

/*
@@ -172,22 +172,11 @@ Allocator* File::get_local_allocator() {
return &it->second;
}

auto [it, ok] =
allocators.emplace(tid, Allocator(fd, meta, &mem_table, bitmap));
auto [it, ok] = allocators.emplace(tid, Allocator(this, meta, bitmap));
PANIC_IF(!ok, "insert to thread-local allocators failed");
return &it->second;
}

LogMgr* File::get_local_log_mgr() {
if (auto it = log_mgrs.find(tid); it != log_mgrs.end()) {
return &it->second;
}

auto [it, ok] = log_mgrs.emplace(tid, LogMgr(this, meta));
PANIC_IF(!ok, "insert to thread-local log_mgrs failed");
return &it->second;
}

/*
* Helper functions
*/
8 changes: 3 additions & 5 deletions src/file.h
@@ -26,6 +26,7 @@ class File {
MemTable mem_table;
pmem::MetaBlock* meta;
TxMgr tx_mgr;
LogMgr log_mgr;
BlkTable blk_table;

int shm_fd;
@@ -36,13 +37,10 @@
// the allocator is a per-thread per-file data structure
tbb::concurrent_unordered_map<pid_t, Allocator> allocators;

// each thread tid has its local log_mgr
// the log_mgr is a per-thread per-file data structure
tbb::concurrent_unordered_map<pid_t, LogMgr> log_mgrs;

friend class TxMgr;
friend class LogMgr;
friend class BlkTable;
friend class Allocator;

public:
File(int fd, const struct stat& stat, int flags);
@@ -62,7 +60,6 @@
* Getters & removers for thread-local data structures
*/
[[nodiscard]] Allocator* get_local_allocator();
[[nodiscard]] LogMgr* get_local_log_mgr();

private:
/**
@@ -129,6 +126,7 @@ class File {
int open_shm(const char* shm_path, const struct stat& stat, Bitmap*& bitmap);

friend std::ostream& operator<<(std::ostream& out, const File& f);
friend std::ostream& operator<<(std::ostream& out, const TxMgr& tx_mgr);
};

} // namespace ulayfs::dram
44 changes: 12 additions & 32 deletions src/log.cpp
@@ -66,22 +66,24 @@ void LogMgr::get_coverage(LogEntryIdx first_head_idx,
}

LogEntryIdx LogMgr::append(
pmem::LogOp op, uint16_t leftover_bytes, uint32_t total_blocks,
VirtualBlockIdx begin_virtual_idx,
Allocator* allocator, pmem::LogOp op, uint16_t leftover_bytes,
uint32_t total_blocks, VirtualBlockIdx begin_virtual_idx,
const std::vector<LogicalBlockIdx>& begin_logical_idxs, bool fenced) {
  // allocate the first head entry, whose LogEntryIdx will be returned
  // to the transaction
pmem::LogHeadEntry* head_entry = alloc_head_entry();
LogEntryUnpackIdx first_head_idx{log_blocks.back(), last_local_idx()};
pmem::LogHeadEntry* head_entry = allocator->alloc_head_entry();
LogEntryUnpackIdx first_head_idx{allocator->get_curr_log_block_idx(),
allocator->last_log_local_idx()};
VirtualBlockIdx now_virtual_idx = begin_virtual_idx;
size_t now_logical_idx_off = 0;

while (head_entry != nullptr) {
LogLocalUnpackIdx persist_start_idx = last_local_idx();
LogLocalUnpackIdx persist_start_idx = allocator->last_log_local_idx();
head_entry->op = op;

uint32_t num_blocks = total_blocks;
uint32_t max_blocks = num_free_entries() * MAX_BLOCKS_PER_BODY;
uint32_t max_blocks =
allocator->num_free_log_entries() * MAX_BLOCKS_PER_BODY;
if (num_blocks > max_blocks) {
num_blocks = max_blocks;
head_entry->overflow = true;
@@ -97,7 +99,7 @@

    // populate body entries until done or until the current LogBlock fills up
while (num_blocks > 0) {
pmem::LogBodyEntry* body_entry = alloc_body_entry();
pmem::LogBodyEntry* body_entry = allocator->alloc_body_entry();
assert(now_logical_idx_off < begin_logical_idxs.size());
body_entry->begin_virtual_idx = now_virtual_idx;
body_entry->begin_logical_idx = begin_logical_idxs[now_logical_idx_off++];
@@ -107,37 +109,15 @@
: num_blocks - MAX_BLOCKS_PER_BODY;
}

curr_block->persist(persist_start_idx, free_local_idx, fenced);
allocator->get_curr_log_block()->persist(
persist_start_idx, allocator->get_free_log_local_idx(), fenced);
if (head_entry->overflow)
head_entry = alloc_head_entry(head_entry);
head_entry = allocator->alloc_head_entry(head_entry);
else
head_entry = nullptr;
}

return LogEntryUnpackIdx::to_pack_idx(first_head_idx);
}

pmem::LogEntry* LogMgr::alloc_entry(bool pack_align,
pmem::LogHeadEntry* prev_head_entry) {
  // if 16-byte alignment is needed, we may skip one 8-byte slot
if (pack_align) free_local_idx = ALIGN_UP(free_local_idx, 2);

if (free_local_idx == NUM_LOG_ENTRY) {
LogicalBlockIdx idx = file->get_local_allocator()->alloc(1);
log_blocks.push_back(idx);
curr_block = &file->lidx_to_addr_rw(idx)->log_entry_block;
free_local_idx = 0;
if (prev_head_entry) prev_head_entry->next.next_block_idx = idx;
} else {
if (prev_head_entry) prev_head_entry->next.next_local_idx = free_local_idx;
}

assert(curr_block != nullptr);
pmem::LogEntry* entry = curr_block->get(free_local_idx);
memset(entry, 0, sizeof(pmem::LogEntry)); // zero-out at alloc

free_local_idx++;
return entry;
}

} // namespace ulayfs::dram
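For callers of LogMgr::append, the visible change is the leading Allocator* parameter; a minimal usage sketch under that assumption (argument values are placeholders):

    Allocator* allocator = file->get_local_allocator();
    LogEntryIdx head_idx = log_mgr->append(allocator, op, leftover_bytes,
                                           total_blocks, begin_virtual_idx,
                                           begin_logical_idxs, /*fenced=*/true);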
(diffs for the remaining changed files were not loaded)
