197 changes: 197 additions & 0 deletions libc/test/src/__support/block_test.cpp
@@ -8,6 +8,7 @@
#include <stddef.h>

#include "src/__support/CPP/array.h"
#include "src/__support/CPP/bit.h"
#include "src/__support/CPP/span.h"
#include "src/__support/block.h"
#include "src/string/memcpy.h"
@@ -36,6 +37,7 @@ using SmallOffsetBlock = LIBC_NAMESPACE::Block<uint16_t>;
template <typename BlockType> void LlvmLibcBlockTest##TestCase::RunTest()

using LIBC_NAMESPACE::cpp::array;
using LIBC_NAMESPACE::cpp::bit_ceil;
using LIBC_NAMESPACE::cpp::byte;
using LIBC_NAMESPACE::cpp::span;

@@ -567,3 +569,198 @@ TEST_FOR_EACH_BLOCK_TYPE(CanGetConstBlockFromUsableSpace) {
const BlockType *block2 = BlockType::from_usable_space(ptr);
EXPECT_EQ(block1, block2);
}

TEST_FOR_EACH_BLOCK_TYPE(CanAllocate) {
constexpr size_t kN = 1024;

  // Ensure we can allocate every size that fits within this block.
for (size_t i = 0; i < kN - BlockType::BLOCK_OVERHEAD; ++i) {
alignas(BlockType::ALIGNMENT) array<byte, kN> bytes{};
auto result = BlockType::init(bytes);
ASSERT_TRUE(result.has_value());
BlockType *block = *result;

constexpr size_t ALIGN = 1; // Effectively ignores alignment.
EXPECT_TRUE(block->can_allocate(ALIGN, i));

    // Whenever can_allocate succeeds, a matching call to allocate should also
    // succeed.
auto info = BlockType::allocate(block, ALIGN, i);
EXPECT_NE(info.block, static_cast<BlockType *>(nullptr));
}

alignas(BlockType::ALIGNMENT) array<byte, kN> bytes{};
auto result = BlockType::init(bytes);
ASSERT_TRUE(result.has_value());
BlockType *block = *result;

  // Given a block of size kN (which is a power of two), we should be able to
  // allocate a sub-block within it that's aligned to half its size. This is
  // because no matter where the buffer starts, there is always a location
  // within it that satisfies this alignment.
EXPECT_TRUE(block->can_allocate(kN / 2, 1));
auto info = BlockType::allocate(block, kN / 2, 1);
EXPECT_NE(info.block, static_cast<BlockType *>(nullptr));
}
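
// A minimal standalone sketch of why the kN / 2 alignment above always fits:
// rounding a start address up to an alignment advances it by at most
// `alignment - 1` bytes, so any kN-byte buffer contains a kN / 2-aligned
// address with room to spare. `align_up` here is illustrative only, not a
// helper used by Block.
constexpr uintptr_t align_up(uintptr_t addr, uintptr_t alignment) {
  return (addr + alignment - 1) & ~(alignment - 1);
}
static_assert(align_up(1, 512) - 1 == 511,
              "worst-case start address pays alignment - 1 bytes of padding");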

TEST_FOR_EACH_BLOCK_TYPE(AllocateAlreadyAligned) {
constexpr size_t kN = 1024;

alignas(BlockType::ALIGNMENT) array<byte, kN> bytes{};
auto result = BlockType::init(bytes);
ASSERT_TRUE(result.has_value());
BlockType *block = *result;

  // An already-aligned allocation should not split off a padding block before
  // it.
constexpr size_t kAlignment = BlockType::ALIGNMENT;
constexpr size_t kExpectedSize = BlockType::ALIGNMENT;
EXPECT_TRUE(block->can_allocate(kAlignment, kExpectedSize));

  auto [aligned_block, prev, next] =
      BlockType::allocate(block, kAlignment, kExpectedSize);

// Since this is already aligned, there should be no previous block.
EXPECT_EQ(prev, static_cast<BlockType *>(nullptr));

  // Ensure the block is aligned and has the size we expect.
EXPECT_NE(aligned_block, static_cast<BlockType *>(nullptr));
EXPECT_TRUE(aligned_block->is_usable_space_aligned(BlockType::ALIGNMENT));
EXPECT_EQ(aligned_block->inner_size(), kExpectedSize);

// Check the next block.
EXPECT_NE(next, static_cast<BlockType *>(nullptr));
EXPECT_EQ(aligned_block->next(), next);
EXPECT_EQ(next->next(), static_cast<BlockType *>(nullptr));
EXPECT_EQ(reinterpret_cast<byte *>(next) + next->outer_size(),
bytes.data() + bytes.size());
}
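
// A note on the structured bindings used above: they assume allocate returns
// an aggregate of three block pointers, roughly of this shape (a sketch; the
// authoritative definition is in src/__support/block.h):
//
//   struct BlockInfo {
//     BlockType *block; // The block that satisfies the request.
//     BlockType *prev;  // Padding split off before it, or nullptr.
//     BlockType *next;  // Remainder split off after it, or nullptr.
//   };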

TEST_FOR_EACH_BLOCK_TYPE(AllocateNeedsAlignment) {
constexpr size_t kN = 1024;

alignas(kN) array<byte, kN> bytes{};
auto result = BlockType::init(bytes);
ASSERT_TRUE(result.has_value());
BlockType *block = *result;

  // First, ensure the usable space is only aligned to the block alignment.
ASSERT_EQ(block->usable_space(), bytes.data() + BlockType::BLOCK_OVERHEAD);
ASSERT_EQ(block->prev(), static_cast<BlockType *>(nullptr));

  // Now pick an alignment such that the usable space is not already aligned to
  // it. We want to explicitly test that allocate splits off a padding block
  // before the aligned one.
constexpr size_t kAlignment = bit_ceil(BlockType::BLOCK_OVERHEAD) * 8;
ASSERT_FALSE(block->is_usable_space_aligned(kAlignment));
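
  // For reference, cpp::bit_ceil mirrors std::bit_ceil: it rounds up to the
  // smallest power of two >= its argument, so kAlignment is a power of two
  // strictly greater than BLOCK_OVERHEAD. A couple of worked examples
  // (assuming cpp::bit_ceil is constexpr, like std::bit_ceil):
  static_assert(bit_ceil(5u) == 8u, "smallest power of two >= 5");
  static_assert(bit_ceil(8u) == 8u, "powers of two are unchanged");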

constexpr size_t kSize = 10;
EXPECT_TRUE(block->can_allocate(kAlignment, kSize));

auto [aligned_block, prev, next] =
BlockType::allocate(block, kAlignment, kSize);
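
  // Expected layout after the call (a sketch):
  //
  //   [ prev (alignment padding) ][ aligned_block ][ next (remainder) ]
  //   ^ bytes.data()                               bytes.data() + kN ^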

  // Check the previous block was created appropriately. Since this block is
  // the first block, a new padding block should be created before it.
EXPECT_NE(prev, static_cast<BlockType *>(nullptr));
EXPECT_EQ(aligned_block->prev(), prev);
EXPECT_EQ(prev->next(), aligned_block);
EXPECT_EQ(prev->outer_size(), reinterpret_cast<uintptr_t>(aligned_block) -
reinterpret_cast<uintptr_t>(prev));

  // Ensure the block is aligned and has the size we expect.
  EXPECT_NE(aligned_block, static_cast<BlockType *>(nullptr));
EXPECT_TRUE(aligned_block->is_usable_space_aligned(kAlignment));

// Check the next block.
EXPECT_NE(next, static_cast<BlockType *>(nullptr));
EXPECT_EQ(aligned_block->next(), next);
EXPECT_EQ(next->next(), static_cast<BlockType *>(nullptr));
EXPECT_EQ(reinterpret_cast<byte *>(next) + next->outer_size(), &*bytes.end());
}

TEST_FOR_EACH_BLOCK_TYPE(PreviousBlockMergedIfNotFirst) {
constexpr size_t kN = 1024;

alignas(kN) array<byte, kN> bytes{};
auto result = BlockType::init(bytes);
ASSERT_TRUE(result.has_value());
BlockType *block = *result;

// Split the block roughly halfway and work on the second half.
auto result2 = BlockType::split(block, kN / 2);
ASSERT_TRUE(result2.has_value());
BlockType *newblock = *result2;
ASSERT_EQ(newblock->prev(), block);
size_t old_prev_size = block->outer_size();

  // Now pick an alignment such that the usable space is not already aligned to
  // it. We want to explicitly test that allocate splits off a padding block
  // before the aligned one.
constexpr size_t kAlignment = bit_ceil(BlockType::BLOCK_OVERHEAD) * 8;
ASSERT_FALSE(newblock->is_usable_space_aligned(kAlignment));

// Ensure we can allocate in the new block.
constexpr size_t kSize = BlockType::ALIGNMENT;
EXPECT_TRUE(newblock->can_allocate(kAlignment, kSize));

auto [aligned_block, prev, next] =
BlockType::allocate(newblock, kAlignment, kSize);

  // There should be no new previous block. Instead, the alignment padding we
  // created should be merged into the original previous block.
EXPECT_EQ(prev, static_cast<BlockType *>(nullptr));
EXPECT_EQ(aligned_block->prev(), block);
EXPECT_EQ(block->next(), aligned_block);
EXPECT_GT(block->outer_size(), old_prev_size);
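
  // Resulting layout (a sketch): no separate padding block exists; the
  // alignment padding was folded into the original first block instead.
  //
  //   [ block (grown by the padding) ][ aligned_block ][ next ]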
}

TEST_FOR_EACH_BLOCK_TYPE(CanRemergeBlockAllocations) {
  // Finally, ensure that allocate split the blocks correctly: we should be
  // able to reconstruct the original block by remerging the pieces.
  //
  // This is the same setup as the `AllocateNeedsAlignment` test case.
constexpr size_t kN = 1024;

alignas(kN) array<byte, kN> bytes{};
auto result = BlockType::init(bytes);
ASSERT_TRUE(result.has_value());
BlockType *block = *result;

  // First, ensure the usable space is only aligned to the block alignment.
ASSERT_EQ(block->usable_space(), bytes.data() + BlockType::BLOCK_OVERHEAD);
ASSERT_EQ(block->prev(), static_cast<BlockType *>(nullptr));

  // Now pick an alignment such that the usable space is not already aligned to
  // it. We want to explicitly test that allocate splits off a padding block
  // before the aligned one.
constexpr size_t kAlignment = bit_ceil(BlockType::BLOCK_OVERHEAD) * 8;
ASSERT_FALSE(block->is_usable_space_aligned(kAlignment));

constexpr size_t kSize = BlockType::ALIGNMENT;
EXPECT_TRUE(block->can_allocate(kAlignment, kSize));

auto [aligned_block, prev, next] =
BlockType::allocate(block, kAlignment, kSize);

// Check we have the appropriate blocks.
ASSERT_NE(prev, static_cast<BlockType *>(nullptr));
ASSERT_FALSE(prev->last());
ASSERT_EQ(aligned_block->prev(), prev);
  EXPECT_NE(next, static_cast<BlockType *>(nullptr));
EXPECT_EQ(aligned_block->next(), next);
EXPECT_EQ(next->next(), static_cast<BlockType *>(nullptr));
ASSERT_TRUE(next->last());

// Now check for successful merges.
EXPECT_TRUE(BlockType::merge_next(prev));
EXPECT_EQ(prev->next(), next);
EXPECT_TRUE(BlockType::merge_next(prev));
EXPECT_EQ(prev->next(), static_cast<BlockType *>(nullptr));
EXPECT_TRUE(prev->last());

// We should have the original buffer.
EXPECT_EQ(reinterpret_cast<byte *>(prev), &*bytes.begin());
EXPECT_EQ(prev->outer_size(), bytes.size());
EXPECT_EQ(reinterpret_cast<byte *>(prev) + prev->outer_size(), &*bytes.end());
}
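
// The same remerging written as a loop, as a sketch: it assumes, as the test
// above demonstrates, that merge_next keeps succeeding until the receiving
// block is the last one.
//
//   template <typename BlockType> void coalesce_all(BlockType *first) {
//     while (BlockType::merge_next(first))
//       ; // Each merge absorbs first->next() until first is last.
//   }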
80 changes: 75 additions & 5 deletions libc/test/src/__support/freelist_heap_test.cpp
@@ -47,10 +47,6 @@ TEST_FOR_EACH_ALLOCATOR(CanAllocate, 2048) {
void *ptr = allocator.allocate(ALLOC_SIZE);

ASSERT_NE(ptr, static_cast<void *>(nullptr));
// In this case, the allocator should be returning us the start of the chunk.
EXPECT_EQ(ptr, static_cast<void *>(
reinterpret_cast<cpp::byte *>(allocator.region_start()) +
FreeListHeap<>::BlockType::BLOCK_OVERHEAD));
}

TEST_FOR_EACH_ALLOCATOR(AllocationsDontOverlap, 2048) {
@@ -94,7 +90,10 @@ TEST(LlvmLibcFreeListHeap, ReturnsNullWhenFull) {

FreeListHeap<> allocator(buf);

EXPECT_NE(allocator.allocate(N - FreeListHeap<>::BlockType::BLOCK_OVERHEAD),
  // Use aligned_allocate so we don't need to worry about whether `buf` is
  // aligned to max_align_t.
EXPECT_NE(allocator.aligned_allocate(
1, N - FreeListHeap<>::BlockType::BLOCK_OVERHEAD),
static_cast<void *>(nullptr));
EXPECT_EQ(allocator.allocate(1), static_cast<void *>(nullptr));
}
@@ -214,4 +213,75 @@ TEST_FOR_EACH_ALLOCATOR(CallocTooLarge, 2048) {
EXPECT_EQ(allocator.calloc(1, ALLOC_SIZE), static_cast<void *>(nullptr));
}

TEST_FOR_EACH_ALLOCATOR(AllocateZero, 2048) {
void *ptr = allocator.allocate(0);
ASSERT_EQ(ptr, static_cast<void *>(nullptr));
}

TEST_FOR_EACH_ALLOCATOR(AlignedAlloc, 2048) {
constexpr size_t ALIGNMENTS[] = {1, 2, 4, 8, 16, 32, 64, 128, 256};
constexpr size_t SIZE_SCALES[] = {1, 2, 3, 4, 5};

for (size_t alignment : ALIGNMENTS) {
for (size_t scale : SIZE_SCALES) {
size_t size = alignment * scale;
void *ptr = allocator.aligned_allocate(alignment, size);
EXPECT_NE(ptr, static_cast<void *>(nullptr));
EXPECT_EQ(reinterpret_cast<uintptr_t>(ptr) % alignment, size_t(0));
allocator.free(ptr);
}
}
}

// This test is not part of TEST_FOR_EACH_ALLOCATOR since we want to explicitly
// ensure that the heap can still return aligned allocations when the
// underlying buffer is only aligned to the BlockType alignment, i.e. less
// strictly than the alignments we request.
TEST(LlvmLibcFreeListHeap, AlignedAllocOnlyBlockTypeAligned) {
constexpr size_t BUFFER_SIZE = 4096;
constexpr size_t BUFFER_ALIGNMENT = alignof(FreeListHeap<>::BlockType) * 2;
alignas(BUFFER_ALIGNMENT) cpp::byte buf[BUFFER_SIZE] = {cpp::byte(0)};

// Ensure the underlying buffer is at most aligned to the block type.
FreeListHeap<> allocator(
span<cpp::byte>(buf).subspan(alignof(FreeListHeap<>::BlockType)));

constexpr size_t ALIGNMENTS[] = {1, 2, 4, 8, 16, 32, 64, 128, 256};
constexpr size_t SIZE_SCALES[] = {1, 2, 3, 4, 5};

for (size_t alignment : ALIGNMENTS) {
for (size_t scale : SIZE_SCALES) {
size_t size = alignment * scale;
void *ptr = allocator.aligned_allocate(alignment, size);
EXPECT_NE(ptr, static_cast<void *>(nullptr));
EXPECT_EQ(reinterpret_cast<uintptr_t>(ptr) % alignment, size_t(0));
allocator.free(ptr);
}
}
}

TEST_FOR_EACH_ALLOCATOR(InvalidAlignedAllocAlignment, 2048) {
  // Alignment must be a power of 2, so each `alignment - 1` below is invalid.
constexpr size_t ALIGNMENTS[] = {4, 8, 16, 32, 64, 128, 256};
for (size_t alignment : ALIGNMENTS) {
void *ptr = allocator.aligned_allocate(alignment - 1, alignment - 1);
EXPECT_EQ(ptr, static_cast<void *>(nullptr));
}

  // Size must be a multiple of the alignment.
for (size_t alignment : ALIGNMENTS) {
void *ptr = allocator.aligned_allocate(alignment, alignment + 1);
EXPECT_EQ(ptr, static_cast<void *>(nullptr));
}

// Don't accept zero size.
void *ptr = allocator.aligned_allocate(1, 0);
EXPECT_EQ(ptr, static_cast<void *>(nullptr));

// Don't accept zero alignment.
ptr = allocator.aligned_allocate(0, 8);
EXPECT_EQ(ptr, static_cast<void *>(nullptr));
}
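
// The rules exercised above match C11 aligned_alloc: the alignment must be a
// nonzero power of two and the size a nonzero multiple of it. A standalone
// sketch of that check (illustrative only; the heap's real validation lives in
// freelist_heap.h):
[[maybe_unused]] static bool is_valid_aligned_request(size_t alignment,
                                                      size_t size) {
  bool power_of_two = alignment != 0 && (alignment & (alignment - 1)) == 0;
  return power_of_two && size != 0 && size % alignment == 0;
}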

} // namespace LIBC_NAMESPACE
18 changes: 18 additions & 0 deletions libc/test/src/__support/freelist_malloc_test.cpp
@@ -7,6 +7,7 @@
//===----------------------------------------------------------------------===//

#include "src/__support/freelist_heap.h"
#include "src/stdlib/aligned_alloc.h"
#include "src/stdlib/calloc.h"
#include "src/stdlib/free.h"
#include "src/stdlib/malloc.h"
@@ -53,4 +54,21 @@ TEST(LlvmLibcFreeListMalloc, MallocStats) {
kAllocSize + kCallocNum * kCallocSize);
EXPECT_EQ(freelist_heap_stats.cumulative_freed,
kAllocSize + kCallocNum * kCallocSize);

constexpr size_t ALIGN = kAllocSize;
void *ptr3 = LIBC_NAMESPACE::aligned_alloc(ALIGN, kAllocSize);
EXPECT_NE(ptr3, static_cast<void *>(nullptr));
EXPECT_EQ(reinterpret_cast<uintptr_t>(ptr3) % ALIGN, size_t(0));
EXPECT_EQ(freelist_heap_stats.bytes_allocated, kAllocSize);
EXPECT_EQ(freelist_heap_stats.cumulative_allocated,
kAllocSize + kCallocNum * kCallocSize + kAllocSize);
EXPECT_EQ(freelist_heap_stats.cumulative_freed,
kAllocSize + kCallocNum * kCallocSize);

LIBC_NAMESPACE::free(ptr3);
EXPECT_EQ(freelist_heap_stats.bytes_allocated, size_t(0));
EXPECT_EQ(freelist_heap_stats.cumulative_allocated,
kAllocSize + kCallocNum * kCallocSize + kAllocSize);
EXPECT_EQ(freelist_heap_stats.cumulative_freed,
kAllocSize + kCallocNum * kCallocSize + kAllocSize);
}
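
// The invariants checked above imply stats counters of roughly this shape (a
// sketch; see the stats struct in src/__support/freelist_heap.h for the real
// definition):
//
//   size_t bytes_allocated;      // Live bytes; drops back down on free.
//   size_t cumulative_allocated; // Monotonic sum of every allocation.
//   size_t cumulative_freed;     // Monotonic sum of every free.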