Skip to content

Commit

Permalink
PA: Rename PartitionFreelistEntry
Browse files Browse the repository at this point in the history
This CL is trivial. This CL is motivated by setting up PartitionAlloc's
experimental freelist implementation for runtime experimentation. In
such a case, we can't have two different implementations sharing the
same typename.

This CL trivially renames the existing, default implementation of
`PartitionFreelistEntry` to `EncodedNextFreelistEntry`. The
experimental, currently non-operational implementation is renamed
`PoolOffsetFreelistEntry`.

Note that while this CL is a trivial rename that changes no behavior of
the default implementation, it does happen to break the experimental
implementation, since we haven't yet introduced any code to
disambiguate the two types. This is a no-frills rename CL.

Bug: 1461983
Change-Id: I1e7795e5c8cc0b094a1843bb8fa8cbe9f5934b61
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/4809878
Reviewed-by: Takashi Sakamoto <tasak@google.com>
Reviewed-by: Yuki Shiino <yukishiino@chromium.org>
Commit-Queue: Kalvin Lee <kdlee@chromium.org>
Cr-Commit-Position: refs/heads/main@{#1188162}
  • Loading branch information
Kalvin Lee authored and Chromium LUCI CQ committed Aug 25, 2023
1 parent 9ce839d commit 4d4c616
Show file tree
Hide file tree
Showing 13 changed files with 108 additions and 111 deletions.
2 changes: 1 addition & 1 deletion base/allocator/partition_allocator/BUILD.gn
Original file line number Diff line number Diff line change
Expand Up @@ -111,7 +111,7 @@ component("partition_alloc") {

# Changes the freelist implementation to use pointer offsets in lieu
# of full-on pointers. Defaults to false, which implies the use of
# `EncodedPartitionFreelistEntryPtr`.
# "encoded next" freelist entry.
#
# Only usable when pointers are 64-bit.
use_freelist_pool_offsets = has_64_bit_pointers && false
Expand Down
77 changes: 38 additions & 39 deletions base/allocator/partition_allocator/encoded_next_freelist.h
Original file line number Diff line number Diff line change
Expand Up @@ -26,19 +26,18 @@

namespace partition_alloc::internal {

class PartitionFreelistEntry;
class EncodedNextFreelistEntry;

class EncodedPartitionFreelistEntryPtr {
class EncodedFreelistPtr {
private:
PA_ALWAYS_INLINE constexpr explicit EncodedPartitionFreelistEntryPtr(
std::nullptr_t)
PA_ALWAYS_INLINE constexpr explicit EncodedFreelistPtr(std::nullptr_t)
: encoded_(Transform(0)) {}
PA_ALWAYS_INLINE explicit EncodedPartitionFreelistEntryPtr(void* ptr)
PA_ALWAYS_INLINE explicit EncodedFreelistPtr(void* ptr)
// The encoded pointer stays MTE-tagged.
: encoded_(Transform(reinterpret_cast<uintptr_t>(ptr))) {}

PA_ALWAYS_INLINE PartitionFreelistEntry* Decode() const {
return reinterpret_cast<PartitionFreelistEntry*>(Transform(encoded_));
PA_ALWAYS_INLINE EncodedNextFreelistEntry* Decode() const {
return reinterpret_cast<EncodedNextFreelistEntry*>(Transform(encoded_));
}

PA_ALWAYS_INLINE constexpr uintptr_t Inverted() const { return ~encoded_; }
Expand Down Expand Up @@ -71,33 +70,33 @@ class EncodedPartitionFreelistEntryPtr {

uintptr_t encoded_;

friend PartitionFreelistEntry;
friend EncodedNextFreelistEntry;
};

// Freelist entries are encoded for security reasons. See
// //base/allocator/partition_allocator/PartitionAlloc.md and |Transform()| for
// the rationale and mechanism, respectively.
class PartitionFreelistEntry {
class EncodedNextFreelistEntry {
private:
constexpr explicit PartitionFreelistEntry(std::nullptr_t)
: encoded_next_(EncodedPartitionFreelistEntryPtr(nullptr))
constexpr explicit EncodedNextFreelistEntry(std::nullptr_t)
: encoded_next_(EncodedFreelistPtr(nullptr))
#if PA_CONFIG(HAS_FREELIST_SHADOW_ENTRY)
,
shadow_(encoded_next_.Inverted())
#endif
{
}
explicit PartitionFreelistEntry(PartitionFreelistEntry* next)
: encoded_next_(EncodedPartitionFreelistEntryPtr(next))
explicit EncodedNextFreelistEntry(EncodedNextFreelistEntry* next)
: encoded_next_(EncodedFreelistPtr(next))
#if PA_CONFIG(HAS_FREELIST_SHADOW_ENTRY)
,
shadow_(encoded_next_.Inverted())
#endif
{
}
// For testing only.
PartitionFreelistEntry(void* next, bool make_shadow_match)
: encoded_next_(EncodedPartitionFreelistEntryPtr(next))
EncodedNextFreelistEntry(void* next, bool make_shadow_match)
: encoded_next_(EncodedFreelistPtr(next))
#if PA_CONFIG(HAS_FREELIST_SHADOW_ENTRY)
,
shadow_(make_shadow_match ? encoded_next_.Inverted() : 12345)
Expand All @@ -106,17 +105,17 @@ class PartitionFreelistEntry {
}

public:
~PartitionFreelistEntry() = delete;
~EncodedNextFreelistEntry() = delete;

// Emplaces the freelist entry at the beginning of the given slot span, and
// initializes it as null-terminated.
PA_ALWAYS_INLINE static PartitionFreelistEntry* EmplaceAndInitNull(
PA_ALWAYS_INLINE static EncodedNextFreelistEntry* EmplaceAndInitNull(
void* slot_start_tagged) {
// |slot_start_tagged| is MTE-tagged.
auto* entry = new (slot_start_tagged) PartitionFreelistEntry(nullptr);
auto* entry = new (slot_start_tagged) EncodedNextFreelistEntry(nullptr);
return entry;
}
PA_ALWAYS_INLINE static PartitionFreelistEntry* EmplaceAndInitNull(
PA_ALWAYS_INLINE static EncodedNextFreelistEntry* EmplaceAndInitNull(
uintptr_t slot_start) {
return EmplaceAndInitNull(SlotStartAddr2Ptr(slot_start));
}
Expand All @@ -127,11 +126,11 @@ class PartitionFreelistEntry {
// This freelist is built for the purpose of thread-cache. This means that we
// can't perform a check that this and the next pointer belong to the same
// super page, as thread-cache spans may chain slots across super pages.
PA_ALWAYS_INLINE static PartitionFreelistEntry* EmplaceAndInitForThreadCache(
uintptr_t slot_start,
PartitionFreelistEntry* next) {
PA_ALWAYS_INLINE static EncodedNextFreelistEntry*
EmplaceAndInitForThreadCache(uintptr_t slot_start,
EncodedNextFreelistEntry* next) {
auto* entry =
new (SlotStartAddr2Ptr(slot_start)) PartitionFreelistEntry(next);
new (SlotStartAddr2Ptr(slot_start)) EncodedNextFreelistEntry(next);
return entry;
}

Expand All @@ -144,20 +143,20 @@ class PartitionFreelistEntry {
void* next,
bool make_shadow_match) {
new (SlotStartAddr2Ptr(slot_start))
PartitionFreelistEntry(next, make_shadow_match);
EncodedNextFreelistEntry(next, make_shadow_match);
}

void CorruptNextForTesting(uintptr_t v) {
// We just need a value that can never be a valid pointer here.
encoded_next_.Override(EncodedPartitionFreelistEntryPtr::Transform(v));
encoded_next_.Override(EncodedFreelistPtr::Transform(v));
}

// Puts `slot_size` on the stack before crashing in case of memory
// corruption. Meant to be used to report the failed allocation size.
template <bool crash_on_corruption>
PA_ALWAYS_INLINE PartitionFreelistEntry* GetNextForThreadCache(
PA_ALWAYS_INLINE EncodedNextFreelistEntry* GetNextForThreadCache(
size_t slot_size) const;
PA_ALWAYS_INLINE PartitionFreelistEntry* GetNext(size_t slot_size) const;
PA_ALWAYS_INLINE EncodedNextFreelistEntry* GetNext(size_t slot_size) const;

PA_NOINLINE void CheckFreeList(size_t slot_size) const {
for (auto* entry = this; entry; entry = entry->GetNext(slot_size)) {
Expand All @@ -172,7 +171,7 @@ class PartitionFreelistEntry {
}
}

PA_ALWAYS_INLINE void SetNext(PartitionFreelistEntry* entry) {
PA_ALWAYS_INLINE void SetNext(EncodedNextFreelistEntry* entry) {
// SetNext() is either called on the freelist head, when provisioning new
// slots, or when GetNext() has been called before, no need to pass the
// size.
Expand All @@ -187,7 +186,7 @@ class PartitionFreelistEntry {
}
#endif // BUILDFLAG(PA_DCHECK_IS_ON)

encoded_next_ = EncodedPartitionFreelistEntryPtr(entry);
encoded_next_ = EncodedFreelistPtr(entry);
#if PA_CONFIG(HAS_FREELIST_SHADOW_ENTRY)
shadow_ = encoded_next_.Inverted();
#endif
Expand All @@ -210,12 +209,12 @@ class PartitionFreelistEntry {

private:
template <bool crash_on_corruption>
PA_ALWAYS_INLINE PartitionFreelistEntry* GetNextInternal(
PA_ALWAYS_INLINE EncodedNextFreelistEntry* GetNextInternal(
size_t slot_size,
bool for_thread_cache) const;

PA_ALWAYS_INLINE static bool IsSane(const PartitionFreelistEntry* here,
const PartitionFreelistEntry* next,
PA_ALWAYS_INLINE static bool IsSane(const EncodedNextFreelistEntry* here,
const EncodedNextFreelistEntry* next,
bool for_thread_cache) {
// Don't allow the freelist to be blindly followed to any location.
// Checks two constraints:
Expand Down Expand Up @@ -257,7 +256,7 @@ class PartitionFreelistEntry {
}
}

EncodedPartitionFreelistEntryPtr encoded_next_;
EncodedFreelistPtr encoded_next_;
// This is intended to detect unintentional corruptions of the freelist.
// These can happen due to a Use-after-Free, or overflow of the previous
// allocation in the slot span.
Expand All @@ -267,9 +266,9 @@ class PartitionFreelistEntry {
};

template <bool crash_on_corruption>
PA_ALWAYS_INLINE PartitionFreelistEntry*
PartitionFreelistEntry::GetNextInternal(size_t slot_size,
bool for_thread_cache) const {
PA_ALWAYS_INLINE EncodedNextFreelistEntry*
EncodedNextFreelistEntry::GetNextInternal(size_t slot_size,
bool for_thread_cache) const {
// GetNext() can be called on discarded memory, in which case |encoded_next_|
// is 0, and none of the checks apply. Don't prefetch nullptr either.
if (IsEncodedNextPtrZero()) {
Expand Down Expand Up @@ -306,12 +305,12 @@ PartitionFreelistEntry::GetNextInternal(size_t slot_size,
}

template <bool crash_on_corruption>
PA_ALWAYS_INLINE PartitionFreelistEntry*
PartitionFreelistEntry::GetNextForThreadCache(size_t slot_size) const {
PA_ALWAYS_INLINE EncodedNextFreelistEntry*
EncodedNextFreelistEntry::GetNextForThreadCache(size_t slot_size) const {
return GetNextInternal<crash_on_corruption>(slot_size, true);
}

PA_ALWAYS_INLINE PartitionFreelistEntry* PartitionFreelistEntry::GetNext(
PA_ALWAYS_INLINE EncodedNextFreelistEntry* EncodedNextFreelistEntry::GetNext(
size_t slot_size) const {
return GetNextInternal<true>(slot_size, false);
}
Expand Down
14 changes: 7 additions & 7 deletions base/allocator/partition_allocator/hardening_unittest.cc
Original file line number Diff line number Diff line change
Expand Up @@ -43,8 +43,8 @@ TEST(HardeningTest, PartialCorruption) {
// Even if it looks reasonable (valid encoded pointer), freelist corruption
// detection will make the code crash, because shadow_ doesn't match
// encoded_next_.
PartitionFreelistEntry::EmplaceAndInitForTest(root.ObjectToSlotStart(data),
to_corrupt, false);
EncodedNextFreelistEntry::EmplaceAndInitForTest(root.ObjectToSlotStart(data),
to_corrupt, false);
EXPECT_DEATH(root.Alloc(kAllocSize, ""), "");
}

Expand All @@ -65,8 +65,8 @@ TEST(HardeningTest, OffHeapPointerCrashing) {

// See "PartialCorruption" above for details. This time, make shadow_
// consistent.
PartitionFreelistEntry::EmplaceAndInitForTest(root.ObjectToSlotStart(data),
to_corrupt, true);
EncodedNextFreelistEntry::EmplaceAndInitForTest(root.ObjectToSlotStart(data),
to_corrupt, true);

// Crashes, because |to_corrupt| is not on the same superpage as data.
EXPECT_DEATH(root.Alloc(kAllocSize, ""), "");
Expand All @@ -86,7 +86,7 @@ TEST(HardeningTest, MetadataPointerCrashing) {

uintptr_t slot_start = root.ObjectToSlotStart(data);
auto* metadata = SlotSpanMetadata::FromSlotStart(slot_start);
PartitionFreelistEntry::EmplaceAndInitForTest(slot_start, metadata, true);
EncodedNextFreelistEntry::EmplaceAndInitForTest(slot_start, metadata, true);

// Crashes, because |metadata| points inside the metadata area.
EXPECT_DEATH(root.Alloc(kAllocSize, ""), "");
Expand Down Expand Up @@ -117,8 +117,8 @@ TEST(HardeningTest, SuccessfulCorruption) {
root.Free(data2);
root.Free(data);

PartitionFreelistEntry::EmplaceAndInitForTest(root.ObjectToSlotStart(data),
to_corrupt, true);
EncodedNextFreelistEntry::EmplaceAndInitForTest(root.ObjectToSlotStart(data),
to_corrupt, true);

#if BUILDFLAG(USE_FREESLOT_BITMAP)
// This part crashes with freeslot bitmap because it detects freelist
Expand Down
8 changes: 4 additions & 4 deletions base/allocator/partition_allocator/partition_bucket.cc
Original file line number Diff line number Diff line change
Expand Up @@ -435,7 +435,7 @@ SlotSpanMetadata* PartitionDirectMap(PartitionRoot* root,
return nullptr;
}

auto* next_entry = PartitionFreelistEntry::EmplaceAndInitNull(slot_start);
auto* next_entry = EncodedNextFreelistEntry::EmplaceAndInitNull(slot_start);
page->slot_span_metadata.SetFreelistHead(next_entry);

map_extent = &metadata->direct_map_extent;
Expand Down Expand Up @@ -967,7 +967,7 @@ PartitionBucket::ProvisionMoreSlotsAndAllocOne(PartitionRoot* root,
}
#endif // PA_CONFIG(HAS_MEMORY_TAGGING)
// Add all slots that fit within so far committed pages to the free list.
PartitionFreelistEntry* prev_entry = nullptr;
EncodedNextFreelistEntry* prev_entry = nullptr;
uintptr_t next_slot_end = next_slot + slot_size;
size_t free_list_entries_added = 0;
while (next_slot_end <= commit_end) {
Expand All @@ -986,7 +986,7 @@ PartitionBucket::ProvisionMoreSlotsAndAllocOne(PartitionRoot* root,
#else // PA_CONFIG(HAS_MEMORY_TAGGING)
next_slot_ptr = reinterpret_cast<void*>(next_slot);
#endif
auto* entry = PartitionFreelistEntry::EmplaceAndInitNull(next_slot_ptr);
auto* entry = EncodedNextFreelistEntry::EmplaceAndInitNull(next_slot_ptr);
if (!slot_span->get_freelist_head()) {
PA_DCHECK(!prev_entry);
PA_DCHECK(!free_list_entries_added);
Expand Down Expand Up @@ -1440,7 +1440,7 @@ uintptr_t PartitionBucket::SlowPathAlloc(PartitionRoot* root,
// If we found an active slot span with free slots, or an empty slot span, we
// have a usable freelist head.
if (PA_LIKELY(new_slot_span->get_freelist_head() != nullptr)) {
PartitionFreelistEntry* entry =
EncodedNextFreelistEntry* entry =
new_slot_span->PopForAlloc(new_bucket->slot_size);

// We may have set *is_already_zeroed to true above, make sure that the
Expand Down
7 changes: 2 additions & 5 deletions base/allocator/partition_allocator/partition_freelist_entry.h
Original file line number Diff line number Diff line change
Expand Up @@ -2,9 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

// Indirection header that allows callers to use
// `PartitionFreelistEntry` without regard for the implementation.

#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_FREELIST_ENTRY_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_FREELIST_ENTRY_H_

Expand Down Expand Up @@ -33,7 +30,7 @@ namespace partition_alloc::internal {

// Assertions that are agnostic to the implementation of the freelist.

static_assert(kSmallestBucket >= sizeof(PartitionFreelistEntry),
static_assert(kSmallestBucket >= sizeof(EncodedNextFreelistEntry),
"Need enough space for freelist entries in the smallest slot");

#if BUILDFLAG(PUT_REF_COUNT_IN_PREVIOUS_SLOT)
Expand All @@ -44,7 +41,7 @@ constexpr size_t kSmallestUsedBucket =
base::bits::AlignUp(1 + sizeof(PartitionRefCount), kSmallestBucket);
}
static_assert(kSmallestUsedBucket >=
sizeof(PartitionFreelistEntry) + sizeof(PartitionRefCount),
sizeof(EncodedNextFreelistEntry) + sizeof(PartitionRefCount),
"Need enough space for freelist entries and the ref-count in the "
"smallest *used* slot");
#endif // BUILDFLAG(PUT_REF_COUNT_IN_PREVIOUS_SLOT)
Expand Down
8 changes: 4 additions & 4 deletions base/allocator/partition_allocator/partition_page.cc
Original file line number Diff line number Diff line change
Expand Up @@ -260,7 +260,7 @@ void SlotSpanMetadata::SortFreelist() {

size_t num_free_slots = 0;
size_t slot_size = bucket->slot_size;
for (PartitionFreelistEntry* head = freelist_head; head;
for (EncodedNextFreelistEntry* head = freelist_head; head;
head = head->GetNext(slot_size)) {
++num_free_slots;
size_t offset_in_slot_span = SlotStartPtr2Addr(head) - slot_span_start;
Expand All @@ -272,14 +272,14 @@ void SlotSpanMetadata::SortFreelist() {

// Empty or single-element list is always sorted.
if (num_free_slots > 1) {
PartitionFreelistEntry* back = nullptr;
PartitionFreelistEntry* head = nullptr;
EncodedNextFreelistEntry* back = nullptr;
EncodedNextFreelistEntry* head = nullptr;

for (size_t slot_number = 0; slot_number < num_provisioned_slots;
slot_number++) {
if (free_slots[slot_number]) {
uintptr_t slot_start = slot_span_start + (slot_size * slot_number);
auto* entry = PartitionFreelistEntry::EmplaceAndInitNull(slot_start);
auto* entry = EncodedNextFreelistEntry::EmplaceAndInitNull(slot_start);

if (!head) {
head = entry;
Expand Down

0 comments on commit 4d4c616

Please sign in to comment.