PartitionAlloc: Re-add MTECheckedPtr
This change re-introduces MTECheckedPtr into the machinery of
PartitionAlloc.

Bug: 1298696
Change-Id: I3862849a959e9fe78def41376dde183650fc8832
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/3494580
Reviewed-by: Bartek Nowierski <bartekn@chromium.org>
Commit-Queue: Kalvin Lee <kdlee@chromium.org>
Cr-Commit-Position: refs/heads/main@{#984989}
Kalvin Lee authored and Chromium LUCI CQ committed Mar 24, 2022
1 parent 6d83dc5 commit b1d7530
Showing 8 changed files with 539 additions and 45 deletions.
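For context on the machinery being re-added: every normal-bucket slot carries a small tag, stored out of line in a per-super-page tag bitmap. The allocator stamps a tag on a slot when it is provisioned, freeing the slot increments that tag, and an MTECheckedPtr remembers the tag it observed at assignment and re-checks it on every dereference, so a stale pointer traps instead of silently touching reused memory. A minimal conceptual sketch follows; `TaggedPtr` is a hypothetical stand-in, not the actual MTECheckedPtr/raw_ptr implementation, while `PartitionTagGetValue` is the real helper that appears in this diff.

// Illustrative sketch only - not the real MTECheckedPtr.
template <typename T>
struct TaggedPtr {
  T* ptr;
  partition_alloc::PartitionTag tag;  // Tag observed when the pointer was set.

  explicit TaggedPtr(T* p)
      : ptr(p), tag(partition_alloc::internal::PartitionTagGetValue(p)) {}

  T* get() const {
    // Free() bumps the slot's tag (see FreeNoHooks below), so a stale
    // pointer fails this check rather than silently reading freed memory.
    PA_CHECK(tag == partition_alloc::internal::PartitionTagGetValue(ptr));
    return ptr;
  }
};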
@@ -151,6 +151,7 @@ MaxRegularSlotSpanSize() {
 // | Guard page (4 KiB)    |
 // | Metadata page (4 KiB) |
 // | Guard pages (8 KiB)   |
+// | TagBitmap             |
 // | *Scan State Bitmap    |
 // | Slot span             |
 // | Slot span             |
@@ -159,7 +160,9 @@ MaxRegularSlotSpanSize() {
 // | Guard pages (16 KiB)  |
 // +-----------------------+
 //
-// State Bitmap is inserted for partitions that may have quarantine enabled.
+// TagBitmap is only present when
+// defined(PA_USE_MTE_CHECKED_PTR_WITH_64_BITS_POINTERS) is true. State Bitmap
+// is inserted for partitions that may have quarantine enabled.
//
// If refcount_at_end_allocation is enabled, RefcountBitmap(4KiB) is inserted
// after the Metadata page for BackupRefPtr. The guard pages after the bitmap
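To make the layout above concrete, the following sketch shows how the offsets compose for a normal-bucket super page. It mirrors SuperPagePayloadBegin() in partition_page.h (updated later in this commit); `PayloadBeginSketch` is an illustrative name, and ReservedTagBitmapSize() is zero when MTECheckedPtr support is compiled out.

// Sketch: where the first slot span starts inside a normal-bucket super page.
uintptr_t PayloadBeginSketch(uintptr_t super_page, bool with_quarantine) {
  uintptr_t offset = PartitionPageSize();  // Guard, metadata and guard pages.
  offset += ReservedTagBitmapSize();       // MTECheckedPtr tag bitmap.
  if (with_quarantine)
    offset += ReservedStateBitmapSize();   // *Scan state bitmap.
  return super_page + offset;              // First slot span starts here.
}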
@@ -353,7 +356,6 @@ constexpr size_t kMinDirectMappedDownsize = kMaxBucketed + 1;
 // Intentionally set to less than 2GiB to make sure that a 2GiB allocation
 // fails. This is a security choice in Chrome, to help making size_t vs int bugs
 // harder to exploit.
-//

PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR ALWAYS_INLINE size_t
MaxDirectMapped() {
107 changes: 95 additions & 12 deletions base/allocator/partition_allocator/partition_alloc_unittest.cc
@@ -29,6 +29,7 @@
#include "base/allocator/partition_allocator/partition_page.h"
#include "base/allocator/partition_allocator/partition_ref_count.h"
#include "base/allocator/partition_allocator/partition_root.h"
#include "base/allocator/partition_allocator/partition_tag_bitmap.h"
#include "base/allocator/partition_allocator/reservation_offset_table.h"
#include "base/allocator/partition_allocator/tagging.h"
#include "base/bits.h"
@@ -575,7 +576,9 @@ TEST_P(PartitionAllocTest, Basic) {
EXPECT_EQ(kPointerOffset,
reinterpret_cast<size_t>(ptr) & PartitionPageOffsetMask());
// Check that the offset appears to include a guard page.
-EXPECT_EQ(PartitionPageSize() + kPointerOffset,
+EXPECT_EQ(PartitionPageSize() +
+              partition_alloc::internal::ReservedTagBitmapSize() +
+              kPointerOffset,
reinterpret_cast<size_t>(ptr) & kSuperPageOffsetMask);

allocator.root()->Free(ptr);
@@ -853,9 +856,11 @@ TEST_P(PartitionAllocTest, FreeSlotSpanListSlotSpanTransitions) {
// super page.
TEST_P(PartitionAllocTest, MultiPageAllocs) {
size_t num_pages_per_slot_span = GetNumPagesPerSlotSpan(kTestAllocSize);
-// 1 super page has 2 guard partition pages.
+// 1 super page has 2 guard partition pages and a tag bitmap.
 size_t num_slot_spans_needed =
-    (NumPartitionPagesPerSuperPage() - 2) / num_pages_per_slot_span;
+    (NumPartitionPagesPerSuperPage() - 2 -
+     partition_alloc::internal::NumPartitionPagesPerTagBitmap()) /
+    num_pages_per_slot_span;

// We need one more slot span in order to cross super page boundary.
++num_slot_spans_needed;
@@ -874,8 +879,11 @@ TEST_P(PartitionAllocTest, MultiPageAllocs) {
uintptr_t second_super_page_offset =
slot_span_start & kSuperPageOffsetMask;
EXPECT_FALSE(second_super_page_base == first_super_page_base);
-// Check that we allocated a guard page for the second page.
-EXPECT_EQ(PartitionPageSize(), second_super_page_offset);
+// Check that we allocated a guard page and the reserved tag bitmap for
+// the second page.
+EXPECT_EQ(PartitionPageSize() +
+              partition_alloc::internal::ReservedTagBitmapSize(),
+          second_super_page_offset);
}
}
for (i = 0; i < num_slot_spans_needed; ++i)
@@ -1792,9 +1800,11 @@ TEST_P(PartitionAllocTest, PartialPages) {
TEST_P(PartitionAllocTest, MappingCollision) {
size_t num_pages_per_slot_span = GetNumPagesPerSlotSpan(kTestAllocSize);
// The -2 is because the first and last partition pages in a super page are
-// guard pages.
+// guard pages. We also discount the partition pages used for the tag bitmap.
 size_t num_slot_span_needed =
-    (NumPartitionPagesPerSuperPage() - 2) / num_pages_per_slot_span;
+    (NumPartitionPagesPerSuperPage() - 2 -
+     partition_alloc::internal::NumPartitionPagesPerTagBitmap()) /
+    num_pages_per_slot_span;
size_t num_partition_pages_needed =
num_slot_span_needed * num_pages_per_slot_span;

@@ -1809,8 +1819,11 @@

 uintptr_t slot_span_start =
     SlotSpan::ToSlotSpanStart(first_super_page_pages[0]);
-EXPECT_EQ(PartitionPageSize(), slot_span_start & kSuperPageOffsetMask);
-uintptr_t super_page = slot_span_start - PartitionPageSize();
+EXPECT_EQ(
+    PartitionPageSize() + partition_alloc::internal::ReservedTagBitmapSize(),
+    slot_span_start & kSuperPageOffsetMask);
+uintptr_t super_page = slot_span_start - PartitionPageSize() -
+                       partition_alloc::internal::ReservedTagBitmapSize();
// Map a single system page either side of the mapping for our allocations,
// with the goal of tripping up alignment of the next mapping.
uintptr_t map1 = AllocPages(
@@ -1831,9 +1844,11 @@
FreePages(map2, PageAllocationGranularity());

 super_page = SlotSpan::ToSlotSpanStart(second_super_page_pages[0]);
-EXPECT_EQ(PartitionPageSize(),
-          reinterpret_cast<uintptr_t>(super_page) & kSuperPageOffsetMask);
-super_page -= PartitionPageSize();
+EXPECT_EQ(
+    PartitionPageSize() + partition_alloc::internal::ReservedTagBitmapSize(),
+    reinterpret_cast<uintptr_t>(super_page) & kSuperPageOffsetMask);
+super_page -= PartitionPageSize() +
+              partition_alloc::internal::ReservedTagBitmapSize();
// Map a single system page either side of the mapping for our allocations,
// with the goal of tripping up alignment of the next mapping.
map1 = AllocPages(super_page - PageAllocationGranularity(),
@@ -4402,6 +4417,74 @@ TEST_P(PartitionAllocTest, IncreaseEmptySlotSpanRingSize) {
kMaxFreeableSpans * bucket_size);
}

#if defined(PA_USE_MTE_CHECKED_PTR_WITH_64_BITS_POINTERS)

// Verifies basic PA support for `MTECheckedPtr`.
TEST_P(PartitionAllocTest, PartitionTagBasic) {
const size_t alloc_size = 64 - kExtraAllocSize;
void* ptr1 = allocator.root()->Alloc(alloc_size, type_name);
void* ptr2 = allocator.root()->Alloc(alloc_size, type_name);
void* ptr3 = allocator.root()->Alloc(alloc_size, type_name);
EXPECT_TRUE(ptr1);
EXPECT_TRUE(ptr2);
EXPECT_TRUE(ptr3);

auto* slot_span = SlotSpan::FromObject(ptr1);
EXPECT_TRUE(slot_span);

char* char_ptr1 = reinterpret_cast<char*>(ptr1);
char* char_ptr2 = reinterpret_cast<char*>(ptr2);
char* char_ptr3 = reinterpret_cast<char*>(ptr3);
EXPECT_LT(kTestAllocSize, slot_span->bucket->slot_size);
EXPECT_EQ(char_ptr1 + slot_span->bucket->slot_size, char_ptr2);
EXPECT_EQ(char_ptr2 + slot_span->bucket->slot_size, char_ptr3);
constexpr partition_alloc::PartitionTag kTag1 =
static_cast<partition_alloc::PartitionTag>(0xBADA);
constexpr partition_alloc::PartitionTag kTag2 =
static_cast<partition_alloc::PartitionTag>(0xDB8A);
constexpr partition_alloc::PartitionTag kTag3 =
static_cast<partition_alloc::PartitionTag>(0xA3C4);

partition_alloc::internal::PartitionTagSetValue(
ptr1, slot_span->bucket->slot_size, kTag1);
partition_alloc::internal::PartitionTagSetValue(
ptr2, slot_span->bucket->slot_size, kTag2);
partition_alloc::internal::PartitionTagSetValue(
ptr3, slot_span->bucket->slot_size, kTag3);

memset(ptr1, 0, alloc_size);
memset(ptr2, 0, alloc_size);
memset(ptr3, 0, alloc_size);

EXPECT_EQ(kTag1, partition_alloc::internal::PartitionTagGetValue(ptr1));
EXPECT_EQ(kTag2, partition_alloc::internal::PartitionTagGetValue(ptr2));
EXPECT_EQ(kTag3, partition_alloc::internal::PartitionTagGetValue(ptr3));

EXPECT_TRUE(!memchr(ptr1, static_cast<uint8_t>(kTag1), alloc_size));
EXPECT_TRUE(!memchr(ptr2, static_cast<uint8_t>(kTag2), alloc_size));

allocator.root()->Free(ptr1);
EXPECT_EQ(kTag2, partition_alloc::internal::PartitionTagGetValue(ptr2));

size_t request_size = slot_span->bucket->slot_size - kExtraAllocSize;
void* new_ptr2 = allocator.root()->Realloc(ptr2, request_size, type_name);
EXPECT_EQ(ptr2, new_ptr2);
EXPECT_EQ(kTag3, partition_alloc::internal::PartitionTagGetValue(ptr3));

// Add 1B to ensure the object is reallocated to a larger slot.
request_size = slot_span->bucket->slot_size - kExtraAllocSize + 1;
new_ptr2 = allocator.root()->Realloc(ptr2, request_size, type_name);
EXPECT_TRUE(new_ptr2);
EXPECT_NE(ptr2, new_ptr2);

allocator.root()->Free(new_ptr2);

EXPECT_EQ(kTag3, partition_alloc::internal::PartitionTagGetValue(ptr3));
allocator.root()->Free(ptr3);
}

#endif // defined(PA_USE_MTE_CHECKED_PTR_WITH_64_BITS_POINTERS)

#if BUILDFLAG(IS_ANDROID) && BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) && \
BUILDFLAG(IS_CHROMECAST)
extern "C" {
47 changes: 46 additions & 1 deletion base/allocator/partition_allocator/partition_bucket.cc
@@ -19,6 +19,8 @@
#include "base/allocator/partition_allocator/partition_direct_map_extent.h"
#include "base/allocator/partition_allocator/partition_oom.h"
#include "base/allocator/partition_allocator/partition_page.h"
#include "base/allocator/partition_allocator/partition_tag.h"
#include "base/allocator/partition_allocator/partition_tag_bitmap.h"
#include "base/allocator/partition_allocator/reservation_offset_table.h"
#include "base/allocator/partition_allocator/starscan/state_bitmap.h"
#include "base/allocator/partition_allocator/tagging.h"
@@ -634,6 +636,28 @@ PartitionBucket<thread_safe>::AllocNewSlotSpan(PartitionRoot<thread_safe>* root,
// Double check that we had enough space in the super page for the new slot
// span.
PA_DCHECK(root->next_partition_page <= root->next_partition_page_end);

#if defined(PA_USE_MTE_CHECKED_PTR_WITH_64_BITS_POINTERS)
PA_DCHECK(root->next_tag_bitmap_page);
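// The tag bitmap is committed lazily. Work out how far the committed region
// must now extend to cover tags for every partition page handed out so far,
// and commit any additional bitmap pages below.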
uintptr_t next_tag_bitmap_page =
base::bits::AlignUp(reinterpret_cast<uintptr_t>(
PartitionTagPointer(root->next_partition_page)),
SystemPageSize());
if (root->next_tag_bitmap_page < next_tag_bitmap_page) {
#if DCHECK_IS_ON()
uintptr_t super_page =
reinterpret_cast<uintptr_t>(slot_span) & kSuperPageBaseMask;
uintptr_t tag_bitmap = super_page + PartitionPageSize();
PA_DCHECK(next_tag_bitmap_page <= tag_bitmap + ActualTagBitmapSize());
PA_DCHECK(next_tag_bitmap_page > tag_bitmap);
#endif
SetSystemPagesAccess(root->next_tag_bitmap_page,
next_tag_bitmap_page - root->next_tag_bitmap_page,
PageAccessibilityConfiguration::kReadWrite);
root->next_tag_bitmap_page = next_tag_bitmap_page;
}
#endif // defined(PA_USE_MTE_CHECKED_PTR_WITH_64_BITS_POINTERS)

return slot_span;
}

@@ -666,7 +690,9 @@ ALWAYS_INLINE uintptr_t PartitionBucket<thread_safe>::AllocNewSuperPage(
std::memory_order_relaxed);

root->next_super_page = super_page + kSuperPageSize;
-uintptr_t state_bitmap = super_page + PartitionPageSize();
+// TODO(crbug.com/1307514): Add direct map support.
+uintptr_t state_bitmap = super_page + PartitionPageSize() +
+                         (is_direct_mapped() ? 0 : ReservedTagBitmapSize());
PA_DCHECK(SuperPageStateBitmapAddr(super_page) == state_bitmap);
const size_t state_bitmap_reservation_size =
root->IsQuarantineAllowed() ? ReservedStateBitmapSize() : 0;
@@ -745,6 +771,19 @@ ALWAYS_INLINE uintptr_t PartitionBucket<thread_safe>::AllocNewSuperPage(
payload < SuperPagesEndFromExtent(current_extent));
}

#if defined(PA_USE_MTE_CHECKED_PTR_WITH_64_BITS_POINTERS)
// `root->next_partition_page` currently points at the start of the
// super page payload. We point `root->next_tag_bitmap_page` to the
// corresponding point in the tag bitmap and let the caller
// (slot span allocation) take care of the rest.
root->next_tag_bitmap_page =
base::bits::AlignDown(reinterpret_cast<uintptr_t>(
PartitionTagPointer(root->next_partition_page)),
SystemPageSize());
PA_DCHECK(root->next_tag_bitmap_page >= super_page + PartitionPageSize())
<< "tag bitmap can never intrude on metadata partition page";
#endif // defined(PA_USE_MTE_CHECKED_PTR_WITH_64_BITS_POINTERS)

// If PCScan is used, commit the state bitmap. Otherwise, leave it uncommitted
// and let PartitionRoot::RegisterScannableRoot() commit it when needed. Make
// sure to register the super-page after it has been fully initialized.
@@ -841,6 +880,9 @@ PartitionBucket<thread_safe>::ProvisionMoreSlotsAndAllocOne(
return_slot =
::partition_alloc::internal::TagMemoryRangeRandomly(return_slot, size);
}
#if defined(PA_USE_MTE_CHECKED_PTR_WITH_64_BITS_POINTERS)
PartitionTagSetValue(return_slot, size, root->GetNewPartitionTag());
#endif // defined(PA_USE_MTE_CHECKED_PTR_WITH_64_BITS_POINTERS)

// Add all slots that fit within so far committed pages to the free list.
PartitionFreelistEntry* prev_entry = nullptr;
@@ -851,6 +893,9 @@
next_slot =
::partition_alloc::internal::TagMemoryRangeRandomly(next_slot, size);
}
#if defined(PA_USE_MTE_CHECKED_PTR_WITH_64_BITS_POINTERS)
PartitionTagSetValue(next_slot, size, root->GetNewPartitionTag());
#endif // defined(PA_USE_MTE_CHECKED_PTR_WITH_64_BITS_POINTERS)
auto* entry = PartitionFreelistEntry::EmplaceAndInitNull(next_slot);
if (!slot_span->get_freelist_head()) {
PA_DCHECK(!prev_entry);
14 changes: 13 additions & 1 deletion base/allocator/partition_allocator/partition_page.h
@@ -11,6 +11,7 @@
#include <limits>
#include <utility>

#include "base/allocator/buildflags.h"
#include "base/allocator/partition_allocator/address_pool_manager.h"
#include "base/allocator/partition_allocator/address_pool_manager_types.h"
#include "base/allocator/partition_allocator/partition_address_space.h"
@@ -19,6 +20,7 @@
#include "base/allocator/partition_allocator/partition_alloc_forward.h"
#include "base/allocator/partition_allocator/partition_bucket.h"
#include "base/allocator/partition_allocator/partition_freelist_entry.h"
#include "base/allocator/partition_allocator/partition_tag_bitmap.h"
#include "base/allocator/partition_allocator/reservation_offset_table.h"
#include "base/allocator/partition_allocator/starscan/state_bitmap.h"
#include "base/allocator/partition_allocator/tagging.h"
@@ -410,17 +412,27 @@ CommittedStateBitmapSize() {
// caller's responsibility to ensure that the bitmaps even exist.
ALWAYS_INLINE uintptr_t SuperPageStateBitmapAddr(uintptr_t super_page) {
PA_DCHECK(!(super_page % kSuperPageAlignment));
-return super_page + PartitionPageSize();
+return super_page + PartitionPageSize() +
+       (IsManagedByNormalBuckets(super_page) ? ReservedTagBitmapSize() : 0);
}
ALWAYS_INLINE AllocationStateMap* SuperPageStateBitmap(uintptr_t super_page) {
return reinterpret_cast<AllocationStateMap*>(
SuperPageStateBitmapAddr(super_page));
}

// Returns the address of the tag bitmap of the `super_page`. Caller must ensure
// that bitmap exists.
ALWAYS_INLINE uintptr_t SuperPageTagBitmapAddr(uintptr_t super_page) {
PA_DCHECK(IsReservationStart(super_page));
// Skip over the guard pages / metadata.
return super_page + PartitionPageSize();
}
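// Usage sketch (illustrative; assumes an MTECheckedPtr-enabled build, a
// normal-bucket `super_page`, and that PartitionTag is a 16-bit integer):
//   auto* tags = reinterpret_cast<partition_alloc::PartitionTag*>(
//       SuperPageTagBitmapAddr(super_page));
//   partition_alloc::PartitionTag first = tags[0];  // Tag of first granule.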

ALWAYS_INLINE uintptr_t SuperPagePayloadBegin(uintptr_t super_page,
bool with_quarantine) {
PA_DCHECK(!(super_page % kSuperPageAlignment));
return super_page + PartitionPageSize() +
(IsManagedByNormalBuckets(super_page) ? ReservedTagBitmapSize() : 0) +
(with_quarantine ? ReservedStateBitmapSize() : 0);
}

27 changes: 27 additions & 0 deletions base/allocator/partition_allocator/partition_root.h
@@ -56,6 +56,7 @@
#include "base/allocator/partition_allocator/partition_oom.h"
#include "base/allocator/partition_allocator/partition_page.h"
#include "base/allocator/partition_allocator/partition_ref_count.h"
#include "base/allocator/partition_allocator/partition_tag.h"
#include "base/allocator/partition_allocator/reservation_offset_table.h"
#include "base/allocator/partition_allocator/starscan/pcscan.h"
#include "base/allocator/partition_allocator/starscan/state_bitmap.h"
@@ -333,6 +334,12 @@ struct ALIGNAS(64) BASE_EXPORT PartitionRoot {

bool quarantine_always_for_testing = false;

#if defined(PA_USE_MTE_CHECKED_PTR_WITH_64_BITS_POINTERS)
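// Tag most recently handed out to a slot; advanced by GetNewPartitionTag()
// below each time a slot is provisioned. The value 0 is never used.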
partition_alloc::PartitionTag current_partition_tag = 0;
// Points to the end of the committed tag bitmap region.
uintptr_t next_tag_bitmap_page = 0;
#endif // defined(PA_USE_MTE_CHECKED_PTR_WITH_64_BITS_POINTERS)

PartitionRoot()
: quarantine_mode(QuarantineMode::kAlwaysDisabled),
scan_mode(ScanMode::kDisabled) {}
@@ -716,6 +723,17 @@ struct ALIGNAS(64) BASE_EXPORT PartitionRoot {
max_empty_slot_spans_dirty_bytes_shift = 0;
}

#if defined(PA_USE_MTE_CHECKED_PTR_WITH_64_BITS_POINTERS)
ALWAYS_INLINE partition_alloc::PartitionTag GetNewPartitionTag() {
// TODO(crbug.com/1298696): Performance is not an issue here, so we can use
// random tags in lieu of sequential ones.
auto tag = ++current_partition_tag;
tag += !tag; // Avoid 0.
current_partition_tag = tag;
return tag;
}
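// Assuming a 16-bit PartitionTag, the sequence above runs 1, 2, ..., 0xFFFF;
// `++` then wraps to 0 and `tag += !tag` bumps it back to 1, so 0 is never
// handed out and can safely denote "no live tag".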
#endif // defined(PA_USE_MTE_CHECKED_PTR_WITH_64_BITS_POINTERS)

private:
// |buckets| has `kNumBuckets` elements, but we sometimes access it at index
// `kNumBuckets`, which is occupied by the sentinel bucket. The correct layout
@@ -1119,6 +1137,15 @@ ALWAYS_INLINE void PartitionRoot<thread_safe>::FreeNoHooks(void* object) {
PA_PREFETCH(slot_span);
#endif // defined(PA_HAS_MEMORY_TAGGING)

#if defined(PA_USE_MTE_CHECKED_PTR_WITH_64_BITS_POINTERS)
if (!root->IsDirectMappedBucket(slot_span->bucket)) {
size_t slot_size_less_extras =
root->AdjustSizeForExtrasSubtract(slot_span->bucket->slot_size);
partition_alloc::internal::PartitionTagIncrementValue(
object, slot_size_less_extras);
}
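// Incrementing the tag on free is what invalidates outstanding
// MTECheckedPtrs to this slot: their embedded tag no longer matches, so the
// next checked dereference crashes instead of touching freed memory.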
#endif // defined(PA_USE_MTE_CHECKED_PTR_WITH_64_BITS_POINTERS)

// TODO(bikineev): Change the condition to LIKELY once PCScan is enabled by
// default.
if (UNLIKELY(root->ShouldQuarantine(object))) {
