From b1d75303a1747c0b285ebcf217a899c7008019ee Mon Sep 17 00:00:00 2001
From: Kalvin Lee
Date: Thu, 24 Mar 2022 20:18:02 +0000
Subject: [PATCH] PartitionAlloc: Re-add MTECheckedPtr

This change re-introduces MTECheckedPtr into the machinery of
PartitionAlloc.

Bug: 1298696
Change-Id: I3862849a959e9fe78def41376dde183650fc8832
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/3494580
Reviewed-by: Bartek Nowierski
Commit-Queue: Kalvin Lee
Cr-Commit-Position: refs/heads/main@{#984989}
---
 .../partition_alloc_constants.h              |   6 +-
 .../partition_alloc_unittest.cc              | 107 +++++++++--
 .../partition_allocator/partition_bucket.cc  |  47 ++++-
 .../partition_allocator/partition_page.h     |  14 +-
 .../partition_allocator/partition_root.h     |  27 +++
 .../partition_allocator/partition_tag.h      |  67 ++++---
 base/memory/raw_ptr.h                        | 176 +++++++++++++++++-
 base/memory/raw_ptr_unittest.cc              | 140 ++++++++++++++
 8 files changed, 539 insertions(+), 45 deletions(-)

diff --git a/base/allocator/partition_allocator/partition_alloc_constants.h b/base/allocator/partition_allocator/partition_alloc_constants.h
index e0c871d6f60fe7..d8eb68f532ed42 100644
--- a/base/allocator/partition_allocator/partition_alloc_constants.h
+++ b/base/allocator/partition_allocator/partition_alloc_constants.h
@@ -151,6 +151,7 @@ MaxRegularSlotSpanSize() {
 // | Guard page (4 KiB)    |
 // | Metadata page (4 KiB) |
 // | Guard pages (8 KiB)   |
+// | TagBitmap             |
 // | *Scan State Bitmap    |
 // | Slot span             |
 // | Slot span             |
@@ -159,7 +160,9 @@ MaxRegularSlotSpanSize() {
 // | Guard pages (16 KiB)  |
 // +-----------------------+
 //
-// State Bitmap is inserted for partitions that may have quarantine enabled.
+// TagBitmap is only present when
+// defined(PA_USE_MTE_CHECKED_PTR_WITH_64_BITS_POINTERS) is true. State Bitmap
+// is inserted for partitions that may have quarantine enabled.
 //
 // If refcount_at_end_allocation is enabled, RefcountBitmap(4KiB) is inserted
 // after the Metadata page for BackupRefPtr. The guard pages after the bitmap
@@ -353,7 +356,6 @@ constexpr size_t kMinDirectMappedDownsize = kMaxBucketed + 1;
 // Intentionally set to less than 2GiB to make sure that a 2GiB allocation
 // fails. This is a security choice in Chrome, to help make size_t vs int bugs
 // harder to exploit.
-//
 PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR ALWAYS_INLINE size_t
 MaxDirectMapped() {
diff --git a/base/allocator/partition_allocator/partition_alloc_unittest.cc b/base/allocator/partition_allocator/partition_alloc_unittest.cc
index dc1cd262719092..977dc24732d01c 100644
--- a/base/allocator/partition_allocator/partition_alloc_unittest.cc
+++ b/base/allocator/partition_allocator/partition_alloc_unittest.cc
@@ -29,6 +29,7 @@
 #include "base/allocator/partition_allocator/partition_page.h"
 #include "base/allocator/partition_allocator/partition_ref_count.h"
 #include "base/allocator/partition_allocator/partition_root.h"
+#include "base/allocator/partition_allocator/partition_tag_bitmap.h"
 #include "base/allocator/partition_allocator/reservation_offset_table.h"
 #include "base/allocator/partition_allocator/tagging.h"
 #include "base/bits.h"
@@ -575,7 +576,9 @@ TEST_P(PartitionAllocTest, Basic) {
   EXPECT_EQ(kPointerOffset,
             reinterpret_cast<uintptr_t>(ptr) & PartitionPageOffsetMask());
   // Check that the offset appears to include a guard page.
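+  // With MTECheckedPtr enabled, the tag bitmap sits between the metadata
+  // pages and the payload, so the first object moves deeper into the super
+  // page by ReservedTagBitmapSize().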
-  EXPECT_EQ(PartitionPageSize() + kPointerOffset,
+  EXPECT_EQ(PartitionPageSize() +
+                partition_alloc::internal::ReservedTagBitmapSize() +
+                kPointerOffset,
             reinterpret_cast<uintptr_t>(ptr) & kSuperPageOffsetMask);

   allocator.root()->Free(ptr);
@@ -853,9 +856,11 @@ TEST_P(PartitionAllocTest, FreeSlotSpanListSlotSpanTransitions) {
 // super page.
 TEST_P(PartitionAllocTest, MultiPageAllocs) {
   size_t num_pages_per_slot_span = GetNumPagesPerSlotSpan(kTestAllocSize);
-  // 1 super page has 2 guard partition pages.
+  // 1 super page has 2 guard partition pages and a tag bitmap.
   size_t num_slot_spans_needed =
-      (NumPartitionPagesPerSuperPage() - 2) / num_pages_per_slot_span;
+      (NumPartitionPagesPerSuperPage() - 2 -
+       partition_alloc::internal::NumPartitionPagesPerTagBitmap()) /
+      num_pages_per_slot_span;

   // We need one more slot span in order to cross super page boundary.
   ++num_slot_spans_needed;
@@ -874,8 +879,11 @@ TEST_P(PartitionAllocTest, MultiPageAllocs) {
       uintptr_t second_super_page_offset =
           slot_span_start & kSuperPageOffsetMask;
       EXPECT_FALSE(second_super_page_base == first_super_page_base);
-      // Check that we allocated a guard page for the second page.
-      EXPECT_EQ(PartitionPageSize(), second_super_page_offset);
+      // Check that we allocated a guard page and the reserved tag bitmap for
+      // the second page.
+      EXPECT_EQ(PartitionPageSize() +
+                    partition_alloc::internal::ReservedTagBitmapSize(),
+                second_super_page_offset);
     }
   }
   for (i = 0; i < num_slot_spans_needed; ++i)
@@ -1792,9 +1800,11 @@ TEST_P(PartitionAllocTest, PartialPages) {
 TEST_P(PartitionAllocTest, MappingCollision) {
   size_t num_pages_per_slot_span = GetNumPagesPerSlotSpan(kTestAllocSize);
   // The -2 is because the first and last partition pages in a super page are
-  // guard pages.
+  // guard pages. We also discount the partition pages used for the tag bitmap.
   size_t num_slot_span_needed =
-      (NumPartitionPagesPerSuperPage() - 2) / num_pages_per_slot_span;
+      (NumPartitionPagesPerSuperPage() - 2 -
+       partition_alloc::internal::NumPartitionPagesPerTagBitmap()) /
+      num_pages_per_slot_span;
   size_t num_partition_pages_needed =
       num_slot_span_needed * num_pages_per_slot_span;

@@ -1809,8 +1819,11 @@ TEST_P(PartitionAllocTest, MappingCollision) {
   uintptr_t slot_spart_start =
       SlotSpan::ToSlotSpanStart(first_super_page_pages[0]);
-  EXPECT_EQ(PartitionPageSize(), slot_spart_start & kSuperPageOffsetMask);
-  uintptr_t super_page = slot_spart_start - PartitionPageSize();
+  EXPECT_EQ(
+      PartitionPageSize() + partition_alloc::internal::ReservedTagBitmapSize(),
+      slot_spart_start & kSuperPageOffsetMask);
+  uintptr_t super_page = slot_spart_start - PartitionPageSize() -
+                         partition_alloc::internal::ReservedTagBitmapSize();
   // Map a single system page either side of the mapping for our allocations,
   // with the goal of tripping up alignment of the next mapping.
   uintptr_t map1 = AllocPages(
@@ -1831,9 +1844,11 @@ TEST_P(PartitionAllocTest, MappingCollision) {
   FreePages(map2, PageAllocationGranularity());

   super_page = SlotSpan::ToSlotSpanStart(second_super_page_pages[0]);
-  EXPECT_EQ(PartitionPageSize(),
-            reinterpret_cast<uintptr_t>(super_page) & kSuperPageOffsetMask);
-  super_page -= PartitionPageSize();
+  EXPECT_EQ(
+      PartitionPageSize() + partition_alloc::internal::ReservedTagBitmapSize(),
+      reinterpret_cast<uintptr_t>(super_page) & kSuperPageOffsetMask);
+  super_page -=
+      PartitionPageSize() + partition_alloc::internal::ReservedTagBitmapSize();
   // Map a single system page either side of the mapping for our allocations,
   // with the goal of tripping up alignment of the next mapping.
   map1 = AllocPages(super_page - PageAllocationGranularity(),
@@ -4402,6 +4417,74 @@ TEST_P(PartitionAllocTest, IncreaseEmptySlotSpanRingSize) {
             kMaxFreeableSpans * bucket_size);
 }

+#if defined(PA_USE_MTE_CHECKED_PTR_WITH_64_BITS_POINTERS)
+
+// Verifies basic PA support for `MTECheckedPtr`.
+TEST_P(PartitionAllocTest, PartitionTagBasic) {
+  const size_t alloc_size = 64 - kExtraAllocSize;
+  void* ptr1 = allocator.root()->Alloc(alloc_size, type_name);
+  void* ptr2 = allocator.root()->Alloc(alloc_size, type_name);
+  void* ptr3 = allocator.root()->Alloc(alloc_size, type_name);
+  EXPECT_TRUE(ptr1);
+  EXPECT_TRUE(ptr2);
+  EXPECT_TRUE(ptr3);
+
+  auto* slot_span = SlotSpan::FromObject(ptr1);
+  EXPECT_TRUE(slot_span);
+
+  char* char_ptr1 = reinterpret_cast<char*>(ptr1);
+  char* char_ptr2 = reinterpret_cast<char*>(ptr2);
+  char* char_ptr3 = reinterpret_cast<char*>(ptr3);
+  EXPECT_LT(kTestAllocSize, slot_span->bucket->slot_size);
+  EXPECT_EQ(char_ptr1 + slot_span->bucket->slot_size, char_ptr2);
+  EXPECT_EQ(char_ptr2 + slot_span->bucket->slot_size, char_ptr3);
+  constexpr partition_alloc::PartitionTag kTag1 =
+      static_cast<partition_alloc::PartitionTag>(0xBADA);
+  constexpr partition_alloc::PartitionTag kTag2 =
+      static_cast<partition_alloc::PartitionTag>(0xDB8A);
+  constexpr partition_alloc::PartitionTag kTag3 =
+      static_cast<partition_alloc::PartitionTag>(0xA3C4);
+
+  partition_alloc::internal::PartitionTagSetValue(
+      ptr1, slot_span->bucket->slot_size, kTag1);
+  partition_alloc::internal::PartitionTagSetValue(
+      ptr2, slot_span->bucket->slot_size, kTag2);
+  partition_alloc::internal::PartitionTagSetValue(
+      ptr3, slot_span->bucket->slot_size, kTag3);
+
+  memset(ptr1, 0, alloc_size);
+  memset(ptr2, 0, alloc_size);
+  memset(ptr3, 0, alloc_size);
+
+  EXPECT_EQ(kTag1, partition_alloc::internal::PartitionTagGetValue(ptr1));
+  EXPECT_EQ(kTag2, partition_alloc::internal::PartitionTagGetValue(ptr2));
+  EXPECT_EQ(kTag3, partition_alloc::internal::PartitionTagGetValue(ptr3));
+
+  EXPECT_TRUE(!memchr(ptr1, static_cast<uint8_t>(kTag1), alloc_size));
+  EXPECT_TRUE(!memchr(ptr2, static_cast<uint8_t>(kTag2), alloc_size));
+
+  allocator.root()->Free(ptr1);
+  EXPECT_EQ(kTag2, partition_alloc::internal::PartitionTagGetValue(ptr2));
+
+  size_t request_size = slot_span->bucket->slot_size - kExtraAllocSize;
+  void* new_ptr2 = allocator.root()->Realloc(ptr2, request_size, type_name);
+  EXPECT_EQ(ptr2, new_ptr2);
+  EXPECT_EQ(kTag3, partition_alloc::internal::PartitionTagGetValue(ptr3));
+
+  // Add 1B to ensure the object is relocated to a larger slot.
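+  // The Realloc() above stayed within the same slot and kept the address;
+  // growing past slot_size below must move the object to a fresh slot with
+  // its own tag.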
+  request_size = slot_span->bucket->slot_size - kExtraAllocSize + 1;
+  new_ptr2 = allocator.root()->Realloc(ptr2, request_size, type_name);
+  EXPECT_TRUE(new_ptr2);
+  EXPECT_NE(ptr2, new_ptr2);
+
+  allocator.root()->Free(new_ptr2);
+
+  EXPECT_EQ(kTag3, partition_alloc::internal::PartitionTagGetValue(ptr3));
+  allocator.root()->Free(ptr3);
+}
+
+#endif  // defined(PA_USE_MTE_CHECKED_PTR_WITH_64_BITS_POINTERS)
+
 #if BUILDFLAG(IS_ANDROID) && BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) && \
     BUILDFLAG(IS_CHROMECAST)
 extern "C" {
diff --git a/base/allocator/partition_allocator/partition_bucket.cc b/base/allocator/partition_allocator/partition_bucket.cc
index 8b0429af45a148..7e753f8c6283b2 100644
--- a/base/allocator/partition_allocator/partition_bucket.cc
+++ b/base/allocator/partition_allocator/partition_bucket.cc
@@ -19,6 +19,8 @@
 #include "base/allocator/partition_allocator/partition_direct_map_extent.h"
 #include "base/allocator/partition_allocator/partition_oom.h"
 #include "base/allocator/partition_allocator/partition_page.h"
+#include "base/allocator/partition_allocator/partition_tag.h"
+#include "base/allocator/partition_allocator/partition_tag_bitmap.h"
 #include "base/allocator/partition_allocator/reservation_offset_table.h"
 #include "base/allocator/partition_allocator/starscan/state_bitmap.h"
 #include "base/allocator/partition_allocator/tagging.h"
@@ -634,6 +636,28 @@ PartitionBucket<thread_safe>::AllocNewSlotSpan(PartitionRoot<thread_safe>* root,
   // Double check that we had enough space in the super page for the new slot
   // span.
   PA_DCHECK(root->next_partition_page <= root->next_partition_page_end);
+
+#if defined(PA_USE_MTE_CHECKED_PTR_WITH_64_BITS_POINTERS)
+  PA_DCHECK(root->next_tag_bitmap_page);
+  uintptr_t next_tag_bitmap_page =
+      base::bits::AlignUp(reinterpret_cast<uintptr_t>(
+                              PartitionTagPointer(root->next_partition_page)),
+                          SystemPageSize());
+  if (root->next_tag_bitmap_page < next_tag_bitmap_page) {
+#if DCHECK_IS_ON()
+    uintptr_t super_page =
+        reinterpret_cast<uintptr_t>(slot_span) & kSuperPageBaseMask;
+    uintptr_t tag_bitmap = super_page + PartitionPageSize();
+    PA_DCHECK(next_tag_bitmap_page <= tag_bitmap + ActualTagBitmapSize());
+    PA_DCHECK(next_tag_bitmap_page > tag_bitmap);
+#endif
+    SetSystemPagesAccess(root->next_tag_bitmap_page,
+                         next_tag_bitmap_page - root->next_tag_bitmap_page,
+                         PageAccessibilityConfiguration::kReadWrite);
+    root->next_tag_bitmap_page = next_tag_bitmap_page;
+  }
+#endif  // defined(PA_USE_MTE_CHECKED_PTR_WITH_64_BITS_POINTERS)
+
   return slot_span;
 }

@@ -666,7 +690,9 @@ ALWAYS_INLINE uintptr_t PartitionBucket<thread_safe>::AllocNewSuperPage(
       std::memory_order_relaxed);
   root->next_super_page = super_page + kSuperPageSize;
-  uintptr_t state_bitmap = super_page + PartitionPageSize();
+  // TODO(crbug.com/1307514): Add direct map support.
+  uintptr_t state_bitmap = super_page + PartitionPageSize() +
+                           (is_direct_mapped() ? 0 : ReservedTagBitmapSize());
   PA_DCHECK(SuperPageStateBitmapAddr(super_page) == state_bitmap);
   const size_t state_bitmap_reservation_size =
       root->IsQuarantineAllowed() ? ReservedStateBitmapSize() : 0;
@@ -745,6 +771,19 @@ ALWAYS_INLINE uintptr_t PartitionBucket<thread_safe>::AllocNewSuperPage(
               payload < SuperPagesEndFromExtent(current_extent));
   }

+#if defined(PA_USE_MTE_CHECKED_PTR_WITH_64_BITS_POINTERS)
+  // `root->next_partition_page` currently points at the start of the
+  // super page payload. We point `root->next_tag_bitmap_page` to the
+  // corresponding point in the tag bitmap and let the caller
+  // (slot span allocation) take care of the rest.
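+  // `next_tag_bitmap_page` tracks the first bitmap page not yet committed
+  // (see its declaration in partition_root.h). AlignDown() starts it at the
+  // page containing the first payload slot's tag; AllocNewSlotSpan() then
+  // commits bitmap pages from there on demand.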
+  root->next_tag_bitmap_page =
+      base::bits::AlignDown(reinterpret_cast<uintptr_t>(
+                                PartitionTagPointer(root->next_partition_page)),
+                            SystemPageSize());
+  // The tag bitmap can never intrude on the metadata partition page.
+  PA_DCHECK(root->next_tag_bitmap_page >= super_page + PartitionPageSize());
+#endif  // defined(PA_USE_MTE_CHECKED_PTR_WITH_64_BITS_POINTERS)
+
 // If PCScan is used, commit the state bitmap. Otherwise, leave it uncommitted
 // and let PartitionRoot::RegisterScannableRoot() commit it when needed. Make
 // sure to register the super-page after it has been fully initialized.
@@ -841,6 +880,9 @@ PartitionBucket<thread_safe>::ProvisionMoreSlotsAndAllocOne(
     return_slot =
         ::partition_alloc::internal::TagMemoryRangeRandomly(return_slot, size);
   }
+#if defined(PA_USE_MTE_CHECKED_PTR_WITH_64_BITS_POINTERS)
+  PartitionTagSetValue(return_slot, size, root->GetNewPartitionTag());
+#endif  // defined(PA_USE_MTE_CHECKED_PTR_WITH_64_BITS_POINTERS)

   // Add all slots that fit within so far committed pages to the free list.
   PartitionFreelistEntry* prev_entry = nullptr;
@@ -851,6 +893,9 @@ PartitionBucket<thread_safe>::ProvisionMoreSlotsAndAllocOne(
       next_slot =
           ::partition_alloc::internal::TagMemoryRangeRandomly(next_slot, size);
     }
+#if defined(PA_USE_MTE_CHECKED_PTR_WITH_64_BITS_POINTERS)
+    PartitionTagSetValue(next_slot, size, root->GetNewPartitionTag());
+#endif  // defined(PA_USE_MTE_CHECKED_PTR_WITH_64_BITS_POINTERS)
     auto* entry = PartitionFreelistEntry::EmplaceAndInitNull(next_slot);
     if (!slot_span->get_freelist_head()) {
       PA_DCHECK(!prev_entry);
diff --git a/base/allocator/partition_allocator/partition_page.h b/base/allocator/partition_allocator/partition_page.h
index 9419fa4c44f2da..89f6925be67114 100644
--- a/base/allocator/partition_allocator/partition_page.h
+++ b/base/allocator/partition_allocator/partition_page.h
@@ -11,6 +11,7 @@
 #include
 #include

+#include "base/allocator/buildflags.h"
 #include "base/allocator/partition_allocator/address_pool_manager.h"
 #include "base/allocator/partition_allocator/address_pool_manager_types.h"
 #include "base/allocator/partition_allocator/partition_address_space.h"
@@ -19,6 +20,7 @@
 #include "base/allocator/partition_allocator/partition_alloc_forward.h"
 #include "base/allocator/partition_allocator/partition_bucket.h"
 #include "base/allocator/partition_allocator/partition_freelist_entry.h"
+#include "base/allocator/partition_allocator/partition_tag_bitmap.h"
 #include "base/allocator/partition_allocator/reservation_offset_table.h"
 #include "base/allocator/partition_allocator/starscan/state_bitmap.h"
 #include "base/allocator/partition_allocator/tagging.h"
@@ -410,17 +412,27 @@ CommittedStateBitmapSize() {
 // caller's responsibility to ensure that the bitmaps even exist.
 ALWAYS_INLINE uintptr_t SuperPageStateBitmapAddr(uintptr_t super_page) {
   PA_DCHECK(!(super_page % kSuperPageAlignment));
-  return super_page + PartitionPageSize();
+  return super_page + PartitionPageSize() +
+         (IsManagedByNormalBuckets(super_page) ? ReservedTagBitmapSize() : 0);
 }

 ALWAYS_INLINE AllocationStateMap* SuperPageStateBitmap(uintptr_t super_page) {
   return reinterpret_cast<AllocationStateMap*>(
       SuperPageStateBitmapAddr(super_page));
 }

+// Returns the address of the tag bitmap of the `super_page`. The caller must
+// ensure that the bitmap exists.
+ALWAYS_INLINE uintptr_t SuperPageTagBitmapAddr(uintptr_t super_page) {
+  PA_DCHECK(IsReservationStart(super_page));
+  // Skip over the guard pages / metadata.
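+  // Per the layout diagram in partition_alloc_constants.h, the tag bitmap
+  // immediately follows the first (metadata) partition page, ahead of the
+  // state bitmap.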
+  return super_page + PartitionPageSize();
+}
+
 ALWAYS_INLINE uintptr_t SuperPagePayloadBegin(uintptr_t super_page,
                                               bool with_quarantine) {
   PA_DCHECK(!(super_page % kSuperPageAlignment));
   return super_page + PartitionPageSize() +
+         (IsManagedByNormalBuckets(super_page) ? ReservedTagBitmapSize() : 0) +
          (with_quarantine ? ReservedStateBitmapSize() : 0);
 }
diff --git a/base/allocator/partition_allocator/partition_root.h b/base/allocator/partition_allocator/partition_root.h
index 30f1943b011cb3..9de186d51495c3 100644
--- a/base/allocator/partition_allocator/partition_root.h
+++ b/base/allocator/partition_allocator/partition_root.h
@@ -56,6 +56,7 @@
 #include "base/allocator/partition_allocator/partition_oom.h"
 #include "base/allocator/partition_allocator/partition_page.h"
 #include "base/allocator/partition_allocator/partition_ref_count.h"
+#include "base/allocator/partition_allocator/partition_tag.h"
 #include "base/allocator/partition_allocator/reservation_offset_table.h"
 #include "base/allocator/partition_allocator/starscan/pcscan.h"
 #include "base/allocator/partition_allocator/starscan/state_bitmap.h"
@@ -333,6 +334,12 @@ struct ALIGNAS(64) BASE_EXPORT PartitionRoot {
   bool quarantine_always_for_testing = false;

+#if defined(PA_USE_MTE_CHECKED_PTR_WITH_64_BITS_POINTERS)
+  partition_alloc::PartitionTag current_partition_tag = 0;
+  // Points to the end of the committed tag bitmap region.
+  uintptr_t next_tag_bitmap_page = 0;
+#endif  // defined(PA_USE_MTE_CHECKED_PTR_WITH_64_BITS_POINTERS)
+
   PartitionRoot()
       : quarantine_mode(QuarantineMode::kAlwaysDisabled),
         scan_mode(ScanMode::kDisabled) {}
@@ -716,6 +723,17 @@ struct ALIGNAS(64) BASE_EXPORT PartitionRoot {
     max_empty_slot_spans_dirty_bytes_shift = 0;
   }

+#if defined(PA_USE_MTE_CHECKED_PTR_WITH_64_BITS_POINTERS)
+  ALWAYS_INLINE partition_alloc::PartitionTag GetNewPartitionTag() {
+    // TODO(crbug.com/1298696): Performance is not an issue here; we can use
+    // random tags in lieu of sequential ones.
+    auto tag = ++current_partition_tag;
+    tag += !tag;  // Avoid 0.
+    current_partition_tag = tag;
+    return tag;
+  }
+#endif  // defined(PA_USE_MTE_CHECKED_PTR_WITH_64_BITS_POINTERS)
+
  private:
   // |buckets| has `kNumBuckets` elements, but we sometimes access it at index
   // `kNumBuckets`, which is occupied by the sentinel bucket. The correct layout
@@ -1119,6 +1137,15 @@ ALWAYS_INLINE void PartitionRoot<thread_safe>::FreeNoHooks(void* object) {
   PA_PREFETCH(slot_span);
 #endif  // defined(PA_HAS_MEMORY_TAGGING)

+#if defined(PA_USE_MTE_CHECKED_PTR_WITH_64_BITS_POINTERS)
+  if (!root->IsDirectMappedBucket(slot_span->bucket)) {
+    size_t slot_size_less_extras =
+        root->AdjustSizeForExtrasSubtract(slot_span->bucket->slot_size);
+    partition_alloc::internal::PartitionTagIncrementValue(
+        object, slot_size_less_extras);
+  }
+#endif  // defined(PA_USE_MTE_CHECKED_PTR_WITH_64_BITS_POINTERS)
+
   // TODO(bikineev): Change the condition to LIKELY once PCScan is enabled by
   // default.
   if (UNLIKELY(root->ShouldQuarantine(object))) {
diff --git a/base/allocator/partition_allocator/partition_tag.h b/base/allocator/partition_allocator/partition_tag.h
index 25dde0d5956816..939defe7d8bf1f 100644
--- a/base/allocator/partition_allocator/partition_tag.h
+++ b/base/allocator/partition_allocator/partition_tag.h
@@ -14,11 +14,14 @@
 #include "base/allocator/partition_allocator/partition_alloc_constants.h"
 #include "base/allocator/partition_allocator/partition_alloc_notreached.h"
 #include "base/allocator/partition_allocator/partition_cookie.h"
+#include "base/allocator/partition_allocator/partition_page.h"
 #include "base/allocator/partition_allocator/partition_tag_bitmap.h"
+#include "base/allocator/partition_allocator/reservation_offset_table.h"
+#include "base/allocator/partition_allocator/tagging.h"
 #include "base/base_export.h"
 #include "build/build_config.h"

-namespace partition_alloc::internal {
+namespace partition_alloc {

 #if defined(PA_USE_MTE_CHECKED_PTR_WITH_64_BITS_POINTERS)

 // ...
 using PartitionTag = uint8_t;

 static_assert(
-    sizeof(PartitionTag) == tag_bitmap::kPartitionTagSize,
+    sizeof(PartitionTag) == internal::tag_bitmap::kPartitionTagSize,
     "sizeof(PartitionTag) must be equal to bitmap::kPartitionTagSize.");

-static constexpr size_t kInSlotTagBufferSize = 0;
+ALWAYS_INLINE PartitionTag* PartitionTagPointer(uintptr_t addr) {
+  // TODO(crbug.com/1307514): Add direct map support. For now, just assume
+  // that direct maps don't have tags.
+  PA_DCHECK(internal::IsManagedByNormalBuckets(addr));

-ALWAYS_INLINE PartitionTag* PartitionTagPointer(void* ptr) {
-  // See the comment explaining the layout in partition_tag_bitmap.h.
-  uintptr_t pointer_as_uintptr = reinterpret_cast<uintptr_t>(ptr);
-  uintptr_t bitmap_base =
-      (pointer_as_uintptr & kSuperPageBaseMask) + PartitionPageSize();
-  uintptr_t offset =
-      (pointer_as_uintptr & kSuperPageOffsetMask) - PartitionPageSize();
-  // Not to depend on partition_address_space.h and PartitionAllocGigaCage
-  // feature, use "offset" to see whether the given ptr is_direct_mapped or
-  // not. DirectMap objects should cause this PA_DCHECK's failure, as tags
-  // aren't currently supported there.
-  PA_DCHECK(offset >= ReservedTagBitmapSize());
-  size_t bitmap_offset = (offset - ReservedTagBitmapSize()) >>
-                         tag_bitmap::kBytesPerPartitionTagShift
-                             << tag_bitmap::kPartitionTagSizeShift;
-  return reinterpret_cast<PartitionTag*>(bitmap_base + bitmap_offset);
+  uintptr_t bitmap_base =
+      internal::SuperPageTagBitmapAddr(addr & internal::kSuperPageBaseMask);
+  const size_t bitmap_end_offset =
+      internal::PartitionPageSize() + internal::ReservedTagBitmapSize();
+  PA_DCHECK((addr & internal::kSuperPageOffsetMask) >= bitmap_end_offset);
+  uintptr_t offset_in_super_page =
+      (addr & internal::kSuperPageOffsetMask) - bitmap_end_offset;
+  size_t offset_in_bitmap = offset_in_super_page >>
+                            internal::tag_bitmap::kBytesPerPartitionTagShift
+                                << internal::tag_bitmap::kPartitionTagSizeShift;
+  return reinterpret_cast<PartitionTag*>(bitmap_base + offset_in_bitmap);
 }

-ALWAYS_INLINE void PartitionTagSetValue(void* ptr,
+ALWAYS_INLINE PartitionTag* PartitionTagPointer(const void* ptr) {
+  return PartitionTagPointer(
+      internal::UnmaskPtr(reinterpret_cast<uintptr_t>(ptr)));
+}
+
+namespace internal {
+
+ALWAYS_INLINE void PartitionTagSetValue(uintptr_t addr,
                                         size_t size,
                                         PartitionTag value) {
   PA_DCHECK((size % tag_bitmap::kBytesPerPartitionTag) == 0);
   size_t tag_count = size >> tag_bitmap::kBytesPerPartitionTagShift;
-  PartitionTag* tag_ptr = PartitionTagPointer(ptr);
+  PartitionTag* tag_ptr = PartitionTagPointer(addr);
   if (sizeof(PartitionTag) == 1) {
     memset(tag_ptr, value, tag_count);
   } else {
@@ -64,6 +72,12 @@ ALWAYS_INLINE void PartitionTagSetValue(void* ptr,
   }
 }

+ALWAYS_INLINE void PartitionTagSetValue(void* ptr,
+                                        size_t size,
+                                        PartitionTag value) {
+  PartitionTagSetValue(reinterpret_cast<uintptr_t>(ptr), size, value);
+}
+
 ALWAYS_INLINE PartitionTag PartitionTagGetValue(void* ptr) {
   return *PartitionTagPointer(ptr);
 }
@@ -93,17 +107,19 @@ ALWAYS_INLINE void PartitionTagIncrementValue(void* ptr, size_t size) {
   PartitionTagSetValue(ptr, size, new_tag);
 }

+}  // namespace internal
+
 #else  // No-op versions

 using PartitionTag = uint8_t;

-static constexpr size_t kInSlotTagBufferSize = 0;
-
 ALWAYS_INLINE PartitionTag* PartitionTagPointer(void* ptr) {
   PA_NOTREACHED();
   return nullptr;
 }

+namespace internal {
+
 ALWAYS_INLINE void PartitionTagSetValue(void*, size_t, PartitionTag) {}

 ALWAYS_INLINE PartitionTag PartitionTagGetValue(void*) {
@@ -114,11 +130,10 @@ ALWAYS_INLINE void PartitionTagClearValue(void* ptr, size_t) {}

 ALWAYS_INLINE void PartitionTagIncrementValue(void* ptr, size_t size) {}

-#endif  // defined(PA_USE_MTE_CHECKED_PTR_WITH_64_BITS_POINTERS)
+}  // namespace internal

-constexpr size_t kPartitionTagSizeAdjustment = kInSlotTagBufferSize;
-constexpr size_t kPartitionTagOffsetAdjustment = kInSlotTagBufferSize;
+#endif  // defined(PA_USE_MTE_CHECKED_PTR_WITH_64_BITS_POINTERS)

-}  // namespace partition_alloc::internal
+}  // namespace partition_alloc

 #endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_TAG_H_
diff --git a/base/memory/raw_ptr.h b/base/memory/raw_ptr.h
index 0da2923aff160c..80ab0a7f639844 100644
--- a/base/memory/raw_ptr.h
+++ b/base/memory/raw_ptr.h
@@ -14,6 +14,7 @@
 #include

 #include "base/allocator/buildflags.h"
+#include "base/allocator/partition_allocator/partition_alloc_config.h"
 #include "base/check.h"
 #include "base/compiler_specific.h"
 #include "base/dcheck_is_on.h"
@@ -21,15 +22,21 @@
 #include "build/build_config.h"
 #include "build/buildflag.h"

-#if BUILDFLAG(USE_BACKUP_REF_PTR)
+#if BUILDFLAG(USE_BACKUP_REF_PTR) || \
+    defined(PA_USE_MTE_CHECKED_PTR_WITH_64_BITS_POINTERS)
 // USE_BACKUP_REF_PTR implies USE_PARTITION_ALLOC, needed for code under
 // allocator/partition_allocator/ to be built.
 #include "base/allocator/partition_allocator/address_pool_manager_bitmap.h"
 #include "base/allocator/partition_allocator/partition_address_space.h"
-#include "base/allocator/partition_allocator/partition_alloc_config.h"
 #include "base/allocator/partition_allocator/partition_alloc_constants.h"
 #include "base/base_export.h"
-#endif  // BUILDFLAG(USE_BACKUP_REF_PTR)
+#endif  // BUILDFLAG(USE_BACKUP_REF_PTR) ||
+        // defined(PA_USE_MTE_CHECKED_PTR_WITH_64_BITS_POINTERS)
+
+#if defined(PA_USE_MTE_CHECKED_PTR_WITH_64_BITS_POINTERS)
+#include "base/allocator/partition_allocator/partition_tag.h"
+#include "base/allocator/partition_allocator/tagging.h"
+#endif  // defined(PA_USE_MTE_CHECKED_PTR_WITH_64_BITS_POINTERS)

 #if BUILDFLAG(IS_WIN)
 #include "base/win/windows_types.h"
@@ -128,6 +135,164 @@ struct RawPtrNoOpImpl {
   IncrementPointerToMemberOperatorCountForTest() {}
 };

+#if defined(PA_USE_MTE_CHECKED_PTR_WITH_64_BITS_POINTERS)
+
+constexpr int kValidAddressBits = 48;
+constexpr uintptr_t kAddressMask = (1ull << kValidAddressBits) - 1;
+constexpr int kTagBits = sizeof(uintptr_t) * 8 - kValidAddressBits;
+constexpr uintptr_t kTagMask = ~kAddressMask;
+constexpr int kTopBitShift = 63;
+constexpr uintptr_t kTopBit = 1ull << kTopBitShift;
+static_assert(kTopBit << 1 == 0, "kTopBit should really be the top bit");
+static_assert((kTopBit & kTagMask) > 0,
+              "kTopBit bit must be inside the tag region");
+
+// This functionality is outside of MTECheckedPtrImpl, so that it can be
+// overridden by tests.
+struct MTECheckedPtrImplPartitionAllocSupport {
+  // Checks if the necessary support is enabled in PartitionAlloc for `ptr`.
+  template <typename T>
+  static ALWAYS_INLINE bool EnabledForPtr(T* ptr) {
+    auto as_uintptr =
+        partition_alloc::internal::UnmaskPtr(reinterpret_cast<uintptr_t>(ptr));
+    // MTECheckedPtr algorithms work only when memory is
+    // allocated by PartitionAlloc, from normal buckets pool.
+    //
+    // TODO(crbug.com/1307514): Allow direct-map buckets.
+    return IsManagedByPartitionAlloc(as_uintptr) &&
+           IsManagedByNormalBuckets(as_uintptr);
+  }
+
+  // Returns a pointer to the tag that protects the memory pointed to by
+  // |ptr|.
+  static ALWAYS_INLINE void* TagPointer(uintptr_t ptr) {
+    return partition_alloc::PartitionTagPointer(ptr);
+  }
+};
+
+template <typename PartitionAllocSupport>
+struct MTECheckedPtrImpl {
+  // This implementation assumes that pointers are 64 bits long and at least
+  // 16 top bits are unused. The latter is harder to verify statically, but
+  // this is true for all currently supported 64-bit architectures (DCHECK
+  // when wrapping will verify that).
+  static_assert(sizeof(void*) >= 8, "Need 64-bit pointers");
+
+  // Wraps a pointer, and returns its uintptr_t representation.
+  template <typename T>
+  static RAW_PTR_FUNC_ATTRIBUTES T* WrapRawPtr(T* ptr) {
+    uintptr_t addr = reinterpret_cast<uintptr_t>(ptr);
+    DCHECK_EQ(ExtractTag(addr), 0ull);
+
+    // Return a not-wrapped |addr|, if it's either nullptr or if the
+    // protection for this pointer is disabled.
+    if (!PartitionAllocSupport::EnabledForPtr(ptr)) {
+      return reinterpret_cast<T*>(addr);
+    }
+
+    // Read the tag and place it in the top bits of the address.
+    // Even if PartitionAlloc's tag has less than kTagBits, we'll read
+    // what's given and pad the rest with 0s.
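+    // For example, an 8-bit tag of 0x42 on the 48-bit address
+    // 0x1234'5678'9ABC wraps to 0x0042'1234'5678'9ABC.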
+    static_assert(sizeof(partition_alloc::PartitionTag) * 8 <= kTagBits, "");
+    uintptr_t tag = *(static_cast<partition_alloc::PartitionTag*>(
+        PartitionAllocSupport::TagPointer(addr)));
+
+    tag <<= kValidAddressBits;
+    addr |= tag;
+    return reinterpret_cast<T*>(addr);
+  }
+
+  // Notifies the allocator when a wrapped pointer is being removed or
+  // replaced. No-op for MTECheckedPtrImpl.
+  template <typename T>
+  static RAW_PTR_FUNC_ATTRIBUTES void ReleaseWrappedPtr(T*) {}
+
+  // Unwraps the pointer's uintptr_t representation, while asserting that
+  // memory hasn't been freed. The function is allowed to crash on nullptr.
+  template <typename T>
+  static RAW_PTR_FUNC_ATTRIBUTES T* SafelyUnwrapPtrForDereference(
+      T* wrapped_ptr) {
+    uintptr_t wrapped_addr = reinterpret_cast<uintptr_t>(wrapped_ptr);
+    uintptr_t tag = wrapped_addr >> kValidAddressBits;
+    if (tag > 0) {
+      // Read the tag provided by PartitionAlloc.
+      //
+      // Cast to volatile to ensure memory is read. E.g. in a tight loop, the
+      // compiler could cache the value in a register and thus could miss that
+      // another thread freed memory and changed the tag.
+      uintptr_t read_tag =
+          *static_cast<volatile partition_alloc::PartitionTag*>(
+              PartitionAllocSupport::TagPointer(ExtractAddress(wrapped_addr)));
+      if (UNLIKELY(tag != read_tag))
+        IMMEDIATE_CRASH();
+      return reinterpret_cast<T*>(wrapped_addr & kAddressMask);
+    }
+    return wrapped_ptr;
+  }
+
+  // Unwraps the pointer's uintptr_t representation, while asserting that
+  // memory hasn't been freed. The function must handle nullptr gracefully.
+  template <typename T>
+  static RAW_PTR_FUNC_ATTRIBUTES T* SafelyUnwrapPtrForExtraction(
+      T* wrapped_ptr) {
+    // SafelyUnwrapPtrForDereference handles the nullptr case well.
+    return SafelyUnwrapPtrForDereference(wrapped_ptr);
+  }
+
+  // Unwraps the pointer's uintptr_t representation, without making an
+  // assertion on whether memory was freed or not.
+  template <typename T>
+  static RAW_PTR_FUNC_ATTRIBUTES T* UnsafelyUnwrapPtrForComparison(
+      T* wrapped_ptr) {
+    return ExtractPtr(wrapped_ptr);
+  }
+
+  // Upcasts the wrapped pointer.
+  template <typename To, typename From>
+  static RAW_PTR_FUNC_ATTRIBUTES constexpr To* Upcast(From* wrapped_ptr) {
+    static_assert(std::is_convertible<From*, To*>::value,
+                  "From must be convertible to To.");
+
+    // The top-bit tag must not affect the result of upcast.
+    return static_cast<To*>(wrapped_ptr);
+  }
+
+  // Advances the wrapped pointer by `delta_elem` elements.
+  template <typename T>
+  static RAW_PTR_FUNC_ATTRIBUTES T* Advance(T* wrapped_ptr,
+                                            ptrdiff_t delta_elem) {
+    return wrapped_ptr + delta_elem;
+  }
+
+  // Returns a copy of a wrapped pointer, without making an assertion
+  // on whether memory was freed or not.
+  template <typename T>
+  static RAW_PTR_FUNC_ATTRIBUTES T* Duplicate(T* wrapped_ptr) {
+    return wrapped_ptr;
+  }
+
+  // This is for accounting only, used by unit tests.
+  static RAW_PTR_FUNC_ATTRIBUTES void IncrementSwapCountForTest() {}
+  static RAW_PTR_FUNC_ATTRIBUTES void
+  IncrementPointerToMemberOperatorCountForTest() {}
+
+ private:
+  static ALWAYS_INLINE uintptr_t ExtractAddress(uintptr_t wrapped_ptr) {
+    return wrapped_ptr & kAddressMask;
+  }
+
+  template <typename T>
+  static ALWAYS_INLINE T* ExtractPtr(T* wrapped_ptr) {
+    return reinterpret_cast<T*>(
+        ExtractAddress(reinterpret_cast<uintptr_t>(wrapped_ptr)));
+  }
+
+  static ALWAYS_INLINE uintptr_t ExtractTag(uintptr_t wrapped_ptr) {
+    return wrapped_ptr & kTagMask;
+  }
+};
+
+#endif  // defined(PA_USE_MTE_CHECKED_PTR_WITH_64_BITS_POINTERS)
+
 #if BUILDFLAG(USE_BACKUP_REF_PTR)

 #if DCHECK_IS_ON() || BUILDFLAG(ENABLE_BACKUP_REF_PTR_SLOW_CHECKS)
@@ -503,6 +668,11 @@ using RawPtrBanDanglingIfSupported =
 #elif BUILDFLAG(USE_ASAN_BACKUP_REF_PTR)
 using RawPtrMayDangle = internal::AsanBackupRefPtrImpl;
 using RawPtrBanDanglingIfSupported = internal::AsanBackupRefPtrImpl;
+#elif defined(PA_USE_MTE_CHECKED_PTR_WITH_64_BITS_POINTERS)
+using RawPtrMayDangle = internal::MTECheckedPtrImpl<
+    internal::MTECheckedPtrImplPartitionAllocSupport>;
+using RawPtrBanDanglingIfSupported = internal::MTECheckedPtrImpl<
+    internal::MTECheckedPtrImplPartitionAllocSupport>;
 #else
 using RawPtrMayDangle = internal::RawPtrNoOpImpl;
 using RawPtrBanDanglingIfSupported = internal::RawPtrNoOpImpl;
diff --git a/base/memory/raw_ptr_unittest.cc b/base/memory/raw_ptr_unittest.cc
index 4965ae689250ff..cf79fa172138ec 100644
--- a/base/memory/raw_ptr_unittest.cc
+++ b/base/memory/raw_ptr_unittest.cc
@@ -14,6 +14,7 @@
 #include "base/allocator/partition_alloc_support.h"
 #include "base/allocator/partition_allocator/dangling_raw_ptr_checks.h"
 #include "base/allocator/partition_allocator/partition_alloc.h"
+#include "base/allocator/partition_allocator/partition_alloc_config.h"
 #include "base/logging.h"
 #include "build/build_config.h"
 #include "build/buildflag.h"
@@ -24,6 +25,10 @@
 #include "third_party/perfetto/include/perfetto/test/traced_value_test_support.h"  // no-presubmit-check nogncheck
 #endif  // BUILDFLAG(ENABLE_BASE_TRACING)

+#if defined(PA_USE_MTE_CHECKED_PTR_WITH_64_BITS_POINTERS)
+#include "base/allocator/partition_allocator/partition_tag.h"
+#endif  // defined(PA_USE_MTE_CHECKED_PTR_WITH_64_BITS_POINTERS)
+
 using testing::AllOf;
 using testing::HasSubstr;
 using testing::Test;
@@ -1387,5 +1392,140 @@ TEST(AsanBackupRefPtrImpl, Instantiation) {
 }
 #endif

+#if defined(PA_USE_MTE_CHECKED_PTR_WITH_64_BITS_POINTERS)
+
+static constexpr size_t kTagOffsetForTest = 2;
+
+struct MTECheckedPtrImplPartitionAllocSupportForTest {
+  static bool EnabledForPtr(void* ptr) { return !!ptr; }
+
+  static ALWAYS_INLINE void* TagPointer(uintptr_t ptr) {
+    return reinterpret_cast<void*>(ptr - kTagOffsetForTest);
+  }
+};
+
+using MTECheckedPtrImplForTest =
+    MTECheckedPtrImpl<MTECheckedPtrImplPartitionAllocSupportForTest>;
+
+TEST(MTECheckedPtrImpl, WrapAndSafelyUnwrap) {
+  // Create a fake allocation, with the first 2B used for the tag.
+  // It is ok to use a fake allocation, instead of PartitionAlloc, because
+  // MTECheckedPtrImplForTest fakes that the functionality is enabled for this
+  // pointer and points to the tag appropriately.
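+  // Layout below: bytes[0..1] hold the little-endian 16-bit tag 0x42BA;
+  // bytes[2] (0x78) is the first byte the fake allocation points at.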
+  unsigned char bytes[] = {0xBA, 0x42, 0x78, 0x89};
+  void* ptr = bytes + kTagOffsetForTest;
+  ASSERT_EQ(0x78, *static_cast<unsigned char*>(ptr));
+  uintptr_t addr = reinterpret_cast<uintptr_t>(ptr);
+
+  uintptr_t mask = 0xFFFFFFFFFFFFFFFF;
+  if (sizeof(partition_alloc::PartitionTag) < 2)
+    mask = 0x00FFFFFFFFFFFFFF;
+
+  uintptr_t wrapped =
+      reinterpret_cast<uintptr_t>(MTECheckedPtrImplForTest::WrapRawPtr(ptr));
+  // The bytes before the allocation will be used as the tag (in reverse
+  // order due to little-endianness).
+  ASSERT_EQ(wrapped, (addr | 0x42BA000000000000) & mask);
+  ASSERT_EQ(MTECheckedPtrImplForTest::SafelyUnwrapPtrForDereference(
+                reinterpret_cast<void*>(wrapped)),
+            ptr);
+
+  // Modify the tag in the fake allocation.
+  bytes[0] |= 0x40;
+  wrapped =
+      reinterpret_cast<uintptr_t>(MTECheckedPtrImplForTest::WrapRawPtr(ptr));
+  ASSERT_EQ(wrapped, (addr | 0x42FA000000000000) & mask);
+  ASSERT_EQ(MTECheckedPtrImplForTest::SafelyUnwrapPtrForDereference(
+                reinterpret_cast<void*>(wrapped)),
+            ptr);
+}
+
+TEST(MTECheckedPtrImpl, SafelyUnwrapDisabled) {
+  // Create a fake allocation, with the first 2B used for the tag.
+  // It is ok to use a fake allocation, instead of PartitionAlloc, because
+  // MTECheckedPtrImplForTest fakes that the functionality is enabled for this
+  // pointer and points to the tag appropriately.
+  unsigned char bytes[] = {0xBA, 0x42, 0x78, 0x89};
+  unsigned char* ptr = bytes + kTagOffsetForTest;
+  ASSERT_EQ(0x78, *ptr);
+  ASSERT_EQ(MTECheckedPtrImplForTest::SafelyUnwrapPtrForDereference(ptr), ptr);
+}
+
+TEST(MTECheckedPtrImpl, CrashOnTagMismatch) {
+  // Create a fake allocation, using the first two bytes for the tag.
+  // It is ok to use a fake allocation, instead of PartitionAlloc, because
+  // MTECheckedPtrImplForTest fakes that the functionality is enabled for this
+  // pointer and points to the tag appropriately.
+  unsigned char bytes[] = {0xBA, 0x42, 0x78, 0x89};
+  unsigned char* ptr =
+      MTECheckedPtrImplForTest::WrapRawPtr(bytes + kTagOffsetForTest);
+  EXPECT_EQ(*MTECheckedPtrImplForTest::SafelyUnwrapPtrForDereference(ptr),
+            0x78);
+  // Clobber the tag associated with the fake allocation.
+  bytes[0] = 0;
+  EXPECT_DEATH_IF_SUPPORTED(
+      if (*MTECheckedPtrImplForTest::SafelyUnwrapPtrForDereference(ptr) ==
+          0x78) return,
+      "");
+}
+
+#if !defined(MEMORY_TOOL_REPLACES_ALLOCATOR) && \
+    BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
+
+// This test works only when PartitionAlloc is used, when tags are enabled.
+// Don't enable it when MEMORY_TOOL_REPLACES_ALLOCATOR is defined, because it
+// makes PartitionAlloc take a different path that doesn't provide tags, thus
+// no crash on UaF, thus missing the EXPECT_DEATH_IF_SUPPORTED expectation.
+TEST(MTECheckedPtrImpl, CrashOnUseAfterFree) {
+  int* unwrapped_ptr = new int;
+  // Use the actual CheckedPtr implementation, not a test substitute, to
+  // exercise real PartitionAlloc paths.
+  raw_ptr<int> ptr = unwrapped_ptr;
+  *ptr = 42;
+  EXPECT_EQ(*ptr, 42);
+  delete unwrapped_ptr;
+  EXPECT_DEATH_IF_SUPPORTED(if (*ptr == 42) return, "");
+}
+
+TEST(MTECheckedPtrImpl, CrashOnUseAfterFree_WithOffset) {
+  const uint8_t kSize = 100;
+  uint8_t* unwrapped_ptr = new uint8_t[kSize];
+  // Use the actual CheckedPtr implementation, not a test substitute, to
+  // exercise real PartitionAlloc paths.
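+  // Every raw_ptr below points into the same allocation and so captures the
+  // same tag; deleting the array invalidates all of the offset pointers at
+  // once.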
+  raw_ptr<uint8_t> ptrs[kSize];
+  for (uint8_t i = 0; i < kSize; ++i) {
+    ptrs[i] = static_cast<uint8_t*>(unwrapped_ptr) + i;
+  }
+  for (uint8_t i = 0; i < kSize; ++i) {
+    *ptrs[i] = 42 + i;
+    EXPECT_TRUE(*ptrs[i] == 42 + i);
+  }
+  delete[] unwrapped_ptr;
+  for (uint8_t i = 0; i < kSize; i += 15) {
+    EXPECT_DEATH_IF_SUPPORTED(if (*ptrs[i] == 42 + i) return, "");
+  }
+}
+
+TEST(MTECheckedPtrImpl, AdvancedPointerShiftedAppropriately) {
+  uint64_t* unwrapped_ptr = new uint64_t[6];
+  raw_ptr<uint64_t> ptr = unwrapped_ptr;
+
+  // This is unwrapped, but still useful for ensuring that the
+  // shift is sized in `uint64_t`s.
+  auto original_addr = reinterpret_cast<uintptr_t>(ptr.get());
+
+  ptr += 5;
+  EXPECT_EQ(reinterpret_cast<uintptr_t>(ptr.get()) - original_addr,
+            5 * sizeof(uint64_t));
+  delete[] unwrapped_ptr;
+
+  EXPECT_DEATH_IF_SUPPORTED(*ptr, "");
+}
+
+#endif  // !defined(MEMORY_TOOL_REPLACES_ALLOCATOR) &&
+        // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
+
+#endif  // defined(PA_USE_MTE_CHECKED_PTR_WITH_64_BITS_POINTERS)
+
 }  // namespace internal
 }  // namespace base
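For reference, a self-contained sketch of the wrap/check/unwrap arithmetic
that MTECheckedPtrImpl performs. The constant names mirror raw_ptr.h; the
address and tag values are invented purely for illustration:

#include <cassert>
#include <cstdint>

int main() {
  constexpr int kValidAddressBits = 48;
  constexpr uintptr_t kAddressMask = (1ull << kValidAddressBits) - 1;

  uintptr_t addr = 0x123456789ABCull;  // hypothetical 48-bit heap address
  uintptr_t tag = 0x42;                // tag as read from the tag bitmap

  // Wrap: pack the tag into the unused top bits of the pointer.
  uintptr_t wrapped = addr | (tag << kValidAddressBits);
  assert(wrapped == 0x0042123456789ABCull);

  // Check on dereference: the packed tag must match the bitmap's current
  // value; a mismatch is what triggers IMMEDIATE_CRASH() in the real code.
  assert((wrapped >> kValidAddressBits) == tag);

  // Unwrap: strip the tag to recover the original address.
  assert((wrapped & kAddressMask) == addr);
  return 0;
}

Because Free() increments the slot's tag in the bitmap, a stale wrapped
pointer fails the check step above, which is the crash the unit tests in
this CL assert on.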