From 916dcd89d879d2638d4c9995af75ff298c0f67ac Mon Sep 17 00:00:00 2001 From: Kalvin Lee Date: Mon, 3 Oct 2022 10:26:48 +0000 Subject: [PATCH] [PA] Remove "GigaCage" references, 4 of N This change removes the rest of the references to "GigaCage" in PartitionAlloc code, swapping in "pool" verbiage instead. Several structs and functions are trivially renamed. Bug: 1369834 Change-Id: Iac752216549372a001e117c233614cfb033e8a46 Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/3929883 Reviewed-by: Bartek Nowierski Commit-Queue: Kalvin Lee Cr-Commit-Position: refs/heads/main@{#1054106} --- .../partition_address_space.cc | 18 ++++---- .../partition_address_space.h | 41 ++++++++++--------- .../partition_alloc_config.h | 6 +-- .../partition_allocator/partition_bucket.cc | 33 +++++++-------- .../partition_allocator/partition_root.h | 4 +- 5 files changed, 50 insertions(+), 52 deletions(-) diff --git a/base/allocator/partition_allocator/partition_address_space.cc b/base/allocator/partition_allocator/partition_address_space.cc index 98b87d177a8fa..3f3a4e8f8ab2c 100644 --- a/base/allocator/partition_allocator/partition_address_space.cc +++ b/base/allocator/partition_allocator/partition_address_space.cc @@ -66,18 +66,18 @@ bool IsLegacyWindowsVersion() { } #endif // defined(PA_DYNAMICALLY_SELECT_POOL_SIZE) -PA_NOINLINE void HandleGigaCageAllocFailureOutOfVASpace() { +PA_NOINLINE void HandlePoolAllocFailureOutOfVASpace() { PA_NO_CODE_FOLDING(); PA_CHECK(false); } -PA_NOINLINE void HandleGigaCageAllocFailureOutOfCommitCharge() { +PA_NOINLINE void HandlePoolAllocFailureOutOfCommitCharge() { PA_NO_CODE_FOLDING(); PA_CHECK(false); } #endif // BUILDFLAG(IS_WIN) -PA_NOINLINE void HandleGigaCageAllocFailure() { +PA_NOINLINE void HandlePoolAllocFailure() { PA_NO_CODE_FOLDING(); uint32_t alloc_page_error_code = GetAllocPageErrorCode(); PA_DEBUG_DATA_ON_STACK("error", static_cast(alloc_page_error_code)); @@ -87,12 +87,12 @@ PA_NOINLINE void HandleGigaCageAllocFailure() { 
if (alloc_page_error_code == ERROR_NOT_ENOUGH_MEMORY) { // The error code says NOT_ENOUGH_MEMORY, but since we only do MEM_RESERVE, // it must be VA space exhaustion. - HandleGigaCageAllocFailureOutOfVASpace(); + HandlePoolAllocFailureOutOfVASpace(); } else if (alloc_page_error_code == ERROR_COMMITMENT_LIMIT) { // On Windows <8.1, MEM_RESERVE increases commit charge to account for // not-yet-committed PTEs needed to cover that VA space, if it was to be // committed (see crbug.com/1101421#c16). - HandleGigaCageAllocFailureOutOfCommitCharge(); + HandlePoolAllocFailureOutOfCommitCharge(); } else #endif // BUILDFLAG(IS_WIN) { @@ -103,7 +103,7 @@ PA_NOINLINE void HandleGigaCageAllocFailure() { } // namespace alignas(kPartitionCachelineSize) - PartitionAddressSpace::GigaCageSetup PartitionAddressSpace::setup_; + PartitionAddressSpace::PoolSetup PartitionAddressSpace::setup_; #if defined(PA_ENABLE_SHADOW_METADATA) std::ptrdiff_t PartitionAddressSpace::regular_pool_shadow_offset_ = 0; @@ -178,7 +178,7 @@ void PartitionAddressSpace::Init() { PageAccessibilityConfiguration::kInaccessible, PageTag::kPartitionAlloc, regular_pool_fd); if (!setup_.regular_pool_base_address_) - HandleGigaCageAllocFailure(); + HandlePoolAllocFailure(); #if defined(PA_DYNAMICALLY_SELECT_POOL_SIZE) setup_.regular_pool_base_mask_ = ~(regular_pool_size - 1); #endif @@ -210,7 +210,7 @@ void PartitionAddressSpace::Init() { PageAccessibilityConfiguration::kInaccessible, PageTag::kPartitionAlloc, brp_pool_fd); if (!base_address) - HandleGigaCageAllocFailure(); + HandlePoolAllocFailure(); setup_.brp_pool_base_address_ = base_address + kForbiddenZoneSize; #if defined(PA_DYNAMICALLY_SELECT_POOL_SIZE) setup_.brp_pool_base_mask_ = ~(brp_pool_size - 1); @@ -235,7 +235,7 @@ void PartitionAddressSpace::Init() { #endif // PA_STARSCAN_USE_CARD_TABLE #if defined(PA_ENABLE_SHADOW_METADATA) - // Reserve memory for the shadow GigaCage + // Reserve memory for the shadow pools. 
uintptr_t regular_pool_shadow_address = AllocPages(regular_pool_size, regular_pool_size, PageAccessibilityConfiguration::kInaccessible, diff --git a/base/allocator/partition_allocator/partition_address_space.h b/base/allocator/partition_allocator/partition_address_space.h index 6d787c6c15c38..9456a10c0669e 100644 --- a/base/allocator/partition_allocator/partition_address_space.h +++ b/base/allocator/partition_allocator/partition_address_space.h @@ -32,10 +32,12 @@ namespace partition_alloc { namespace internal { // Reserves address space for PartitionAllocator. +// +// This reserves space for the regular and BRP pools. If callers would +// like to use the configurable pool, they must manually set up the +// address space themselves and provide the mapping to PartitionAlloc. class PA_COMPONENT_EXPORT(PARTITION_ALLOC) PartitionAddressSpace { public: - // BRP stands for BackupRefPtr. GigaCage is split into pools, one which - // supports BackupRefPtr and one that doesn't. static PA_ALWAYS_INLINE internal::pool_handle GetRegularPool() { return setup_.regular_pool_; } @@ -54,8 +56,8 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) PartitionAddressSpace { return setup_.brp_pool_; } - // The Configurable Pool can be created inside an existing mapping and so will - // be located outside PartitionAlloc's GigaCage. + // The Configurable Pool can be created inside an existing mapping; we + // keep the information with the other pool setup data. static PA_ALWAYS_INLINE internal::pool_handle GetConfigurablePool() { return setup_.configurable_pool_; } @@ -91,7 +93,8 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) PartitionAddressSpace { return kConfigurablePoolMinSize; } - // Initialize the GigaCage and the Pools inside of it. + // Initialize pools. + // // This function must only be called from the main thread. static void Init(); // Initialize the ConfigurablePool at the given address |pool_base|. 
It must @@ -194,25 +197,25 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) PartitionAddressSpace { } #endif // defined(PA_DYNAMICALLY_SELECT_POOL_SIZE) - // On 64-bit systems, GigaCage is split into disjoint pools. The BRP pool, is - // where all allocations have a BRP ref-count, thus pointers pointing there - // can use a BRP protection against UaF. Allocations in the other pools don't - // have that. + // On 64-bit systems, PA allocates from several contiguous, mutually disjoint + // pools. The BRP pool is where all allocations have a BRP ref-count, thus + // pointers pointing there can use a BRP protection against UaF. Allocations + // in the other pools don't have that. // // Pool sizes have to be the power of two. Each pool will be aligned at its // own size boundary. // // NOTE! The BRP pool must be preceded by a reserved region, where allocations - // are forbidden. This is to prevent a pointer immediately past a non-GigaCage + // are forbidden. This is to prevent a pointer to the end of a non-BRP-pool // allocation from falling into the BRP pool, thus triggering BRP mechanism // and likely crashing. This "forbidden zone" can be as small as 1B, but it's // simpler to just reserve an allocation granularity unit. // // The ConfigurablePool is an optional Pool that can be created inside an - // existing mapping by the embedder, and so will be outside of the GigaCage. - // This Pool can be used when certain PA allocations must be located inside a - // given virtual address region. One use case for this Pool is V8's virtual - // memory cage, which requires that ArrayBuffers be located inside of it. + // existing mapping by the embedder. This Pool can be used when certain PA + // allocations must be located inside a given virtual address region. One + // use case for this Pool is V8's virtual memory cage, which requires that + // ArrayBuffers be located inside of it. 
static constexpr size_t kRegularPoolSize = kPoolMaxSize; static constexpr size_t kBRPPoolSize = kPoolMaxSize; static_assert(base::bits::IsPowerOfTwo(kRegularPoolSize) && @@ -265,11 +268,11 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) PartitionAddressSpace { static constexpr uintptr_t kUninitializedPoolBaseAddress = static_cast(-1); - struct GigaCageSetup { + struct PoolSetup { // Before PartitionAddressSpace::Init(), no allocation are allocated from a // reserved address space. Therefore, set *_pool_base_address_ initially to // -1, so that PartitionAddressSpace::IsIn*Pool() always returns false. - constexpr GigaCageSetup() + constexpr PoolSetup() : regular_pool_base_address_(kUninitializedPoolBaseAddress), brp_pool_base_address_(kUninitializedPoolBaseAddress), configurable_pool_base_address_(kUninitializedPoolBaseAddress), @@ -303,15 +306,15 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) PartitionAddressSpace { char one_cacheline_[kPartitionCachelineSize]; }; }; - static_assert(sizeof(GigaCageSetup) % kPartitionCachelineSize == 0, - "GigaCageSetup has to fill a cacheline(s)"); + static_assert(sizeof(PoolSetup) % kPartitionCachelineSize == 0, + "PoolSetup has to fill a cacheline(s)"); // See the comment describing the address layout above. // // These are write-once fields, frequently accessed thereafter. Make sure they // don't share a cacheline with other, potentially writeable data, through // alignment and padding. 
- alignas(kPartitionCachelineSize) static GigaCageSetup setup_; + alignas(kPartitionCachelineSize) static PoolSetup setup_; #if defined(PA_ENABLE_SHADOW_METADATA) static std::ptrdiff_t regular_pool_shadow_offset_; diff --git a/base/allocator/partition_allocator/partition_alloc_config.h b/base/allocator/partition_allocator/partition_alloc_config.h index 1d2823482750c..7d3934b8f4aef 100644 --- a/base/allocator/partition_allocator/partition_alloc_config.h +++ b/base/allocator/partition_allocator/partition_alloc_config.h @@ -254,10 +254,10 @@ constexpr bool kUseLazyCommit = false; // Enable shadow metadata. // -// With this flag, a shadow GigaCage will be mapped, on which writable shadow +// With this flag, shadow pools will be mapped, on which writable shadow // metadatas are placed, and the real metadatas are set to read-only instead. -// This feature is only enabled with 64-bits CPUs because GigaCage does not -// exist with 32-bits CPUs. +// This feature is only enabled in 64-bit environments because pools work +// differently with 32-bit pointers (see glossary). #if BUILDFLAG(ENABLE_SHADOW_METADATA_FOR_64_BITS_POINTERS) && \ defined(PA_HAS_64_BITS_POINTERS) #define PA_ENABLE_SHADOW_METADATA diff --git a/base/allocator/partition_allocator/partition_bucket.cc b/base/allocator/partition_allocator/partition_bucket.cc index b2e4d9ef28aef..73859b9532b84 100644 --- a/base/allocator/partition_allocator/partition_bucket.cc +++ b/base/allocator/partition_allocator/partition_bucket.cc @@ -97,8 +97,8 @@ bool AreAllowedSuperPagesForBRPPool(uintptr_t start, uintptr_t end) { #endif // !defined(PA_HAS_64_BITS_POINTERS) && // BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT) -// Reserves |requested_size| worth of super pages from the specified pool of the -// GigaCage. If BRP pool is requested this function will honor BRP block list. +// Reserves |requested_size| worth of super pages from the specified pool. +// If BRP pool is requested this function will honor BRP block list.
// // The returned address will be aligned to kSuperPageSize, and so // |requested_address| should be. |requested_size| doesn't have to be, however. @@ -114,9 +114,9 @@ bool AreAllowedSuperPagesForBRPPool(uintptr_t start, uintptr_t end) { // AreAllowedSuperPagesForBRPPool. // - IsAllowedSuperPageForBRPPool (used by AreAllowedSuperPagesForBRPPool) is // designed to not need locking. -uintptr_t ReserveMemoryFromGigaCage(pool_handle pool, - uintptr_t requested_address, - size_t requested_size) { +uintptr_t ReserveMemoryFromPool(pool_handle pool, + uintptr_t requested_address, + size_t requested_size) { PA_DCHECK(!(requested_address % kSuperPageSize)); uintptr_t reserved_address = AddressPoolManager::GetInstance().Reserve( @@ -242,9 +242,9 @@ SlotSpanMetadata* PartitionDirectMap( { // Getting memory for direct-mapped allocations doesn't interact with the // rest of the allocator, but takes a long time, as it involves several - // system calls. With GigaCage, no mmap() (or equivalent) call is made on 64 - // bit systems, but page permissions are changed with mprotect(), which is a - // syscall. + // system calls. Although no mmap() (or equivalent) calls are made on + // 64 bit systems, page permissions are changed with mprotect(), which is + // a syscall. // // These calls are almost always slow (at least a couple us per syscall on a // desktop Linux machine), and they also have a very long latency tail, @@ -277,17 +277,15 @@ SlotSpanMetadata* PartitionDirectMap( PA_DCHECK(slot_size <= available_reservation_size); #endif - // Allocate from GigaCage. Route to the appropriate GigaCage pool based on - // BackupRefPtr support. pool_handle pool = root->ChoosePool(); uintptr_t reservation_start; { - // Reserving memory from the GigaCage is actually not a syscall on 64 bit + // Reserving memory from the pool is actually not a syscall on 64 bit // platforms. 
#if !defined(PA_HAS_64_BITS_POINTERS) ScopedSyscallTimer timer{root}; #endif - reservation_start = ReserveMemoryFromGigaCage(pool, 0, reservation_size); + reservation_start = ReserveMemoryFromPool(pool, 0, reservation_size); } if (PA_UNLIKELY(!reservation_start)) { if (return_null) @@ -339,7 +337,7 @@ SlotSpanMetadata* PartitionDirectMap( #endif // No need to hold root->lock_. Now that memory is reserved, no other - // overlapping region can be allocated (because of how GigaCage works), + // overlapping region can be allocated (because of how pools work), // so no other thread can update the same offset table entries at the // same time. Furthermore, nobody will be ready these offsets until this // function returns. @@ -411,7 +409,7 @@ SlotSpanMetadata* PartitionDirectMap( SlotSpanMetadata(&metadata->bucket); // It is typically possible to map a large range of inaccessible pages, and - // this is leveraged in multiple places, including the GigaCage. However, + // this is leveraged in multiple places, including the pools. However, // this doesn't mean that we can commit all this memory. For the vast // majority of allocations, this just means that we crash in a slightly // different place, but for callers ready to handle failures, we have to @@ -731,10 +729,8 @@ uintptr_t PartitionBucket::AllocNewSuperPageSpan( // page table bloat and not fragmenting address spaces in 32 bit // architectures. uintptr_t requested_address = root->next_super_page; - // Allocate from GigaCage. Route to the appropriate GigaCage pool based on - // BackupRefPtr support. pool_handle pool = root->ChoosePool(); - uintptr_t super_page_span_start = ReserveMemoryFromGigaCage( + uintptr_t super_page_span_start = ReserveMemoryFromPool( pool, requested_address, super_page_count * kSuperPageSize); if (PA_UNLIKELY(!super_page_span_start)) { if (flags & AllocFlags::kReturnNull) @@ -1148,8 +1144,7 @@ bool PartitionBucket::SetNewActiveSlotSpan() { ++num_full_slot_spans; // Overflow. 
Most likely a correctness issue in the code. It is in theory // possible that the number of full slot spans really reaches (1 << 24), - // but this is very unlikely (and not possible with most GigaCage - // settings). + // but this is very unlikely (and not possible with most pool settings). PA_CHECK(num_full_slot_spans); // Not necessary but might help stop accidents. slot_span->next_slot_span = nullptr; diff --git a/base/allocator/partition_allocator/partition_root.h b/base/allocator/partition_allocator/partition_root.h index 103b09283705f..155057da3bbea 100644 --- a/base/allocator/partition_allocator/partition_root.h +++ b/base/allocator/partition_allocator/partition_root.h @@ -1171,8 +1171,8 @@ PA_ALWAYS_INLINE void PartitionRoot::FreeWithFlags( // Returns whether MTE is supported for this partition root. Because MTE stores // tagging information in the high bits of the pointer, it causes issues with // components like V8's ArrayBuffers which use custom pointer representations. -// All custom representations encountered so far rely on a caged memory address -// area / configurable pool, so we use that as a proxy. +// All custom representations encountered so far rely on an "is in configurable +// pool?" check, so we use that as a proxy. template PA_ALWAYS_INLINE bool PartitionRoot::IsMemoryTaggingEnabled() const {