[PA] Remove "GigaCage" references, 4 of N
This change removes the rest of the references to "GigaCage" in
PartitionAlloc code, swapping in "pool" verbiage instead. Several
structs and functions are trivially renamed.

Bug: 1369834
Change-Id: Iac752216549372a001e117c233614cfb033e8a46
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/3929883
Reviewed-by: Bartek Nowierski <bartekn@chromium.org>
Commit-Queue: Kalvin Lee <kdlee@chromium.org>
Cr-Commit-Position: refs/heads/main@{#1054106}
Kalvin Lee authored and Chromium LUCI CQ committed Oct 3, 2022
1 parent c00b988 commit 916dcd8
Showing 5 changed files with 50 additions and 52 deletions.
18 changes: 9 additions & 9 deletions base/allocator/partition_allocator/partition_address_space.cc
@@ -66,18 +66,18 @@ bool IsLegacyWindowsVersion() {
}
#endif // defined(PA_DYNAMICALLY_SELECT_POOL_SIZE)

PA_NOINLINE void HandleGigaCageAllocFailureOutOfVASpace() {
PA_NOINLINE void HandlePoolAllocFailureOutOfVASpace() {
PA_NO_CODE_FOLDING();
PA_CHECK(false);
}

PA_NOINLINE void HandleGigaCageAllocFailureOutOfCommitCharge() {
PA_NOINLINE void HandlePoolAllocFailureOutOfCommitCharge() {
PA_NO_CODE_FOLDING();
PA_CHECK(false);
}
#endif // BUILDFLAG(IS_WIN)

PA_NOINLINE void HandleGigaCageAllocFailure() {
PA_NOINLINE void HandlePoolAllocFailure() {
PA_NO_CODE_FOLDING();
uint32_t alloc_page_error_code = GetAllocPageErrorCode();
PA_DEBUG_DATA_ON_STACK("error", static_cast<size_t>(alloc_page_error_code));
@@ -87,12 +87,12 @@ PA_NOINLINE void HandleGigaCageAllocFailure() {
if (alloc_page_error_code == ERROR_NOT_ENOUGH_MEMORY) {
// The error code says NOT_ENOUGH_MEMORY, but since we only do MEM_RESERVE,
// it must be VA space exhaustion.
HandleGigaCageAllocFailureOutOfVASpace();
HandlePoolAllocFailureOutOfVASpace();
} else if (alloc_page_error_code == ERROR_COMMITMENT_LIMIT) {
// On Windows <8.1, MEM_RESERVE increases commit charge to account for
// not-yet-committed PTEs needed to cover that VA space, if it was to be
// committed (see crbug.com/1101421#c16).
HandleGigaCageAllocFailureOutOfCommitCharge();
HandlePoolAllocFailureOutOfCommitCharge();
} else
#endif // BUILDFLAG(IS_WIN)
{
@@ -103,7 +103,7 @@ PA_NOINLINE void HandleGigaCageAllocFailure() {
} // namespace

alignas(kPartitionCachelineSize)
PartitionAddressSpace::GigaCageSetup PartitionAddressSpace::setup_;
PartitionAddressSpace::PoolSetup PartitionAddressSpace::setup_;

#if defined(PA_ENABLE_SHADOW_METADATA)
std::ptrdiff_t PartitionAddressSpace::regular_pool_shadow_offset_ = 0;
@@ -178,7 +178,7 @@ void PartitionAddressSpace::Init() {
PageAccessibilityConfiguration::kInaccessible,
PageTag::kPartitionAlloc, regular_pool_fd);
if (!setup_.regular_pool_base_address_)
HandleGigaCageAllocFailure();
HandlePoolAllocFailure();
#if defined(PA_DYNAMICALLY_SELECT_POOL_SIZE)
setup_.regular_pool_base_mask_ = ~(regular_pool_size - 1);
#endif
@@ -210,7 +210,7 @@ void PartitionAddressSpace::Init() {
PageAccessibilityConfiguration::kInaccessible, PageTag::kPartitionAlloc,
brp_pool_fd);
if (!base_address)
HandleGigaCageAllocFailure();
HandlePoolAllocFailure();
setup_.brp_pool_base_address_ = base_address + kForbiddenZoneSize;
#if defined(PA_DYNAMICALLY_SELECT_POOL_SIZE)
setup_.brp_pool_base_mask_ = ~(brp_pool_size - 1);
@@ -235,7 +235,7 @@ void PartitionAddressSpace::Init() {
#endif // PA_STARSCAN_USE_CARD_TABLE

#if defined(PA_ENABLE_SHADOW_METADATA)
// Reserve memory for the shadow GigaCage
// Reserve memory for the shadow pools.
uintptr_t regular_pool_shadow_address =
AllocPages(regular_pool_size, regular_pool_size,
PageAccessibilityConfiguration::kInaccessible,
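For readers skimming the rename: the handlers in this file exist purely to give each reservation-failure cause its own crash signature. Each one is noinline, contains a code-folding barrier so the linker cannot merge their identical bodies, and ends in a failing check. Below is a standalone sketch of the same pattern in plain C++ with abort() standing in for the PA_* macros; the names and the error taxonomy are illustrative only, not the PartitionAlloc API.

```cpp
#include <cstdio>
#include <cstdlib>

enum class PoolAllocFailure { kOutOfVASpace, kOutOfCommitCharge, kOther };

// One noinline, noreturn function per cause, so the crash stack alone tells
// them apart. The volatile local discourages identical-code folding, roughly
// the role PA_NO_CODE_FOLDING plays in the real code.
[[noreturn]] [[gnu::noinline]] void DieOutOfVASpace() {
  volatile int dont_fold = __LINE__;
  (void)dont_fold;
  std::abort();
}

[[noreturn]] [[gnu::noinline]] void DieOutOfCommitCharge() {
  volatile int dont_fold = __LINE__;
  (void)dont_fold;
  std::abort();
}

// Mirrors the dispatch in HandlePoolAllocFailure(): classify the platform
// error, then crash in the function named after the cause.
[[noreturn]] void HandlePoolReservationFailure(PoolAllocFailure cause) {
  std::fputs("pool address-space reservation failed\n", stderr);
  switch (cause) {
    case PoolAllocFailure::kOutOfVASpace:
      DieOutOfVASpace();
    case PoolAllocFailure::kOutOfCommitCharge:
      DieOutOfCommitCharge();
    default:
      std::abort();
  }
}
```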
41 changes: 22 additions & 19 deletions base/allocator/partition_allocator/partition_address_space.h
@@ -32,10 +32,12 @@ namespace partition_alloc {
namespace internal {

// Reserves address space for PartitionAllocator.
//
// This reserves space for the regular and BRP pools. If callers would
// like to use the configurable pool, they must manually set up the
// address space themselves and provide the mapping to PartitionAlloc.
class PA_COMPONENT_EXPORT(PARTITION_ALLOC) PartitionAddressSpace {
public:
// BRP stands for BackupRefPtr. GigaCage is split into pools, one which
// supports BackupRefPtr and one that doesn't.
static PA_ALWAYS_INLINE internal::pool_handle GetRegularPool() {
return setup_.regular_pool_;
}
@@ -54,8 +56,8 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) PartitionAddressSpace {
return setup_.brp_pool_;
}

// The Configurable Pool can be created inside an existing mapping and so will
// be located outside PartitionAlloc's GigaCage.
// The Configurable Pool can be created inside an existing mapping; we
// keep the information with the other pool setup data.
static PA_ALWAYS_INLINE internal::pool_handle GetConfigurablePool() {
return setup_.configurable_pool_;
}
@@ -91,7 +93,8 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) PartitionAddressSpace {
return kConfigurablePoolMinSize;
}

// Initialize the GigaCage and the Pools inside of it.
// Initialize pools.
//
// This function must only be called from the main thread.
static void Init();
// Initialize the ConfigurablePool at the given address |pool_base|. It must
@@ -194,25 +197,25 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) PartitionAddressSpace {
}
#endif // defined(PA_DYNAMICALLY_SELECT_POOL_SIZE)

// On 64-bit systems, GigaCage is split into disjoint pools. The BRP pool, is
// where all allocations have a BRP ref-count, thus pointers pointing there
// can use a BRP protection against UaF. Allocations in the other pools don't
// have that.
// On 64-bit systems, PA allocates from several contiguous, mutually disjoint
// pools. The BRP pool is where all allocations have a BRP ref-count, thus
// pointers pointing there can use a BRP protection against UaF. Allocations
// in the other pools don't have that.
//
// Pool sizes have to be powers of two. Each pool will be aligned at its
// own size boundary.
//
// NOTE! The BRP pool must be preceded by a reserved region, where allocations
// are forbidden. This is to prevent a pointer immediately past a non-GigaCage
// are forbidden. This is to prevent a pointer to the end of a non-BRP-pool
// allocation from falling into the BRP pool, thus triggering BRP mechanism
// and likely crashing. This "forbidden zone" can be as small as 1B, but it's
// simpler to just reserve an allocation granularity unit.
//
// The ConfigurablePool is an optional Pool that can be created inside an
// existing mapping by the embedder, and so will be outside of the GigaCage.
// This Pool can be used when certain PA allocations must be located inside a
// given virtual address region. One use case for this Pool is V8's virtual
// memory cage, which requires that ArrayBuffers be located inside of it.
// existing mapping by the embedder. This Pool can be used when certain PA
// allocations must be located inside a given virtual address region. One
// use case for this Pool is V8's virtual memory cage, which requires that
// ArrayBuffers be located inside of it.
static constexpr size_t kRegularPoolSize = kPoolMaxSize;
static constexpr size_t kBRPPoolSize = kPoolMaxSize;
static_assert(base::bits::IsPowerOfTwo(kRegularPoolSize) &&
@@ -265,11 +268,11 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) PartitionAddressSpace {
static constexpr uintptr_t kUninitializedPoolBaseAddress =
static_cast<uintptr_t>(-1);

struct GigaCageSetup {
struct PoolSetup {
// Before PartitionAddressSpace::Init(), no allocations are made from a
// reserved address space. Therefore, set *_pool_base_address_ initially to
// -1, so that PartitionAddressSpace::IsIn*Pool() always returns false.
constexpr GigaCageSetup()
constexpr PoolSetup()
: regular_pool_base_address_(kUninitializedPoolBaseAddress),
brp_pool_base_address_(kUninitializedPoolBaseAddress),
configurable_pool_base_address_(kUninitializedPoolBaseAddress),
@@ -303,15 +306,15 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) PartitionAddressSpace {
char one_cacheline_[kPartitionCachelineSize];
};
};
static_assert(sizeof(GigaCageSetup) % kPartitionCachelineSize == 0,
"GigaCageSetup has to fill a cacheline(s)");
static_assert(sizeof(PoolSetup) % kPartitionCachelineSize == 0,
"PoolSetup has to fill a cacheline(s)");

// See the comment describing the address layout above.
//
// These are write-once fields, frequently accessed thereafter. Make sure they
// don't share a cacheline with other, potentially writeable data, through
// alignment and padding.
alignas(kPartitionCachelineSize) static GigaCageSetup setup_;
alignas(kPartitionCachelineSize) static PoolSetup setup_;

#if defined(PA_ENABLE_SHADOW_METADATA)
static std::ptrdiff_t regular_pool_shadow_offset_;
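The layout rules spelled out above (power-of-two pool sizes, each pool aligned at its own size boundary, base addresses initialized to -1) exist so that pool membership is a single mask-and-compare and is trivially false before Init(). A simplified sketch of that check; the constants and names are illustrative, not the real PartitionAlloc values.

```cpp
#include <cstdint>

// Illustrative pool size: 16 GiB. Because it is a power of two and the pool
// is aligned at its own size boundary, (base, mask) fully describe the pool.
constexpr uintptr_t kPoolSize = uintptr_t{16} << 30;
constexpr uintptr_t kPoolBaseMask = ~(kPoolSize - 1);

// Sentinel base: no masked address can ever equal all-ones, so the
// containment check below stays false until the pool is actually reserved.
constexpr uintptr_t kUninitializedPoolBaseAddress = static_cast<uintptr_t>(-1);

struct PoolSetup {
  uintptr_t regular_pool_base_address_ = kUninitializedPoolBaseAddress;
};

inline PoolSetup setup_;

// One AND plus one compare decides membership; this is the payoff of the
// power-of-two size and size-aligned base requirements.
inline bool IsInRegularPool(uintptr_t address) {
  return (address & kPoolBaseMask) == setup_.regular_pool_base_address_;
}
```

The forbidden zone described above then guarantees that a pointer just past the end of a non-BRP allocation can never satisfy the BRP-pool version of this check.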
6 changes: 3 additions & 3 deletions base/allocator/partition_allocator/partition_alloc_config.h
@@ -254,10 +254,10 @@ constexpr bool kUseLazyCommit = false;

// Enable shadow metadata.
//
// With this flag, a shadow GigaCage will be mapped, on which writable shadow
// With this flag, shadow pools will be mapped, on which writable shadow
// metadatas are placed, and the real metadatas are set to read-only instead.
// This feature is only enabled with 64-bits CPUs because GigaCage does not
// exist with 32-bits CPUs.
// This feature is only enabled in 64-bit environments because pools work
// differently with 32-bit pointers (see glossary).
#if BUILDFLAG(ENABLE_SHADOW_METADATA_FOR_64_BITS_POINTERS) && \
defined(PA_HAS_64_BITS_POINTERS)
#define PA_ENABLE_SHADOW_METADATA
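To make the "shadow" idea concrete: the same backing pages can be mapped at two virtual addresses, the primary view kept read-only and a second, writable view placed at a fixed offset from it (compare regular_pool_shadow_offset_ in the first file). The sketch below shows only that double-mapping trick; memfd_create() is Linux-specific and none of this is the PartitionAlloc implementation.

```cpp
#include <sys/mman.h>
#include <unistd.h>
#include <cstddef>
#include <cstdio>
#include <cstring>

int main() {
  const size_t kSize = 4096;
  int fd = memfd_create("shadow_metadata_demo", 0);  // Linux >= 3.17.
  if (fd < 0 || ftruncate(fd, kSize) != 0) return 1;

  // Primary view: read-only, the address everybody else sees.
  char* real = static_cast<char*>(
      mmap(nullptr, kSize, PROT_READ, MAP_SHARED, fd, 0));
  // Shadow view of the same pages: writable, elsewhere in the address space.
  char* shadow = static_cast<char*>(
      mmap(nullptr, kSize, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0));
  if (real == MAP_FAILED || shadow == MAP_FAILED) return 1;

  // An allocator would compute this offset once and store it, then write
  // metadata through "read-only address + shadow offset".
  const ptrdiff_t shadow_offset = shadow - real;
  std::memcpy(real + shadow_offset, "metadata", 9);
  std::printf("visible through the read-only view: %s\n", real);
  return 0;
}
```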
33 changes: 14 additions & 19 deletions base/allocator/partition_allocator/partition_bucket.cc
@@ -97,8 +97,8 @@ bool AreAllowedSuperPagesForBRPPool(uintptr_t start, uintptr_t end) {
#endif // !defined(PA_HAS_64_BITS_POINTERS) &&
// BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)

// Reserves |requested_size| worth of super pages from the specified pool of the
// GigaCage. If BRP pool is requested this function will honor BRP block list.
// Reserves |requested_size| worth of super pages from the specified pool.
// If BRP pool is requested this function will honor BRP block list.
//
// The returned address will be aligned to kSuperPageSize, and so
// |requested_address| should be. |requested_size| doesn't have to be, however.
@@ -114,9 +114,9 @@ bool AreAllowedSuperPagesForBRPPool(uintptr_t start, uintptr_t end) {
// AreAllowedSuperPagesForBRPPool.
// - IsAllowedSuperPageForBRPPool (used by AreAllowedSuperPagesForBRPPool) is
// designed to not need locking.
uintptr_t ReserveMemoryFromGigaCage(pool_handle pool,
uintptr_t requested_address,
size_t requested_size) {
uintptr_t ReserveMemoryFromPool(pool_handle pool,
uintptr_t requested_address,
size_t requested_size) {
PA_DCHECK(!(requested_address % kSuperPageSize));

uintptr_t reserved_address = AddressPoolManager::GetInstance().Reserve(
@@ -242,9 +242,9 @@ SlotSpanMetadata<thread_safe>* PartitionDirectMap(
{
// Getting memory for direct-mapped allocations doesn't interact with the
// rest of the allocator, but takes a long time, as it involves several
// system calls. With GigaCage, no mmap() (or equivalent) call is made on 64
// bit systems, but page permissions are changed with mprotect(), which is a
// syscall.
// system calls. Although no mmap() (or equivalent) calls are made on
// 64 bit systems, page permissions are changed with mprotect(), which is
// a syscall.
//
// These calls are almost always slow (at least a couple us per syscall on a
// desktop Linux machine), and they also have a very long latency tail,
@@ -277,17 +277,15 @@ SlotSpanMetadata<thread_safe>* PartitionDirectMap(
PA_DCHECK(slot_size <= available_reservation_size);
#endif

// Allocate from GigaCage. Route to the appropriate GigaCage pool based on
// BackupRefPtr support.
pool_handle pool = root->ChoosePool();
uintptr_t reservation_start;
{
// Reserving memory from the GigaCage is actually not a syscall on 64 bit
// Reserving memory from the pool is actually not a syscall on 64 bit
// platforms.
#if !defined(PA_HAS_64_BITS_POINTERS)
ScopedSyscallTimer timer{root};
#endif
reservation_start = ReserveMemoryFromGigaCage(pool, 0, reservation_size);
reservation_start = ReserveMemoryFromPool(pool, 0, reservation_size);
}
if (PA_UNLIKELY(!reservation_start)) {
if (return_null)
@@ -339,7 +337,7 @@ SlotSpanMetadata<thread_safe>* PartitionDirectMap(
#endif

// No need to hold root->lock_. Now that memory is reserved, no other
// overlapping region can be allocated (because of how GigaCage works),
// overlapping region can be allocated (because of how pools work),
// so no other thread can update the same offset table entries at the
// same time. Furthermore, nobody will be reading these offsets until this
// function returns.
@@ -411,7 +409,7 @@ SlotSpanMetadata<thread_safe>* PartitionDirectMap(
SlotSpanMetadata<thread_safe>(&metadata->bucket);

// It is typically possible to map a large range of inaccessible pages, and
// this is leveraged in multiple places, including the GigaCage. However,
// this is leveraged in multiple places, including the pools. However,
// this doesn't mean that we can commit all this memory. For the vast
// majority of allocations, this just means that we crash in a slightly
// different place, but for callers ready to handle failures, we have to
@@ -731,10 +729,8 @@ uintptr_t PartitionBucket<thread_safe>::AllocNewSuperPageSpan(
// page table bloat and not fragmenting address spaces in 32 bit
// architectures.
uintptr_t requested_address = root->next_super_page;
// Allocate from GigaCage. Route to the appropriate GigaCage pool based on
// BackupRefPtr support.
pool_handle pool = root->ChoosePool();
uintptr_t super_page_span_start = ReserveMemoryFromGigaCage(
uintptr_t super_page_span_start = ReserveMemoryFromPool(
pool, requested_address, super_page_count * kSuperPageSize);
if (PA_UNLIKELY(!super_page_span_start)) {
if (flags & AllocFlags::kReturnNull)
@@ -1148,8 +1144,8 @@ bool PartitionBucket<thread_safe>::SetNewActiveSlotSpan() {
++num_full_slot_spans;
// Overflow. Most likely a correctness issue in the code. It is in theory
// possible that the number of full slot spans really reaches (1 << 24),
// but this is very unlikely (and not possible with most GigaCage
// settings).
// but this is very unlikely (and not possible with most pool settings).
PA_CHECK(num_full_slot_spans);
// Not necessary but might help stop accidents.
slot_span->next_slot_span = nullptr;
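The syscall bookkeeping in this file rests on the reserve/commit split: the pool's whole virtual range is reserved up front as inaccessible, so handing out a super page later is pure bookkeeping on 64-bit builds, and the only syscall happens when pages are actually committed by changing their permissions (mprotect() on POSIX, with the Windows equivalent behaving analogously). A minimal Linux-flavored sketch of that split, using illustrative sizes and names rather than the real PartitionAlloc ones:

```cpp
#include <sys/mman.h>
#include <cstddef>
#include <cstdint>

constexpr size_t kSuperPageSize = size_t{2} * 1024 * 1024;  // Illustrative.

// Reserve-only: the address range is claimed but inaccessible, so it costs
// little more than a VMA entry until something is committed.
void* ReservePool(size_t pool_size) {
  void* base = mmap(nullptr, pool_size, PROT_NONE,
                    MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, -1, 0);
  return base == MAP_FAILED ? nullptr : base;
}

// Carving a super page out of the reserved pool is just arithmetic; no
// syscall is involved, which is the point of the comments above.
uintptr_t ReserveSuperPage(uintptr_t& next_super_page) {
  uintptr_t start = next_super_page;
  next_super_page += kSuperPageSize;
  return start;
}

// Committing is the one syscall: flip permissions on part of the reservation.
bool Commit(void* address, size_t length) {
  return mprotect(address, length, PROT_READ | PROT_WRITE) == 0;
}
```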
4 changes: 2 additions & 2 deletions base/allocator/partition_allocator/partition_root.h
@@ -1171,8 +1171,8 @@ PA_ALWAYS_INLINE void PartitionRoot<thread_safe>::FreeWithFlags(
// Returns whether MTE is supported for this partition root. Because MTE stores
// tagging information in the high bits of the pointer, it causes issues with
// components like V8's ArrayBuffers which use custom pointer representations.
// All custom representations encountered so far rely on a caged memory address
// area / configurable pool, so we use that as a proxy.
// All custom representations encountered so far rely on an "is in configurable
// pool?" check, so we use that as a proxy.
template <bool thread_safe>
PA_ALWAYS_INLINE bool PartitionRoot<thread_safe>::IsMemoryTaggingEnabled()
const {
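Background for the comment above: MTE keeps its 4-bit tag in the upper bits of the pointer (bits 59:56 on AArch64, inside the top byte that address translation ignores), so any component that packs its own data into those bits, or stores pointers in a custom representation, will not round-trip tagged addresses. Hence the proxy: allocations with such representations live in the configurable pool, and tagging is skipped there. A small illustrative sketch, with all names hypothetical rather than taken from the codebase:

```cpp
#include <cstdint>

// MTE-style logical tag: 4 bits at [59:56], within the ignored top byte.
constexpr int kTagShift = 56;
constexpr uintptr_t kTagMask = uintptr_t{0xF} << kTagShift;

inline uintptr_t SetTag(uintptr_t address, uint8_t tag) {
  return (address & ~kTagMask) | (uintptr_t{tag} & 0xF) << kTagShift;
}

inline uintptr_t StripTag(uintptr_t address) { return address & ~kTagMask; }

// Stand-in for the configurable-pool check used as a proxy in the real code.
inline bool IsInConfigurablePool(uintptr_t /*address*/) { return false; }

// Only tag memory whose users can tolerate the high bits changing underneath
// them; embedder-managed (configurable-pool) memory is left untouched.
inline uintptr_t MaybeTag(uintptr_t address, uint8_t tag) {
  if (IsInConfigurablePool(address))
    return address;
  return SetTag(address, tag);
}
```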
