Fix a few issues found when testing an MSVC build locally.
These won't get MSVC compiling, but seemed reasonably low-effort to
carry.  I think if I want to pursue much further, I need C++20.

Bug: none
Change-Id: If3879f9862c38f2a365d075ef43463d2b39f79c4
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/3615250
Reviewed-by: Behdad Bakhshinategh <behdadb@chromium.org>
Commit-Queue: Peter Kasting <pkasting@chromium.org>
Auto-Submit: Peter Kasting <pkasting@chromium.org>
Reviewed-by: Nicolas Ouellet-Payeur <nicolaso@chromium.org>
Reviewed-by: Daniel Cheng <dcheng@chromium.org>
Cr-Commit-Position: refs/heads/main@{#1001751}
pkasting authored and Chromium LUCI CQ committed May 10, 2022
1 parent 586e61b commit 436e5b7
Showing 17 changed files with 197 additions and 161 deletions.
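The recurring fix in the diffs below swaps the GCC/Clang-only __attribute__((packed)) for the #pragma pack(push, 1) / #pragma pack(pop) pair, which MSVC, GCC, and Clang all honor. A minimal standalone sketch of that pattern (the Metadata type and its fields are illustrative, not from the Chromium tree):

#include <cstdint>

// GCC/Clang-only spelling; MSVC does not accept __attribute__:
//   struct __attribute__((packed)) Metadata { ... };

// Portable spelling, honored by MSVC, GCC, and Clang:
#pragma pack(push, 1)  // Request 1-byte packing; save the previous setting.
struct Metadata {
  uint32_t offset;  // 4 bytes.
  uint8_t kind;     // 1 byte; pack(1) adds no trailing padding.
};
#pragma pack(pop)  // Restore the saved packing so later types are unaffected.

// With default packing this struct would be padded to 8 bytes.
static_assert(sizeof(Metadata) == 5, "pack(1) removed the tail padding");

The push/pop form matters: a bare #pragma pack(1) would silently change the layout of every type defined later in the translation unit.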
@@ -586,7 +586,7 @@ void ConfigurePartitions(
     }
     PA_DCHECK(!enable_brp);
     PA_DCHECK(!use_dedicated_aligned_partition);
-    PA_DCHECK(!current_root->with_thread_cache);
+    PA_DCHECK(!current_root->flags.with_thread_cache);
     return;
   }
 
6 changes: 3 additions & 3 deletions base/allocator/partition_allocator/extended_api.cc
@@ -17,11 +17,11 @@ namespace {
 void DisableThreadCacheForRootIfEnabled(ThreadSafePartitionRoot* root) {
   // Some platforms don't have a thread cache, or it could already have been
   // disabled.
-  if (!root || !root->with_thread_cache)
+  if (!root || !root->flags.with_thread_cache)
     return;
 
   ThreadCacheRegistry::Instance().PurgeAll();
-  root->with_thread_cache = false;
+  root->flags.with_thread_cache = false;
   // Doesn't destroy the thread cache object(s). For background threads, they
   // will be collected (and free cached memory) at thread destruction
   // time. For the main thread, we leak it.
@@ -31,7 +31,7 @@ void EnablePartitionAllocThreadCacheForRootIfDisabled(
     ThreadSafePartitionRoot* root) {
   if (!root)
     return;
-  root->with_thread_cache = true;
+  root->flags.with_thread_cache = true;
 }
 
 #if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
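Every hunk in this file tracks a single rename: with_thread_cache and its sibling per-root configuration fields now live in a nested struct member named flags on PartitionRoot (defined in partition_root.h, one of the 17 changed files not reproduced in this excerpt). A rough sketch of the shape these call sites imply, using only fields that appear in this diff plus an illustrative wrapper type:

#include <cstddef>

// Sketch only; the real PartitionRoot is defined in partition_root.h.
struct PartitionRootSketch {
  // Configuration set once in Init() and read on hot paths; grouping it
  // forces call sites to spell root->flags.with_thread_cache explicitly.
  struct Flags {
    bool with_thread_cache = false;
    bool allow_aligned_alloc = false;
    bool allow_cookie = false;
    bool use_configurable_pool = false;
    size_t extras_size = 0;
    size_t extras_offset = 0;
  } flags;
};

void DisableThreadCacheSketch(PartitionRootSketch* root) {
  if (!root || !root->flags.with_thread_cache)
    return;  // No thread cache to disable.
  root->flags.with_thread_cache = false;
}

One plausible benefit of the grouping is that each access now visibly touches root configuration rather than per-thread or per-bucket state.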
21 changes: 12 additions & 9 deletions base/allocator/partition_allocator/partition_page.h
@@ -117,8 +117,9 @@ using AllocationStateMap =
 // booted out of the active list. If there are no suitable active slot spans
 // found, an empty or decommitted slot spans (if one exists) will be pulled
 // from the empty/decommitted list on to the active list.
+#pragma pack(push, 1)
 template <bool thread_safe>
-struct __attribute__((packed)) SlotSpanMetadata {
+struct SlotSpanMetadata {
  private:
   PartitionFreelistEntry* freelist_head = nullptr;
 
@@ -302,6 +303,7 @@ struct __attribute__((packed)) SlotSpanMetadata {
         empty_cache_index_(0),
         unused2_(0) {}
 };
+#pragma pack(pop)
 static_assert(sizeof(SlotSpanMetadata<ThreadSafe>) <= kPageMetadataSize,
               "SlotSpanMetadata must fit into a Page Metadata slot.");
 
@@ -324,11 +326,12 @@ struct SubsequentPageMetadata {
 // first page of a slot span, describes that slot span. If a slot span spans
 // more than 1 page, the page metadata may contain rudimentary additional
 // information.
+// "Pack" the union so that common page metadata still fits within
+// kPageMetadataSize. (SlotSpanMetadata is also "packed".)
+#pragma pack(push, 1)
 template <bool thread_safe>
-struct __attribute__((packed)) PartitionPage {
-  // "Pack" the union so that common page metadata still fits within
-  // kPageMetadataSize. (SlotSpanMetadata is also "packed".)
-  union __attribute__((packed)) {
+struct PartitionPage {
+  union {
     SlotSpanMetadata<thread_safe> slot_span_metadata;
 
     SubsequentPageMetadata subsequent_page_metadata;
@@ -366,7 +369,7 @@ struct __attribute__((packed)) PartitionPage {
 
   ALWAYS_INLINE static PartitionPage* FromAddr(uintptr_t address);
 };
-
+#pragma pack(pop)
 static_assert(sizeof(PartitionPage<ThreadSafe>) == kPageMetadataSize,
               "PartitionPage must be able to fit in a metadata slot");
 
@@ -617,7 +620,7 @@ SlotSpanMetadata<thread_safe>::FromObject(void* object) {
   PA_DCHECK((::partition_alloc::internal::UnmaskPtr(object_addr) -
              ::partition_alloc::internal::UnmaskPtr(slot_span_start)) %
                 slot_span->bucket->slot_size ==
-            root->extras_offset);
+            root->flags.extras_offset);
 #endif  // DCHECK_IS_ON()
   return slot_span;
 }
@@ -637,10 +640,10 @@ SlotSpanMetadata<thread_safe>::FromObjectInnerAddr(uintptr_t address) {
   auto* root = PartitionRoot<thread_safe>::FromSlotSpan(slot_span);
   uintptr_t shift_from_slot_start =
       (address - slot_span_start) % slot_span->bucket->slot_size;
-  PA_DCHECK(shift_from_slot_start >= root->extras_offset);
+  PA_DCHECK(shift_from_slot_start >= root->flags.extras_offset);
   // Use <= to allow an address immediately past the object.
   PA_DCHECK(shift_from_slot_start <=
-            root->extras_offset + slot_span->GetUsableSize(root));
+            root->flags.extras_offset + slot_span->GetUsableSize(root));
 #endif  // DCHECK_IS_ON()
   return slot_span;
 }
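The PartitionPage hunks above are the subtler half of the packing change: the attribute previously sat on both the struct and its anonymous union, while the pragma form brackets the whole template definition, so the union inherits 1-byte packing from the enclosing region. A contrived sketch of the fit constraint the static_asserts above enforce (the 32-byte size and the array stand-ins are made up, not the real kPageMetadataSize or metadata types):

#include <cstddef>
#include <cstdint>

constexpr size_t kSlotSize = 32;  // Made-up stand-in for kPageMetadataSize.

#pragma pack(push, 1)  // Applies to every type defined until the pop.
template <bool thread_safe>
struct PageSketch {
  union {  // Inherits the 1-byte packing from the enclosing pragma.
    uint8_t slot_span_metadata[30];       // Stand-in for SlotSpanMetadata.
    uint8_t subsequent_page_metadata[8];  // Stand-in for SubsequentPageMetadata.
  };
  uint16_t extra;  // pack(1): placed at offset 30, no alignment padding.
};
#pragma pack(pop)

static_assert(sizeof(PageSketch<true>) == kSlotSize,
              "the packed union plus trailing field must exactly fill a slot");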
50 changes: 25 additions & 25 deletions base/allocator/partition_allocator/partition_root.cc
@@ -631,7 +631,7 @@ void PartitionRoot<thread_safe>::DestructForTesting() {
   // We need to destruct the thread cache before we unreserve any of the super
   // pages below, which we currently are not doing. So, we should only call
   // this function on PartitionRoots without a thread cache.
-  PA_CHECK(!with_thread_cache);
+  PA_CHECK(!flags.with_thread_cache);
   auto pool_handle = ChoosePool();
   auto* curr = first_extent;
   while (curr != nullptr) {
@@ -674,24 +674,24 @@ void PartitionRoot<thread_safe>::Init(PartitionOptions opts) {
   internal::PartitionAddressSpace::Init();
 #endif
 
-  allow_aligned_alloc =
+  flags.allow_aligned_alloc =
       opts.aligned_alloc == PartitionOptions::AlignedAlloc::kAllowed;
-  allow_cookie = opts.cookie == PartitionOptions::Cookie::kAllowed;
+  flags.allow_cookie = opts.cookie == PartitionOptions::Cookie::kAllowed;
 #if BUILDFLAG(USE_BACKUP_REF_PTR)
-  brp_enabled_ =
+  flags.brp_enabled_ =
       opts.backup_ref_ptr == PartitionOptions::BackupRefPtr::kEnabled;
 #else
   PA_CHECK(opts.backup_ref_ptr == PartitionOptions::BackupRefPtr::kDisabled);
 #endif
-  use_configurable_pool =
+  flags.use_configurable_pool =
       (opts.use_configurable_pool ==
        PartitionOptions::UseConfigurablePool::kIfAvailable) &&
       IsConfigurablePoolAvailable();
-  PA_DCHECK(!use_configurable_pool || IsConfigurablePoolAvailable());
+  PA_DCHECK(!flags.use_configurable_pool || IsConfigurablePoolAvailable());
 
   // brp_enabled_() is not supported in the configurable pool because
   // BRP requires objects to be in a different Pool.
-  PA_CHECK(!(use_configurable_pool && brp_enabled()));
+  PA_CHECK(!(flags.use_configurable_pool && brp_enabled()));
 
   // Ref-count messes up alignment needed for AlignedAlloc, making this
   // option incompatible. However, except in the
@@ -701,28 +701,28 @@ void PartitionRoot<thread_safe>::Init(PartitionOptions opts) {
 #endif
 
 #if defined(PA_EXTRAS_REQUIRED)
-  extras_size = 0;
-  extras_offset = 0;
+  flags.extras_size = 0;
+  flags.extras_offset = 0;
 
-  if (allow_cookie) {
-    extras_size += internal::kPartitionCookieSizeAdjustment;
+  if (flags.allow_cookie) {
+    flags.extras_size += internal::kPartitionCookieSizeAdjustment;
   }
 
   if (brp_enabled()) {
     // TODO(tasak): In the PUT_REF_COUNT_IN_PREVIOUS_SLOT case, ref-count is
     // stored out-of-line for single-slot slot spans, so no need to
     // add/subtract its size in this case.
-    extras_size += internal::kPartitionRefCountSizeAdjustment;
-    extras_offset += internal::kPartitionRefCountOffsetAdjustment;
+    flags.extras_size += internal::kPartitionRefCountSizeAdjustment;
+    flags.extras_offset += internal::kPartitionRefCountOffsetAdjustment;
   }
 #endif  // defined(PA_EXTRAS_REQUIRED)
 
   // Re-confirm the above PA_CHECKs, by making sure there are no
   // pre-allocation extras when AlignedAlloc is allowed. Post-allocation
   // extras are ok.
-  PA_CHECK(!allow_aligned_alloc || !extras_offset);
+  PA_CHECK(!flags.allow_aligned_alloc || !flags.extras_offset);
 
-  quarantine_mode =
+  flags.quarantine_mode =
 #if defined(PA_ALLOW_PCSCAN)
       (opts.quarantine == PartitionOptions::Quarantine::kDisallowed
            ? QuarantineMode::kAlwaysDisabled
@@ -763,10 +763,10 @@ void PartitionRoot<thread_safe>::Init(PartitionOptions opts) {
   with_thread_cache = false;
 #else
   ThreadCache::EnsureThreadSpecificDataInitialized();
-  with_thread_cache =
+  flags.with_thread_cache =
       (opts.thread_cache == PartitionOptions::ThreadCache::kEnabled);
 
-  if (with_thread_cache)
+  if (flags.with_thread_cache)
     ThreadCache::Init(this);
 #endif  // !defined(PA_THREAD_CACHE_SUPPORTED)
 
@@ -786,7 +786,7 @@ void PartitionRoot<thread_safe>::Init(PartitionOptions opts) {
 template <bool thread_safe>
 PartitionRoot<thread_safe>::~PartitionRoot() {
 #if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
-  PA_CHECK(!with_thread_cache)
+  PA_CHECK(!flags.with_thread_cache)
       << "Must not destroy a partition with a thread cache";
 #endif  // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
 
@@ -800,7 +800,7 @@ template <bool thread_safe>
 void PartitionRoot<thread_safe>::EnableThreadCacheIfSupported() {
 #if defined(PA_THREAD_CACHE_SUPPORTED)
   ::partition_alloc::internal::ScopedGuard guard{lock_};
-  PA_CHECK(!with_thread_cache);
+  PA_CHECK(!flags.with_thread_cache);
   // By the time we get there, there may be multiple threads created in the
   // process. Since `with_thread_cache` is accessed without a lock, it can
   // become visible to another thread before the effects of
@@ -815,7 +815,7 @@ void PartitionRoot<thread_safe>::EnableThreadCacheIfSupported() {
   PA_CHECK(before == 0);
   ThreadCache::Init(this);
   thread_caches_being_constructed_.fetch_sub(1, std::memory_order_release);
-  with_thread_cache = true;
+  flags.with_thread_cache = true;
 #endif  // defined(PA_THREAD_CACHE_SUPPORTED)
 }
 
@@ -916,7 +916,7 @@ bool PartitionRoot<thread_safe>::TryReallocInPlaceForDirectMap(
 
 #if DCHECK_IS_ON()
   // Write a new trailing cookie.
-  if (allow_cookie) {
+  if (flags.allow_cookie) {
     auto* object =
         reinterpret_cast<unsigned char*>(SlotStartToObject(slot_start));
     internal::PartitionCookieWriteValue(object +
@@ -965,7 +965,7 @@ bool PartitionRoot<thread_safe>::TryReallocInPlaceForNormalBuckets(
 #if DCHECK_IS_ON()
   // Write a new trailing cookie only when it is possible to keep track
   // raw size (otherwise we wouldn't know where to look for it later).
-  if (allow_cookie) {
+  if (flags.allow_cookie) {
     internal::PartitionCookieWriteValue(
         reinterpret_cast<unsigned char*>(address) +
         slot_span->GetUsableSize(this));
@@ -1204,7 +1204,7 @@ void PartitionRoot<thread_safe>::DumpStats(const char* partition_name,
   stats.total_active_bytes += direct_mapped_allocations_total_size;
   stats.total_active_count += num_direct_mapped_allocations;
 
-  stats.has_thread_cache = with_thread_cache;
+  stats.has_thread_cache = flags.with_thread_cache;
   if (stats.has_thread_cache) {
     ThreadCacheRegistry::Instance().DumpStats(
         true, &stats.current_thread_cache_stats);
@@ -1241,9 +1241,9 @@ void PartitionRoot<thread_safe>::DumpStats(const char* partition_name,
 template <bool thread_safe>
 void PartitionRoot<thread_safe>::DeleteForTesting(
     PartitionRoot* partition_root) {
-  if (partition_root->with_thread_cache) {
+  if (partition_root->flags.with_thread_cache) {
     ThreadCache::SwapForTesting(nullptr);
-    partition_root->with_thread_cache = false;
+    partition_root->flags.with_thread_cache = false;
   }
 
   delete partition_root;
