Skip to content

Commit

Permalink
Revert "Fix a few issues found when testing an MSVC build locally."
Browse files Browse the repository at this point in the history
This reverts commit 436e5b7.

Reason for revert: Win10 Tests x64 (dbg) base_unittests failures:
https://ci.chromium.org/ui/p/chromium/builders/ci/Win10%20Tests%20x64%20(dbg)/27879/overview

Original change's description:
> Fix a few issues found when testing an MSVC build locally.
>
> These won't get MSVC compiling, but seemed reasonably low-effort to
> carry.  I think if I want to pursue much further, I need C++20.
>
> Bug: none
> Change-Id: If3879f9862c38f2a365d075ef43463d2b39f79c4
> Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/3615250
> Reviewed-by: Behdad Bakhshinategh <behdadb@chromium.org>
> Commit-Queue: Peter Kasting <pkasting@chromium.org>
> Auto-Submit: Peter Kasting <pkasting@chromium.org>
> Reviewed-by: Nicolas Ouellet-Payeur <nicolaso@chromium.org>
> Reviewed-by: Daniel Cheng <dcheng@chromium.org>
> Cr-Commit-Position: refs/heads/main@{#1001751}

Bug: none
Change-Id: I98abc04eade6c7e665a652e816699e449a24c1d2
No-Presubmit: true
No-Tree-Checks: true
No-Try: true
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/3641530
Reviewed-by: Aya Elsayed <ayaelattar@chromium.org>
Auto-Submit: Sergey Poromov <poromov@chromium.org>
Bot-Commit: Rubber Stamper <rubber-stamper@appspot.gserviceaccount.com>
Commit-Queue: Sergey Poromov <poromov@chromium.org>
Owners-Override: Sergey Poromov <poromov@chromium.org>
Cr-Commit-Position: refs/heads/main@{#1002027}
  • Loading branch information
Sergey Poromov authored and Chromium LUCI CQ committed May 11, 2022
1 parent bb2b324 commit 98a1292
Show file tree
Hide file tree
Showing 17 changed files with 161 additions and 197 deletions.
Original file line number Diff line number Diff line change
Expand Up @@ -586,7 +586,7 @@ void ConfigurePartitions(
}
PA_DCHECK(!enable_brp);
PA_DCHECK(!use_dedicated_aligned_partition);
PA_DCHECK(!current_root->flags.with_thread_cache);
PA_DCHECK(!current_root->with_thread_cache);
return;
}

Expand Down
6 changes: 3 additions & 3 deletions base/allocator/partition_allocator/extended_api.cc
Original file line number Diff line number Diff line change
Expand Up @@ -17,11 +17,11 @@ namespace {
void DisableThreadCacheForRootIfEnabled(ThreadSafePartitionRoot* root) {
// Some platforms don't have a thread cache, or it could already have been
// disabled.
if (!root || !root->flags.with_thread_cache)
if (!root || !root->with_thread_cache)
return;

ThreadCacheRegistry::Instance().PurgeAll();
root->flags.with_thread_cache = false;
root->with_thread_cache = false;
// Doesn't destroy the thread cache object(s). For background threads, they
// will be collected (and free cached memory) at thread destruction
// time. For the main thread, we leak it.
Expand All @@ -31,7 +31,7 @@ void EnablePartitionAllocThreadCacheForRootIfDisabled(
ThreadSafePartitionRoot* root) {
if (!root)
return;
root->flags.with_thread_cache = true;
root->with_thread_cache = true;
}

#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
Expand Down
21 changes: 9 additions & 12 deletions base/allocator/partition_allocator/partition_page.h
Original file line number Diff line number Diff line change
Expand Up @@ -117,9 +117,8 @@ using AllocationStateMap =
// booted out of the active list. If there are no suitable active slot spans
// found, an empty or decommitted slot spans (if one exists) will be pulled
// from the empty/decommitted list on to the active list.
#pragma pack(push, 1)
template <bool thread_safe>
struct SlotSpanMetadata {
struct __attribute__((packed)) SlotSpanMetadata {
private:
PartitionFreelistEntry* freelist_head = nullptr;

Expand Down Expand Up @@ -303,7 +302,6 @@ struct SlotSpanMetadata {
empty_cache_index_(0),
unused2_(0) {}
};
#pragma pack(pop)
static_assert(sizeof(SlotSpanMetadata<ThreadSafe>) <= kPageMetadataSize,
"SlotSpanMetadata must fit into a Page Metadata slot.");

Expand All @@ -326,12 +324,11 @@ struct SubsequentPageMetadata {
// first page of a slot span, describes that slot span. If a slot span spans
// more than 1 page, the page metadata may contain rudimentary additional
// information.
// "Pack" the union so that common page metadata still fits within
// kPageMetadataSize. (SlotSpanMetadata is also "packed".)
#pragma pack(push, 1)
template <bool thread_safe>
struct PartitionPage {
union {
struct __attribute__((packed)) PartitionPage {
// "Pack" the union so that common page metadata still fits within
// kPageMetadataSize. (SlotSpanMetadata is also "packed".)
union __attribute__((packed)) {
SlotSpanMetadata<thread_safe> slot_span_metadata;

SubsequentPageMetadata subsequent_page_metadata;
Expand Down Expand Up @@ -369,7 +366,7 @@ struct PartitionPage {

ALWAYS_INLINE static PartitionPage* FromAddr(uintptr_t address);
};
#pragma pack(pop)

static_assert(sizeof(PartitionPage<ThreadSafe>) == kPageMetadataSize,
"PartitionPage must be able to fit in a metadata slot");

Expand Down Expand Up @@ -620,7 +617,7 @@ SlotSpanMetadata<thread_safe>::FromObject(void* object) {
PA_DCHECK((::partition_alloc::internal::UnmaskPtr(object_addr) -
::partition_alloc::internal::UnmaskPtr(slot_span_start)) %
slot_span->bucket->slot_size ==
root->flags.extras_offset);
root->extras_offset);
#endif // DCHECK_IS_ON()
return slot_span;
}
Expand All @@ -640,10 +637,10 @@ SlotSpanMetadata<thread_safe>::FromObjectInnerAddr(uintptr_t address) {
auto* root = PartitionRoot<thread_safe>::FromSlotSpan(slot_span);
uintptr_t shift_from_slot_start =
(address - slot_span_start) % slot_span->bucket->slot_size;
PA_DCHECK(shift_from_slot_start >= root->flags.extras_offset);
PA_DCHECK(shift_from_slot_start >= root->extras_offset);
// Use <= to allow an address immediately past the object.
PA_DCHECK(shift_from_slot_start <=
root->flags.extras_offset + slot_span->GetUsableSize(root));
root->extras_offset + slot_span->GetUsableSize(root));
#endif // DCHECK_IS_ON()
return slot_span;
}
Expand Down
50 changes: 25 additions & 25 deletions base/allocator/partition_allocator/partition_root.cc
Original file line number Diff line number Diff line change
Expand Up @@ -631,7 +631,7 @@ void PartitionRoot<thread_safe>::DestructForTesting() {
// We need to destruct the thread cache before we unreserve any of the super
// pages below, which we currently are not doing. So, we should only call
// this function on PartitionRoots without a thread cache.
PA_CHECK(!flags.with_thread_cache);
PA_CHECK(!with_thread_cache);
auto pool_handle = ChoosePool();
auto* curr = first_extent;
while (curr != nullptr) {
Expand Down Expand Up @@ -674,24 +674,24 @@ void PartitionRoot<thread_safe>::Init(PartitionOptions opts) {
internal::PartitionAddressSpace::Init();
#endif

flags.allow_aligned_alloc =
allow_aligned_alloc =
opts.aligned_alloc == PartitionOptions::AlignedAlloc::kAllowed;
flags.allow_cookie = opts.cookie == PartitionOptions::Cookie::kAllowed;
allow_cookie = opts.cookie == PartitionOptions::Cookie::kAllowed;
#if BUILDFLAG(USE_BACKUP_REF_PTR)
flags.brp_enabled_ =
brp_enabled_ =
opts.backup_ref_ptr == PartitionOptions::BackupRefPtr::kEnabled;
#else
PA_CHECK(opts.backup_ref_ptr == PartitionOptions::BackupRefPtr::kDisabled);
#endif
flags.use_configurable_pool =
use_configurable_pool =
(opts.use_configurable_pool ==
PartitionOptions::UseConfigurablePool::kIfAvailable) &&
IsConfigurablePoolAvailable();
PA_DCHECK(!flags.use_configurable_pool || IsConfigurablePoolAvailable());
PA_DCHECK(!use_configurable_pool || IsConfigurablePoolAvailable());

// brp_enabled_() is not supported in the configurable pool because
// BRP requires objects to be in a different Pool.
PA_CHECK(!(flags.use_configurable_pool && brp_enabled()));
PA_CHECK(!(use_configurable_pool && brp_enabled()));

// Ref-count messes up alignment needed for AlignedAlloc, making this
// option incompatible. However, except in the
Expand All @@ -701,28 +701,28 @@ void PartitionRoot<thread_safe>::Init(PartitionOptions opts) {
#endif

#if defined(PA_EXTRAS_REQUIRED)
flags.extras_size = 0;
flags.extras_offset = 0;
extras_size = 0;
extras_offset = 0;

if (flags.allow_cookie) {
flags.extras_size += internal::kPartitionCookieSizeAdjustment;
if (allow_cookie) {
extras_size += internal::kPartitionCookieSizeAdjustment;
}

if (brp_enabled()) {
// TODO(tasak): In the PUT_REF_COUNT_IN_PREVIOUS_SLOT case, ref-count is
// stored out-of-line for single-slot slot spans, so no need to
// add/subtract its size in this case.
flags.extras_size += internal::kPartitionRefCountSizeAdjustment;
flags.extras_offset += internal::kPartitionRefCountOffsetAdjustment;
extras_size += internal::kPartitionRefCountSizeAdjustment;
extras_offset += internal::kPartitionRefCountOffsetAdjustment;
}
#endif // defined(PA_EXTRAS_REQUIRED)

// Re-confirm the above PA_CHECKs, by making sure there are no
// pre-allocation extras when AlignedAlloc is allowed. Post-allocation
// extras are ok.
PA_CHECK(!flags.allow_aligned_alloc || !flags.extras_offset);
PA_CHECK(!allow_aligned_alloc || !extras_offset);

flags.quarantine_mode =
quarantine_mode =
#if defined(PA_ALLOW_PCSCAN)
(opts.quarantine == PartitionOptions::Quarantine::kDisallowed
? QuarantineMode::kAlwaysDisabled
Expand Down Expand Up @@ -763,10 +763,10 @@ void PartitionRoot<thread_safe>::Init(PartitionOptions opts) {
with_thread_cache = false;
#else
ThreadCache::EnsureThreadSpecificDataInitialized();
flags.with_thread_cache =
with_thread_cache =
(opts.thread_cache == PartitionOptions::ThreadCache::kEnabled);

if (flags.with_thread_cache)
if (with_thread_cache)
ThreadCache::Init(this);
#endif // !defined(PA_THREAD_CACHE_SUPPORTED)

Expand All @@ -786,7 +786,7 @@ void PartitionRoot<thread_safe>::Init(PartitionOptions opts) {
template <bool thread_safe>
PartitionRoot<thread_safe>::~PartitionRoot() {
#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
PA_CHECK(!flags.with_thread_cache)
PA_CHECK(!with_thread_cache)
<< "Must not destroy a partition with a thread cache";
#endif // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)

Expand All @@ -800,7 +800,7 @@ template <bool thread_safe>
void PartitionRoot<thread_safe>::EnableThreadCacheIfSupported() {
#if defined(PA_THREAD_CACHE_SUPPORTED)
::partition_alloc::internal::ScopedGuard guard{lock_};
PA_CHECK(!flags.with_thread_cache);
PA_CHECK(!with_thread_cache);
// By the time we get there, there may be multiple threads created in the
// process. Since `with_thread_cache` is accessed without a lock, it can
// become visible to another thread before the effects of
Expand All @@ -815,7 +815,7 @@ void PartitionRoot<thread_safe>::EnableThreadCacheIfSupported() {
PA_CHECK(before == 0);
ThreadCache::Init(this);
thread_caches_being_constructed_.fetch_sub(1, std::memory_order_release);
flags.with_thread_cache = true;
with_thread_cache = true;
#endif // defined(PA_THREAD_CACHE_SUPPORTED)
}

Expand Down Expand Up @@ -916,7 +916,7 @@ bool PartitionRoot<thread_safe>::TryReallocInPlaceForDirectMap(

#if DCHECK_IS_ON()
// Write a new trailing cookie.
if (flags.allow_cookie) {
if (allow_cookie) {
auto* object =
reinterpret_cast<unsigned char*>(SlotStartToObject(slot_start));
internal::PartitionCookieWriteValue(object +
Expand Down Expand Up @@ -965,7 +965,7 @@ bool PartitionRoot<thread_safe>::TryReallocInPlaceForNormalBuckets(
#if DCHECK_IS_ON()
// Write a new trailing cookie only when it is possible to keep track
// raw size (otherwise we wouldn't know where to look for it later).
if (flags.allow_cookie) {
if (allow_cookie) {
internal::PartitionCookieWriteValue(
reinterpret_cast<unsigned char*>(address) +
slot_span->GetUsableSize(this));
Expand Down Expand Up @@ -1204,7 +1204,7 @@ void PartitionRoot<thread_safe>::DumpStats(const char* partition_name,
stats.total_active_bytes += direct_mapped_allocations_total_size;
stats.total_active_count += num_direct_mapped_allocations;

stats.has_thread_cache = flags.with_thread_cache;
stats.has_thread_cache = with_thread_cache;
if (stats.has_thread_cache) {
ThreadCacheRegistry::Instance().DumpStats(
true, &stats.current_thread_cache_stats);
Expand Down Expand Up @@ -1241,9 +1241,9 @@ void PartitionRoot<thread_safe>::DumpStats(const char* partition_name,
template <bool thread_safe>
void PartitionRoot<thread_safe>::DeleteForTesting(
PartitionRoot* partition_root) {
if (partition_root->flags.with_thread_cache) {
if (partition_root->with_thread_cache) {
ThreadCache::SwapForTesting(nullptr);
partition_root->flags.with_thread_cache = false;
partition_root->with_thread_cache = false;
}

delete partition_root;
Expand Down

0 comments on commit 98a1292

Please sign in to comment.