Skip to content

Commit

Permalink
[MTE] Use 16B ref count for MTE+BRP
Browse files Browse the repository at this point in the history
If memory tagging (MTE) is enabled together with BRP in previous-slot mode, retagging a freed slot can race with concurrent accesses to the BRP ref count stored in that slot (crbug.com/1445816). To prevent this, ref_count_size is rounded up to the MTE granule size so the ref count occupies its own granule, and that granule is left untagged.

(cherry picked from commit a88f4ef)

Bug: 1448009, 1445816
Change-Id: I10f9512d9ec5ebb1eef7d1b8bdb5e0470ef3360f
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/4569410
Commit-Queue: Keishi Hattori <keishi@chromium.org>
Reviewed-by: Bartek Nowierski <bartekn@chromium.org>
Cr-Original-Commit-Position: refs/heads/main@{#1152297}
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/4596468
Bot-Commit: Rubber Stamper <rubber-stamper@appspot.gserviceaccount.com>
Auto-Submit: Keishi Hattori <keishi@chromium.org>
Cr-Commit-Position: refs/branch-heads/5790@{#445}
Cr-Branched-From: 1d71a33-refs/heads/main@{#1148114}
  • Loading branch information
Keishi Hattori authored and Chromium LUCI CQ committed Jun 7, 2023
1 parent 6e4f6bc commit 00822e3
Show file tree
Hide file tree
Showing 6 changed files with 62 additions and 16 deletions.
12 changes: 12 additions & 0 deletions base/allocator/partition_allocator/partition_alloc_config.h
Expand Up @@ -158,6 +158,18 @@ static_assert(sizeof(void*) != 8, "");
static_assert(sizeof(void*) == 8);
#endif

// If memory tagging (MTE) is enabled together with BRP in previous-slot mode,
// the BRP ref count lives inside a slot area that is protected by MTE, so a
// concurrent free/retag of that slot can race with ref-count accesses
// (crbug.com/1445816). To prevent this, the ref_count_size is increased to the
// MTE granule size so the ref count occupies its own granule, and that granule
// is not tagged.
#if PA_CONFIG(HAS_MEMORY_TAGGING) && \
BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT) && \
BUILDFLAG(PUT_REF_COUNT_IN_PREVIOUS_SLOT)
#define PA_CONFIG_INCREASE_REF_COUNT_SIZE_FOR_MTE() 1
#else
#define PA_CONFIG_INCREASE_REF_COUNT_SIZE_FOR_MTE() 0
#endif

// Specifies whether allocation extras need to be added.
#if BUILDFLAG(PA_DCHECK_IS_ON) || BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
#define PA_CONFIG_EXTRAS_REQUIRED() 1
Expand Down
19 changes: 15 additions & 4 deletions base/allocator/partition_allocator/partition_alloc_unittest.cc
Expand Up @@ -254,14 +254,25 @@ struct PartitionAllocTestParam {
};

const std::vector<PartitionAllocTestParam> GetPartitionAllocTestParams() {
std::vector<size_t> ref_count_sizes = {0, 8, 16};
// sizeof(PartitionRefCount) == 8 under some configurations, so we can't force
// the size down to 4.
std::vector<size_t> ref_count_sizes = {16};

bool only_supports_16b_ref_count = false;
#if PA_CONFIG(INCREASE_REF_COUNT_SIZE_FOR_MTE)
only_supports_16b_ref_count =
partition_alloc::internal::base::CPU::GetInstanceNoAllocation().has_mte();
#endif

if (!only_supports_16b_ref_count) {
ref_count_sizes.push_back(0);
ref_count_sizes.push_back(8);
// sizeof(PartitionRefCount) == 8 under some configurations, so we can't
// force the size down to 4.
#if !PA_CONFIG(REF_COUNT_STORE_REQUESTED_SIZE) && \
!PA_CONFIG(REF_COUNT_CHECK_COOKIE) && \
!BUILDFLAG(ENABLE_DANGLING_RAW_PTR_CHECKS)
ref_count_sizes.push_back(4);
ref_count_sizes.push_back(4);
#endif
}

std::vector<PartitionAllocTestParam> params;
for (size_t ref_count_size : ref_count_sizes) {
Expand Down
19 changes: 17 additions & 2 deletions base/allocator/partition_allocator/partition_bucket.cc
Expand Up @@ -581,6 +581,20 @@ uint8_t ComputeSystemPagesPerSlotSpanInternal(size_t slot_size) {
return static_cast<uint8_t>(best_pages);
}

#if PA_CONFIG(HAS_MEMORY_TAGGING)
// Returns the number of bytes of |slot_size| that should be MTE-tagged.
// When INCREASE_REF_COUNT_SIZE_FOR_MTE is active, the trailing
// flags.ref_count_size bytes hold a BRP ref count that must stay untagged, so
// that concurrent ref-count accesses cannot race with a free/retag of the slot
// (crbug.com/1445816); those bytes are excluded from the tagged range.
template <bool thread_safe>
PA_ALWAYS_INLINE size_t TagSizeForSlot(PartitionRoot<thread_safe>* root,
size_t slot_size) {
#if PA_CONFIG(INCREASE_REF_COUNT_SIZE_FOR_MTE)
// ref_count_size is granule-aligned here — presumably guaranteed by
// PartitionRoot::Init's AlignUp to kMemTagGranuleSize; TODO confirm.
const size_t untagged_tail_size = root->flags.ref_count_size;
#else
// Nothing to exclude; the whole slot is tagged (|root| is unused here).
const size_t untagged_tail_size = 0;
#endif
return slot_size - untagged_tail_size;
}
#endif  // PA_CONFIG(HAS_MEMORY_TAGGING)

} // namespace

uint8_t ComputeSystemPagesPerSlotSpan(size_t slot_size,
Expand Down Expand Up @@ -979,7 +993,7 @@ PartitionBucket<thread_safe>::ProvisionMoreSlotsAndAllocOne(
root->IsMemoryTaggingEnabled() && slot_size <= kMaxMemoryTaggingSize;
if (PA_LIKELY(use_tagging)) {
// Ensure the MTE-tag of the memory pointed by |return_slot| is unguessable.
TagMemoryRangeRandomly(return_slot, slot_size);
TagMemoryRangeRandomly(return_slot, TagSizeForSlot(root, slot_size));
}
#endif // PA_CONFIG(HAS_MEMORY_TAGGING)
// Add all slots that fit within so far committed pages to the free list.
Expand All @@ -993,7 +1007,8 @@ PartitionBucket<thread_safe>::ProvisionMoreSlotsAndAllocOne(
// Ensure the MTE-tag of the memory pointed by other provisioned slot is
// unguessable. They will be returned to the app as is, and the MTE-tag
// will only change upon calling Free().
next_slot_ptr = TagMemoryRangeRandomly(next_slot, slot_size);
next_slot_ptr =
TagMemoryRangeRandomly(next_slot, TagSizeForSlot(root, slot_size));
} else {
// No MTE-tagging for larger slots, just cast.
next_slot_ptr = reinterpret_cast<void*>(next_slot);
Expand Down
10 changes: 2 additions & 8 deletions base/allocator/partition_allocator/partition_ref_count.h
Expand Up @@ -419,14 +419,8 @@ PA_ALWAYS_INLINE PartitionRefCount* PartitionRefCountPointer(
#if BUILDFLAG(PA_DCHECK_IS_ON) || BUILDFLAG(ENABLE_BACKUP_REF_PTR_SLOW_CHECKS)
PA_CHECK(refcount_address % alignof(PartitionRefCount) == 0);
#endif
// Have to MTE-tag, because the address is untagged, but lies within a slot
// area, which is protected by MTE.
//
// There could be a race condition though if the previous slot is
// freed/retagged concurrently, so ideally the ref count should occupy its
// own MTE granule.
// TODO(crbug.com/1445816): improve this.
return static_cast<PartitionRefCount*>(TagAddr(refcount_address));
// No need to tag because the ref count is not protected by MTE.
return reinterpret_cast<PartitionRefCount*>(refcount_address);
} else {
// No need to tag, as the metadata region isn't protected by MTE.
PartitionRefCount* bitmap_base = reinterpret_cast<PartitionRefCount*>(
Expand Down
7 changes: 7 additions & 0 deletions base/allocator/partition_allocator/partition_root.cc
Expand Up @@ -968,6 +968,13 @@ void PartitionRoot<thread_safe>::Init(PartitionOptions opts) {
if (!ref_count_size) {
ref_count_size = internal::kPartitionRefCountSizeAdjustment;
}
#if PA_CONFIG(INCREASE_REF_COUNT_SIZE_FOR_MTE)
if (IsMemoryTaggingEnabled()) {
ref_count_size = internal::base::bits::AlignUp(
ref_count_size, internal::kMemTagGranuleSize);
}
flags.ref_count_size = ref_count_size;
#endif // PA_CONFIG(INCREASE_REF_COUNT_SIZE_FOR_MTE)
PA_CHECK(internal::kPartitionRefCountSizeAdjustment <= ref_count_size);
flags.extras_size += ref_count_size;
flags.extras_offset += internal::kPartitionRefCountOffsetAdjustment;
Expand Down
11 changes: 9 additions & 2 deletions base/allocator/partition_allocator/partition_root.h
Expand Up @@ -258,7 +258,10 @@ struct PA_ALIGNAS(64) PA_COMPONENT_EXPORT(PARTITION_ALLOC) PartitionRoot {
bool use_configurable_pool;
#if PA_CONFIG(HAS_MEMORY_TAGGING)
bool memory_tagging_enabled_ = false;
#endif
#if PA_CONFIG(INCREASE_REF_COUNT_SIZE_FOR_MTE)
size_t ref_count_size;
#endif // PA_CONFIG(INCREASE_REF_COUNT_SIZE_FOR_MTE)
#endif // PA_CONFIG(HAS_MEMORY_TAGGING)
#if BUILDFLAG(ENABLE_THREAD_ISOLATION)
ThreadIsolationOption thread_isolation;
#endif
Expand Down Expand Up @@ -1218,8 +1221,12 @@ PA_ALWAYS_INLINE void PartitionRoot<thread_safe>::FreeNoHooks(void* object) {
// slot_span is untagged at this point, so we have to recover its tag
// again to increment and provide use-after-free mitigations.
uintptr_t slot_start_to_object_delta = object_addr - slot_start;
size_t tag_size = slot_size;
#if PA_CONFIG(INCREASE_REF_COUNT_SIZE_FOR_MTE)
tag_size -= root->flags.ref_count_size;
#endif
void* retagged_slot_start = internal::TagMemoryRangeIncrement(
internal::TagAddr(slot_start), slot_size);
internal::TagAddr(slot_start), tag_size);
// Incrementing the MTE-tag in the memory range invalidates the |object|'s
// tag, so it must be retagged.
object = reinterpret_cast<void*>(
Expand Down

0 comments on commit 00822e3

Please sign in to comment.