Make more allocator methods work correctly in an out-of-process mode.

Summary:
This makes `GetBlockBegin()` and `GetBlockBeginFastLocked()` work correctly with `RemoteAddressSpaceView`.

This has the knock-on effect of also making the `PointerIsMine()` and
`GetMetaData()` methods behave correctly when `RemoteAddressSpaceView`
is used to instantiate the allocators.

This will be used by future out-of-process allocator enumeration
patches.
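
For reference, the allocators are parameterized by an `AddressSpaceView` type,
and the fix consists of routing every read of allocator metadata through
`AddressSpaceView::Load()`. A minimal sketch of that contract, modeled on the
in-process view in `sanitizer_common` (illustrative, not the verbatim
implementation):

    // In-process view: Load() is the identity, because target addresses
    // are directly dereferenceable here. An out-of-process view would
    // instead copy `num_elements` objects of type T out of the target
    // process and return a pointer to the local copy.
    typedef unsigned long uptr;  // stand-in for sanitizer_common's uptr

    struct LocalAddressSpaceView {
      template <typename T>
      static const T *Load(const T *target_address, uptr num_elements = 1) {
        (void)num_elements;     // Nothing to copy in the same address space.
        return target_address;
      }
    };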

rdar://problem/45284065

Reviewers: kcc, vitalybuka, dvyukov, cryptoad, eugenis, george.karpenkov, yln

Subscribers: #sanitizers, llvm-commits, kubamracek

Differential Revision: https://reviews.llvm.org/D56964

llvm-svn: 352335
danliew committed Jan 27, 2019
1 parent cfca2a7 commit 225eb75
Showing 2 changed files with 18 additions and 13 deletions.
2 changes: 1 addition & 1 deletion compiler-rt/lib/sanitizer_common/sanitizer_allocator_primary64.h
@@ -188,7 +188,7 @@ class SizeClassAllocator64 {
     uptr beg = chunk_idx * size;
     uptr next_beg = beg + size;
     if (class_id >= kNumClasses) return nullptr;
-    RegionInfo *region = GetRegionInfo(class_id);
+    const RegionInfo *region = AddressSpaceView::Load(GetRegionInfo(class_id));
     if (region->mapped_user >= next_beg)
       return reinterpret_cast<void*>(reg_beg + beg);
     return nullptr;
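
This one-line change is the whole idea in miniature: `GetRegionInfo(class_id)`
computes an address that is only meaningful in the target process, and
`Load()` makes it safe to dereference locally before `region->mapped_user` is
read. A hedged sketch of what an out-of-process view could look like, reusing
the `uptr` stand-in from the sketch above (the `CopyFromTarget()` helper is
hypothetical, not part of the sanitizer API):

    // Hypothetical out-of-process view: each Load() copies the bytes out
    // of the target process and returns a pointer to a local buffer.
    struct RemoteAddressSpaceViewSketch {
      template <typename T>
      static const T *Load(const T *target_address, uptr num_elements = 1) {
        // CopyFromTarget stands in for a primitive such as process_vm_readv
        // or mach_vm_read; it returns locally owned memory holding the bytes.
        return static_cast<const T *>(
            CopyFromTarget(target_address, num_elements * sizeof(T)));
      }
     private:
      static const void *CopyFromTarget(const void *target_address, uptr size);
    };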
29 changes: 17 additions & 12 deletions compiler-rt/lib/sanitizer_common/sanitizer_allocator_secondary.h
@@ -183,22 +183,25 @@ class LargeMmapAllocator {
     uptr p = reinterpret_cast<uptr>(ptr);
     SpinMutexLock l(&mutex_);
     uptr nearest_chunk = 0;
+    Header *const *chunks = AddressSpaceView::Load(chunks_, n_chunks_);
     // Cache-friendly linear search.
     for (uptr i = 0; i < n_chunks_; i++) {
-      uptr ch = reinterpret_cast<uptr>(chunks_[i]);
+      uptr ch = reinterpret_cast<uptr>(chunks[i]);
       if (p < ch) continue;  // p is at left to this chunk, skip it.
       if (p - ch < p - nearest_chunk)
         nearest_chunk = ch;
     }
     if (!nearest_chunk)
       return nullptr;
-    Header *h = reinterpret_cast<Header *>(nearest_chunk);
+    const Header *h =
+        AddressSpaceView::Load(reinterpret_cast<Header *>(nearest_chunk));
+    Header *h_ptr = reinterpret_cast<Header *>(nearest_chunk);
     CHECK_GE(nearest_chunk, h->map_beg);
     CHECK_LT(nearest_chunk, h->map_beg + h->map_size);
     CHECK_LE(nearest_chunk, p);
     if (h->map_beg + h->map_size <= p)
       return nullptr;
-    return GetUser(h);
+    return GetUser(h_ptr);
   }

   void EnsureSortedChunks() {
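
Note the `h` / `h_ptr` split: field reads (`h->map_beg`, `h->map_size`) go
through the `Load()`ed local copy, while `GetUser()` receives the pointer that
is valid in the target address space, because its result is derived from where
the header sits, not from what it contains. Roughly (a sketch of the real
`GetUser()`, with `page_size` standing in for the allocator's `page_size_`
member):

    struct Header;  // the allocator's per-chunk header (opaque here)

    // The user block starts one page past its Header, so this is pure
    // address arithmetic on the header's location. Passing the Load()ed
    // local copy would compute an address in the inspecting process
    // rather than in the target process.
    void *GetUserSketch(const Header *h, uptr page_size) {
      return reinterpret_cast<void *>(reinterpret_cast<uptr>(h) + page_size);
    }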
@@ -218,33 +221,35 @@ class LargeMmapAllocator {
     uptr n = n_chunks_;
     if (!n) return nullptr;
     EnsureSortedChunks();
-    auto min_mmap_ = reinterpret_cast<uptr>(chunks_[0]);
-    auto max_mmap_ =
-        reinterpret_cast<uptr>(chunks_[n - 1]) + chunks_[n - 1]->map_size;
+    Header *const *chunks = AddressSpaceView::Load(chunks_, n_chunks_);
+    auto min_mmap_ = reinterpret_cast<uptr>(chunks[0]);
+    auto max_mmap_ = reinterpret_cast<uptr>(chunks[n - 1]) +
+                     AddressSpaceView::Load(chunks[n - 1])->map_size;
     if (p < min_mmap_ || p >= max_mmap_)
       return nullptr;
     uptr beg = 0, end = n - 1;
     // This loop is a log(n) lower_bound. It does not check for the exact match
     // to avoid expensive cache-thrashing loads.
     while (end - beg >= 2) {
       uptr mid = (beg + end) / 2;  // Invariant: mid >= beg + 1
-      if (p < reinterpret_cast<uptr>(chunks_[mid]))
-        end = mid - 1;  // We are not interested in chunks_[mid].
+      if (p < reinterpret_cast<uptr>(chunks[mid]))
+        end = mid - 1;  // We are not interested in chunks[mid].
       else
-        beg = mid;  // chunks_[mid] may still be what we want.
+        beg = mid;  // chunks[mid] may still be what we want.
     }

     if (beg < end) {
       CHECK_EQ(beg + 1, end);
       // There are 2 chunks left, choose one.
-      if (p >= reinterpret_cast<uptr>(chunks_[end]))
+      if (p >= reinterpret_cast<uptr>(chunks[end]))
         beg = end;
     }

-    Header *h = chunks_[beg];
+    const Header *h = AddressSpaceView::Load(chunks[beg]);
+    Header *h_ptr = chunks[beg];
     if (h->map_beg + h->map_size <= p || p < h->map_beg)
       return nullptr;
-    return GetUser(h);
+    return GetUser(h_ptr);
   }

   void PrintStats() {
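
The binary search itself is unchanged by the patch; only its loads now go
through the local `chunks` view. For clarity, here is the same
lower_bound-style loop as a self-contained function over plain addresses (an
illustrative demo, not the allocator's code):

    #include <cstddef>
    #include <cstdint>

    // Returns the start of the rightmost chunk beginning at or before p.
    // Mirrors the loop in GetBlockBegin(): narrow [beg, end] until at most
    // two candidates remain, comparing chunk start addresses only and never
    // checking for an exact match, which is how the original avoids
    // cache-thrashing header loads. Assumes chunks is sorted, n >= 1, and
    // p >= chunks[0].
    uintptr_t NearestChunkBegin(const uintptr_t *chunks, size_t n,
                                uintptr_t p) {
      size_t beg = 0, end = n - 1;
      while (end - beg >= 2) {
        size_t mid = (beg + end) / 2;  // Invariant: mid >= beg + 1.
        if (p < chunks[mid])
          end = mid - 1;  // chunks[mid] starts past p; discard it.
        else
          beg = mid;      // chunks[mid] may still contain p.
      }
      if (beg < end && p >= chunks[end])
        beg = end;  // Two candidates left; pick the later one.
      return chunks[beg];
    }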
