Merged
7 changes: 7 additions & 0 deletions compiler-rt/lib/sanitizer_common/sanitizer_common.h
@@ -487,6 +487,13 @@ inline uptr Log2(uptr x) {
return LeastSignificantSetBitIndex(x);
}

inline bool IntervalsAreSeparate(uptr start1, uptr end1, uptr start2,
uptr end2) {
CHECK_LE(start1, end1);
CHECK_LE(start2, end2);
return (end1 < start2) || (end2 < start1);
}

// Don't use std::min, std::max or std::swap, to minimize dependency
// on libstdc++.
template <class T>
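For reference, a minimal usage sketch of the new helper (not part of the patch; the wrapper name and addresses are made up): both endpoints are treated as inclusive, so two ranges that share even a single byte are not separate.

// Hypothetical illustration only; addresses are arbitrary.
static void IntervalsAreSeparateExample() {
  uptr a_start = 0x1000, a_end = 0x1fff;  // [0x1000, 0x1fff]
  uptr b_start = 0x2000, b_end = 0x2fff;  // [0x2000, 0x2fff]
  CHECK(IntervalsAreSeparate(a_start, a_end, b_start, b_end));    // disjoint
  CHECK(!IntervalsAreSeparate(a_start, 0x2000, b_start, b_end));  // share byte 0x2000
}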
41 changes: 36 additions & 5 deletions compiler-rt/lib/sanitizer_common/sanitizer_mac.cpp
@@ -103,6 +103,8 @@ extern "C" {
natural_t *nesting_depth,
vm_region_recurse_info_t info,
mach_msg_type_number_t *infoCnt);

extern const void* _dyld_get_shared_cache_range(size_t* length);
}

# if !SANITIZER_GO
@@ -1403,15 +1405,27 @@ uptr FindAvailableMemoryRange(uptr size, uptr alignment, uptr left_padding,
return 0;
}

// Returns true if the address is definitely mapped, and false if it is not
// mapped or could not be determined.
bool IsAddressInMappedRegion(uptr addr) {
// This function (when used during initialization, when there is
// only a single thread) can be used to verify that a range
// of memory hasn't already been mapped, and won't be mapped
// later in the shared cache.
//
// If the syscall mach_vm_region_recurse fails (due to sandbox),
// we assume that the memory is not mapped so that execution can continue.
//
// NOTE: range_end is inclusive
//
// WARNING: This function must NOT allocate memory, since it is
// used in InitializeShadowMemory between where we search for
// space for shadow and where we actually allocate it.
bool MemoryRangeIsAvailable(uptr range_start, uptr range_end) {
mach_vm_size_t vmsize = 0;
natural_t depth = 0;
vm_region_submap_short_info_data_64_t vminfo;
mach_msg_type_number_t count = VM_REGION_SUBMAP_SHORT_INFO_COUNT_64;
mach_vm_address_t address = addr;
mach_vm_address_t address = range_start;

// First, check if the range is already mapped.
kern_return_t kr =
mach_vm_region_recurse(mach_task_self(), &address, &vmsize, &depth,
(vm_region_info_t)&vminfo, &count);
@@ -1423,7 +1437,24 @@ bool IsAddressInMappedRegion(uptr addr) {
Report("HINT: Is mach_vm_region_recurse allowed by sandbox?\n");
}

return (kr == KERN_SUCCESS && addr >= address && addr < address + vmsize);
if (kr == KERN_SUCCESS && !IntervalsAreSeparate(address, address + vmsize - 1,
range_start, range_end)) {
// Overlaps with already-mapped memory
return false;
}

size_t cacheLength;
uptr cacheStart = (uptr)_dyld_get_shared_cache_range(&cacheLength);

if (cacheStart &&
!IntervalsAreSeparate(cacheStart, cacheStart + cacheLength - 1,
range_start, range_end)) {
// Overlaps with shared cache region
return false;
}

// We believe this address is available.
return true;
}

// FIXME implement on this platform.
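To illustrate the intended call pattern, here is a hedged sketch of how shadow setup on the Apple path might consult this predicate before reserving memory. The function name ReserveShadowOrDie, the shadow_beg/shadow_end parameters, and the use of MmapFixedNoReserve are illustrative assumptions, not lines from this patch.

// Illustrative only: probe a candidate shadow range (inclusive end) before
// committing to it.
static void ReserveShadowOrDie(uptr shadow_beg, uptr shadow_end) {
  if (!MemoryRangeIsAvailable(shadow_beg, shadow_end - 1)) {
    Report("FATAL: candidate shadow range collides with an existing mapping "
           "or the dyld shared cache\n");
    Die();
  }
  // Only now is it safe to actually map the range.
  MmapFixedNoReserve(shadow_beg, shadow_end - shadow_beg, "shadow");
}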
2 changes: 0 additions & 2 deletions compiler-rt/lib/sanitizer_common/sanitizer_mac.h
@@ -76,8 +76,6 @@ struct ThreadEventCallbacks {

void InstallPthreadIntrospectionHook(const ThreadEventCallbacks &callbacks);

bool IsAddressInMappedRegion(uptr addr);

} // namespace __sanitizer

#endif // SANITIZER_APPLE
15 changes: 3 additions & 12 deletions compiler-rt/lib/sanitizer_common/sanitizer_posix.cpp
@@ -225,17 +225,9 @@ void *MapWritableFileToMemory(void *addr, uptr size, fd_t fd, OFF_T offset) {
return (void *)p;
}

static inline bool IntervalsAreSeparate(uptr start1, uptr end1,
uptr start2, uptr end2) {
CHECK(start1 <= end1);
CHECK(start2 <= end2);
return (end1 < start2) || (end2 < start1);
}

# if !SANITIZER_APPLE
// FIXME: this is thread-unsafe, but should not cause problems most of the time.
// When the shadow is mapped only a single thread usually exists (plus maybe
// several worker threads on Mac, which aren't expected to map big chunks of
// memory).
// When the shadow is mapped only a single thread usually exists
bool MemoryRangeIsAvailable(uptr range_start, uptr range_end) {
MemoryMappingLayout proc_maps(/*cache_enabled*/true);
if (proc_maps.Error())
@@ -251,7 +243,6 @@ bool MemoryRangeIsAvailable(uptr range_start, uptr range_end) {
return true;
}

#if !SANITIZER_APPLE
void DumpProcessMap() {
MemoryMappingLayout proc_maps(/*cache_enabled*/true);
const sptr kBufSize = 4095;
@@ -265,7 +256,7 @@ void DumpProcessMap() {
Report("End of process memory map.\n");
UnmapOrDie(filename, kBufSize);
}
#endif
# endif

const char *GetPwd() {
return GetEnv("PWD");
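The generic POSIX implementation (collapsed above) follows the usual pattern of testing the candidate range against each segment of the process maps. Below is a simplified, hypothetical sketch of that pattern, not the literal elided body; the helper name RangeIsFreeOfMappings is made up, while MemoryMappingLayout and MemoryMappedSegment are the existing sanitizer_common types.

// Hypothetical sketch: reject the candidate range as soon as any mapped
// segment intersects it (inclusive end points on both sides).
static bool RangeIsFreeOfMappings(uptr range_start, uptr range_end) {
  MemoryMappingLayout proc_maps(/*cache_enabled*/ true);
  MemoryMappedSegment segment;
  while (proc_maps.Next(&segment)) {
    if (!IntervalsAreSeparate(segment.start, segment.end - 1,
                              range_start, range_end))
      return false;  // collides with an existing mapping
  }
  return true;
}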
2 changes: 1 addition & 1 deletion compiler-rt/lib/tsan/rtl/tsan_platform_mac.cpp
@@ -235,7 +235,7 @@ void InitializePlatformEarly() {
}
// In some configurations, the max_vm is expanded, but much of this space is
// already mapped. TSAN will not work in this configuration.
if (IsAddressInMappedRegion(HiAppMemEnd() - 1)) {
if (!MemoryRangeIsAvailable(HiAppMemEnd() - 1, HiAppMemEnd())) {
Report(
"ThreadSanitizer: Unsupported virtual memory layout: Address %p is "
"already mapped.\n",