-
Notifications
You must be signed in to change notification settings - Fork 15.2k
[sanitizer_common] Add darwin-specific MemoryRangeIsAvailable #167797
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
Conversation
|
@llvm/pr-subscribers-compiler-rt-sanitizer Author: Andrew Haberlandt (ndrewh) Changes: This fixes a race in the code that initializes shadow memory in ASAN: llvm-project/compiler-rt/lib/asan/asan_shadow_setup.cpp Lines 66 to 91 in 4b05581
In step 2, MemoryMappingLayout makes allocations using the internal allocator, which can map memory that overlaps the shadow region chosen in step 1. To address this, this PR implements MemoryRangeIsAvailable on Darwin without any heap allocations.
After this fix, it should be possible to re-land #166005, which triggered this issue on the x86 iOS simulators. rdar://164208439 Full diff: https://github.com/llvm/llvm-project/pull/167797.diff 5 Files Affected:
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_common.h b/compiler-rt/lib/sanitizer_common/sanitizer_common.h
index b515b15b327d8..d59066701b70c 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_common.h
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_common.h
@@ -487,6 +487,13 @@ inline uptr Log2(uptr x) {
return LeastSignificantSetBitIndex(x);
}
+inline bool IntervalsAreSeparate(uptr start1, uptr end1, uptr start2,
+ uptr end2) {
+ CHECK(start1 <= end1);
+ CHECK(start2 <= end2);
+ return (end1 < start2) || (end2 < start1);
+}
+
// Don't use std::min, std::max or std::swap, to minimize dependency
// on libstdc++.
template <class T>
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_mac.cpp b/compiler-rt/lib/sanitizer_common/sanitizer_mac.cpp
index 90c0b66f81b5b..24e446c3078bd 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_mac.cpp
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_mac.cpp
@@ -103,7 +103,9 @@ extern "C" {
natural_t *nesting_depth,
vm_region_recurse_info_t info,
mach_msg_type_number_t *infoCnt);
-}
+
+ extern const void* _dyld_get_shared_cache_range(size_t* length);
+ }
# if !SANITIZER_GO
// Weak symbol no-op when TSan is not linked
@@ -1403,15 +1405,27 @@ uptr FindAvailableMemoryRange(uptr size, uptr alignment, uptr left_padding,
return 0;
}
-// Returns true if the address is definitely mapped, and false if it is not
-// mapped or could not be determined.
-bool IsAddressInMappedRegion(uptr addr) {
+// This function (when used during initialization when there is
+// only a single thread), can be used to verify that a range
+// of memory hasn't already been mapped, and won't be mapped
+// later in the shared cache.
+//
+// If the syscall mach_vm_region_recurse fails (due to sandbox),
+// we assume that the memory is not mapped so that execution can continue.
+//
+// NOTE: range_end is inclusive
+//
+// WARNING: This function should NOT allocate memory, since it is
+// used in InitializeShadowMemory between where we search for
+// space for shadow and where we actually allocate it.
+bool MemoryRangeIsAvailable(uptr range_start, uptr range_end) {
mach_vm_size_t vmsize = 0;
natural_t depth = 0;
vm_region_submap_short_info_data_64_t vminfo;
mach_msg_type_number_t count = VM_REGION_SUBMAP_SHORT_INFO_COUNT_64;
- mach_vm_address_t address = addr;
+ mach_vm_address_t address = range_start;
+ // First, check if the range is already mapped.
kern_return_t kr =
mach_vm_region_recurse(mach_task_self(), &address, &vmsize, &depth,
(vm_region_info_t)&vminfo, &count);
@@ -1423,7 +1437,24 @@ bool IsAddressInMappedRegion(uptr addr) {
Report("HINT: Is mach_vm_region_recurse allowed by sandbox?\n");
}
- return (kr == KERN_SUCCESS && addr >= address && addr < address + vmsize);
+ if (kr == KERN_SUCCESS && !IntervalsAreSeparate(address, address + vmsize - 1,
+ range_start, range_end)) {
+ // Overlaps with already-mapped memory
+ return false;
+ }
+
+ size_t cacheLength;
+ uptr cacheStart = (uptr)_dyld_get_shared_cache_range(&cacheLength);
+
+ if (cacheStart &&
+ !IntervalsAreSeparate(cacheStart, cacheStart + cacheLength - 1,
+ range_start, range_end)) {
+ // Overlaps with shared cache region
+ return false;
+ }
+
+ // We believe this address is available.
+ return true;
}
// FIXME implement on this platform.
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_mac.h b/compiler-rt/lib/sanitizer_common/sanitizer_mac.h
index 789dd8e4d8e9c..b0e4ac7f40745 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_mac.h
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_mac.h
@@ -76,8 +76,6 @@ struct ThreadEventCallbacks {
void InstallPthreadIntrospectionHook(const ThreadEventCallbacks &callbacks);
-bool IsAddressInMappedRegion(uptr addr);
-
} // namespace __sanitizer
#endif // SANITIZER_APPLE
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_posix.cpp b/compiler-rt/lib/sanitizer_common/sanitizer_posix.cpp
index 69af6465a62c2..5b2c4e668ca8f 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_posix.cpp
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_posix.cpp
@@ -225,17 +225,9 @@ void *MapWritableFileToMemory(void *addr, uptr size, fd_t fd, OFF_T offset) {
return (void *)p;
}
-static inline bool IntervalsAreSeparate(uptr start1, uptr end1,
- uptr start2, uptr end2) {
- CHECK(start1 <= end1);
- CHECK(start2 <= end2);
- return (end1 < start2) || (end2 < start1);
-}
-
+# if !SANITIZER_APPLE
// FIXME: this is thread-unsafe, but should not cause problems most of the time.
-// When the shadow is mapped only a single thread usually exists (plus maybe
-// several worker threads on Mac, which aren't expected to map big chunks of
-// memory).
+// When the shadow is mapped only a single thread usually exists
bool MemoryRangeIsAvailable(uptr range_start, uptr range_end) {
MemoryMappingLayout proc_maps(/*cache_enabled*/true);
if (proc_maps.Error())
@@ -251,7 +243,6 @@ bool MemoryRangeIsAvailable(uptr range_start, uptr range_end) {
return true;
}
-#if !SANITIZER_APPLE
void DumpProcessMap() {
MemoryMappingLayout proc_maps(/*cache_enabled*/true);
const sptr kBufSize = 4095;
@@ -265,7 +256,7 @@ void DumpProcessMap() {
Report("End of process memory map.\n");
UnmapOrDie(filename, kBufSize);
}
-#endif
+# endif
const char *GetPwd() {
return GetEnv("PWD");
diff --git a/compiler-rt/lib/tsan/rtl/tsan_platform_mac.cpp b/compiler-rt/lib/tsan/rtl/tsan_platform_mac.cpp
index 7fa5e017d3985..f6fe2405254e7 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_platform_mac.cpp
+++ b/compiler-rt/lib/tsan/rtl/tsan_platform_mac.cpp
@@ -235,7 +235,7 @@ void InitializePlatformEarly() {
}
// In some configurations, the max_vm is expanded, but much of this space is
// already mapped. TSAN will not work in this configuration.
- if (IsAddressInMappedRegion(HiAppMemEnd() - 1)) {
+ if (!MemoryRangeIsAvailable(HiAppMemEnd() - 1, HiAppMemEnd())) {
Report(
"ThreadSanitizer: Unsupported virtual memory layout: Address %p is "
"already mapped.\n",
|
You can test this locally with the following command:git-clang-format --diff origin/main HEAD --extensions h,cpp -- compiler-rt/lib/sanitizer_common/sanitizer_common.h compiler-rt/lib/sanitizer_common/sanitizer_mac.cpp compiler-rt/lib/sanitizer_common/sanitizer_mac.h compiler-rt/lib/sanitizer_common/sanitizer_posix.cpp compiler-rt/lib/tsan/rtl/tsan_platform_mac.cpp --diff_from_common_commit
View the diff from clang-format here.diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_mac.cpp b/compiler-rt/lib/sanitizer_common/sanitizer_mac.cpp
index a6f757173..4b932fc56 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_mac.cpp
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_mac.cpp
@@ -105,47 +105,49 @@ extern "C" {
mach_msg_type_number_t *infoCnt);
extern const void* _dyld_get_shared_cache_range(size_t* length);
-}
+ }
# if !SANITIZER_GO
-// Weak symbol no-op when TSan is not linked
-SANITIZER_WEAK_ATTRIBUTE extern void __tsan_set_in_internal_write_call(
- bool value) {}
+ // Weak symbol no-op when TSan is not linked
+ SANITIZER_WEAK_ATTRIBUTE extern void __tsan_set_in_internal_write_call(
+ bool value) {}
# endif
-namespace __sanitizer {
+ namespace __sanitizer {
-#include "sanitizer_syscall_generic.inc"
+# include "sanitizer_syscall_generic.inc"
-// Direct syscalls, don't call libmalloc hooks (but not available on 10.6).
-extern "C" void *__mmap(void *addr, size_t len, int prot, int flags, int fildes,
- off_t off) SANITIZER_WEAK_ATTRIBUTE;
-extern "C" int __munmap(void *, size_t) SANITIZER_WEAK_ATTRIBUTE;
+ // Direct syscalls, don't call libmalloc hooks (but not available on 10.6).
+ extern "C" void* __mmap(void* addr, size_t len, int prot, int flags,
+ int fildes, off_t off) SANITIZER_WEAK_ATTRIBUTE;
+ extern "C" int __munmap(void*, size_t) SANITIZER_WEAK_ATTRIBUTE;
-// ---------------------- sanitizer_libc.h
+ // ---------------------- sanitizer_libc.h
-// From <mach/vm_statistics.h>, but not on older OSs.
-#ifndef VM_MEMORY_SANITIZER
-#define VM_MEMORY_SANITIZER 99
-#endif
+ // From <mach/vm_statistics.h>, but not on older OSs.
+# ifndef VM_MEMORY_SANITIZER
+# define VM_MEMORY_SANITIZER 99
+# endif
-// XNU on Darwin provides a mmap flag that optimizes allocation/deallocation of
-// giant memory regions (i.e. shadow memory regions).
-#define kXnuFastMmapFd 0x4
-static size_t kXnuFastMmapThreshold = 2 << 30; // 2 GB
-static bool use_xnu_fast_mmap = false;
-
-uptr internal_mmap(void *addr, size_t length, int prot, int flags,
- int fd, u64 offset) {
- if (fd == -1) {
- fd = VM_MAKE_TAG(VM_MEMORY_SANITIZER);
- if (length >= kXnuFastMmapThreshold) {
- if (use_xnu_fast_mmap) fd |= kXnuFastMmapFd;
+ // XNU on Darwin provides a mmap flag that optimizes allocation/deallocation
+ // of giant memory regions (i.e. shadow memory regions).
+# define kXnuFastMmapFd 0x4
+ static size_t kXnuFastMmapThreshold = 2 << 30; // 2 GB
+ static bool use_xnu_fast_mmap = false;
+
+ uptr internal_mmap(void* addr, size_t length, int prot, int flags, int fd,
+ u64 offset) {
+ if (fd == -1) {
+ fd = VM_MAKE_TAG(VM_MEMORY_SANITIZER);
+ if (length >= kXnuFastMmapThreshold) {
+ if (use_xnu_fast_mmap)
+ fd |= kXnuFastMmapFd;
+ }
}
+ if (&__mmap)
+ return (uptr)__mmap(addr, length, prot, flags, fd, offset);
+ return (uptr)mmap(addr, length, prot, flags, fd, offset);
}
- if (&__mmap) return (uptr)__mmap(addr, length, prot, flags, fd, offset);
- return (uptr)mmap(addr, length, prot, flags, fd, offset);
-}
uptr internal_munmap(void *addr, uptr length) {
if (&__munmap) return __munmap(addr, length);
|
|
I would like to ignore the clang-format diff, since it's to a bunch of sanitizer_mac.cpp code that is untouched by this change... |
DanBlackwell
left a comment
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
This is an interesting issue, good spot! I only have a couple of formatting quibbles.
| // | ||
| // NOTE: range_end is inclusive | ||
| // | ||
| // WARNING: This function should NOT allocate memory, since it is |
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
NIT: should not => must not
…ttempt 2) (#167800) This re-lands #166005, which was reverted due to the issue described in #167797. There are 4 small changes: - Fix LoadedModule leak by calling Clear() on the modules list - Fix internal_strncpy calls that are not null-terminated - Improve test to accept the dylib being loaded from a different path than compiled `{{.*}}[[DYLIB]]` - strcmp => internal_strncmp This should not be merged until after #167797. rdar://163149325
This fixes a TOCTOU (time-of-check to time-of-use) bug in the code that initializes shadow memory in ASAN:
llvm-project/compiler-rt/lib/asan/asan_shadow_setup.cpp
Lines 66 to 91 in 4b05581
FindDynamicShadowStart is used to search the memory mapping for enough space to dynamically allocate shadow memory; then MemoryRangeIsAvailable(shadow_start, kHighShadowEnd) is called, which goes into MemoryMappingLayout; then ReserveShadowMemoryRange maps the region. In step 2,
MemoryMappingLayout makes various allocations using the internal allocator. This can cause the allocator to map more memory! In some cases, this can actually allocate memory that overlaps with the shadow region returned by FindDynamicShadowStart in step 1. This is not actually fatal, but it causes memory corruption; MAP_FIXED is allowed to overlap other regions, and the effect is that any overlapping memory is zeroed. To address this, this PR implements
MemoryRangeIsAvailable on Darwin without any heap allocations: IntervalsAreSeparate is moved into sanitizer_common.h; the generic MemoryRangeIsAvailable is guarded behind !SANITIZER_APPLE; and IsAddressInMappedRegion in sanitizer_mac becomes MemoryRangeIsAvailable, which also checks for overlap with the DYLD shared cache.
rdar://164208439