diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_mac.cpp b/compiler-rt/lib/sanitizer_common/sanitizer_mac.cpp
index bb71af5ad8b6a..a9ede5d2323ed 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_mac.cpp
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_mac.cpp
@@ -22,6 +22,11 @@
 #  endif
 #  include <stdio.h>
 
+// Start searching for available memory region past PAGEZERO, which is
+// 4KB on 32-bit and 4GB on 64-bit.
+#  define GAP_SEARCH_START_ADDRESS \
+    ((SANITIZER_WORDSIZE == 32) ? 0x000000001000 : 0x000100000000)
+
 #  include "sanitizer_common.h"
 #  include "sanitizer_file.h"
 #  include "sanitizer_flags.h"
@@ -58,9 +63,11 @@ extern char ***_NSGetArgv(void);
 #  include <dlfcn.h>  // for dladdr()
 #  include <errno.h>
 #  include <fcntl.h>
+#  include <inttypes.h>
 #  include <libkern/OSAtomic.h>
 #  include <mach-o/dyld.h>
 #  include <mach/mach.h>
+#  include <mach/mach_error.h>
 #  include <mach/mach_time.h>
 #  include <mach/vm_statistics.h>
 #  include <malloc/malloc.h>
@@ -1100,6 +1107,67 @@ static void StripEnv() {
 }
 #endif  // SANITIZER_GO
 
+// Prints out a consolidated memory map: contiguous regions
+// are merged together.
+static void PrintVmmap() {
+  const mach_vm_address_t max_vm_address = GetMaxVirtualAddress() + 1;
+  mach_vm_address_t address = GAP_SEARCH_START_ADDRESS;
+  kern_return_t kr = KERN_SUCCESS;
+
+  Report("Memory map:\n");
+  mach_vm_address_t last = 0;
+  mach_vm_address_t lastsz = 0;
+
+  while (1) {
+    mach_vm_size_t vmsize = 0;
+    natural_t depth = 0;
+    vm_region_submap_short_info_data_64_t vminfo;
+    mach_msg_type_number_t count = VM_REGION_SUBMAP_SHORT_INFO_COUNT_64;
+    kr = mach_vm_region_recurse(mach_task_self(), &address, &vmsize, &depth,
+                                (vm_region_info_t)&vminfo, &count);
+
+    if (kr == KERN_DENIED) {
+      Report(
+          "ERROR: mach_vm_region_recurse got KERN_DENIED when printing memory "
+          "map.\n");
+      Report(
+          "HINT: Check whether mach_vm_region_recurse is allowed by "
+          "sandbox.\n");
+    }
+
+    if (kr == KERN_SUCCESS && address < max_vm_address) {
+      if (last + lastsz == address) {
+        // This region is contiguous with the last; merge together.
+        lastsz += vmsize;
+      } else {
+        if (lastsz)
+          Printf("|| `[%p, %p]` || size=0x%016" PRIx64 " ||\n", (void*)last,
+                 (void*)(last + lastsz), lastsz);
+
+        last = address;
+        lastsz = vmsize;
+      }
+      address += vmsize;
+    } else {
+      // We've reached the end of the memory map. Print the last remaining
+      // region, if there is one.
+      if (lastsz)
+        Printf("|| `[%p, %p]` || size=0x%016" PRIx64 " ||\n", (void*)last,
+               (void*)(last + lastsz), lastsz);
+
+      break;
+    }
+  }
+}
+
+static void ReportShadowAllocFail(uptr shadow_size_bytes, uptr alignment) {
+  Report(
+      "FATAL: Failed to allocate shadow memory. Tried to allocate %p bytes "
+      "(alignment=%p).\n",
+      (void*)shadow_size_bytes, (void*)alignment);
+  PrintVmmap();
+}
+
 char **GetArgv() {
   return *_NSGetArgv();
 }
@@ -1207,10 +1275,11 @@ uptr MapDynamicShadow(uptr shadow_size_bytes, uptr shadow_scale,
     if (new_max_vm < max_occupied_addr) {
       Report("Unable to find a memory range for dynamic shadow.\n");
       Report(
-          "space_size = %p, largest_gap_found = %p, max_occupied_addr = %p, "
-          "new_max_vm = %p\n",
-          (void *)space_size, (void *)largest_gap_found,
-          (void *)max_occupied_addr, (void *)new_max_vm);
+          "\tspace_size = %p\n\tlargest_gap_found = %p\n\tmax_occupied_addr "
+          "= %p\n\tnew_max_vm = %p\n",
+          (void*)space_size, (void*)largest_gap_found, (void*)max_occupied_addr,
+          (void*)new_max_vm);
+      ReportShadowAllocFail(shadow_size_bytes, alignment);
       CHECK(0 && "cannot place shadow");
     }
     RestrictMemoryToMaxAddress(new_max_vm);
@@ -1221,6 +1290,7 @@ uptr MapDynamicShadow(uptr shadow_size_bytes, uptr shadow_scale,
                                             nullptr, nullptr);
     if (shadow_start == 0) {
       Report("Unable to find a memory range after restricting VM.\n");
+      ReportShadowAllocFail(shadow_size_bytes, alignment);
       CHECK(0 && "cannot place shadow after restricting vm");
     }
   }
@@ -1236,40 +1306,51 @@ uptr MapDynamicShadowAndAliases(uptr shadow_size, uptr alias_size,
 }
 
 uptr FindAvailableMemoryRange(uptr size, uptr alignment, uptr left_padding,
-                              uptr *largest_gap_found,
-                              uptr *max_occupied_addr) {
-  typedef vm_region_submap_short_info_data_64_t RegionInfo;
-  enum { kRegionInfoSize = VM_REGION_SUBMAP_SHORT_INFO_COUNT_64 };
-  // Start searching for available memory region past PAGEZERO, which is
-  // 4KB on 32-bit and 4GB on 64-bit.
-  mach_vm_address_t start_address =
-      (SANITIZER_WORDSIZE == 32) ? 0x000000001000 : 0x000100000000;
-
+                              uptr* largest_gap_found,
+                              uptr* max_occupied_addr) {
   const mach_vm_address_t max_vm_address = GetMaxVirtualAddress() + 1;
-  mach_vm_address_t address = start_address;
-  mach_vm_address_t free_begin = start_address;
+  mach_vm_address_t address = GAP_SEARCH_START_ADDRESS;
+  mach_vm_address_t free_begin = GAP_SEARCH_START_ADDRESS;
   kern_return_t kr = KERN_SUCCESS;
   if (largest_gap_found) *largest_gap_found = 0;
   if (max_occupied_addr) *max_occupied_addr = 0;
   while (kr == KERN_SUCCESS) {
     mach_vm_size_t vmsize = 0;
     natural_t depth = 0;
-    RegionInfo vminfo;
-    mach_msg_type_number_t count = kRegionInfoSize;
+    vm_region_submap_short_info_data_64_t vminfo;
+    mach_msg_type_number_t count = VM_REGION_SUBMAP_SHORT_INFO_COUNT_64;
     kr = mach_vm_region_recurse(mach_task_self(), &address, &vmsize, &depth,
                                 (vm_region_info_t)&vminfo, &count);
-    // There are cases where going beyond the processes' max vm does
-    // not return KERN_INVALID_ADDRESS so we check for going beyond that
-    // max address as well.
-    if (kr == KERN_INVALID_ADDRESS || address > max_vm_address) {
+    if (kr == KERN_SUCCESS) {
+      // There are cases where going beyond the processes' max vm does
+      // not return KERN_INVALID_ADDRESS so we check for going beyond that
+      // max address as well.
+      if (address > max_vm_address) {
+        address = max_vm_address;
+        kr = -1;  // break after this iteration.
+      }
+
+      if (max_occupied_addr)
+        *max_occupied_addr = address + vmsize;
+    } else if (kr == KERN_INVALID_ADDRESS) {
      // No more regions beyond "address", consider the gap at the end of VM.
      address = max_vm_address;
-      vmsize = 0;
-      kr = -1;  // break after this iteration.
+
+      // We will break after this iteration anyway since kr != KERN_SUCCESS
+    } else if (kr == KERN_DENIED) {
+      Report("ERROR: Unable to find a memory range for dynamic shadow.\n");
+      Report("HINT: Ensure mach_vm_region_recurse is allowed under sandbox.\n");
+      Die();
     } else {
-      if (max_occupied_addr) *max_occupied_addr = address + vmsize;
+      Report(
+          "WARNING: mach_vm_region_recurse returned unexpected code %d (%s)\n",
+          kr, mach_error_string(kr));
+      DCHECK(false && "mach_vm_region_recurse returned unexpected code");
+      break;  // address is not valid unless KERN_SUCCESS, therefore we must not
+              // use it.
     }
+
     if (free_begin != address) {
       // We found a free region [free_begin..address-1].
       uptr gap_start = RoundUpTo((uptr)free_begin + left_padding, alignment);
@@ -1292,6 +1373,29 @@ uptr FindAvailableMemoryRange(uptr size, uptr alignment, uptr left_padding,
   return 0;
 }
 
+// Returns true if the address is definitely mapped, and false if it is not
+// mapped or could not be determined.
+bool IsAddressInMappedRegion(uptr addr) {
+  mach_vm_size_t vmsize = 0;
+  natural_t depth = 0;
+  vm_region_submap_short_info_data_64_t vminfo;
+  mach_msg_type_number_t count = VM_REGION_SUBMAP_SHORT_INFO_COUNT_64;
+  mach_vm_address_t address = addr;
+
+  kern_return_t kr =
+      mach_vm_region_recurse(mach_task_self(), &address, &vmsize, &depth,
+                             (vm_region_info_t)&vminfo, &count);
+
+  if (kr == KERN_DENIED) {
+    Report(
+        "WARN: mach_vm_region_recurse returned KERN_DENIED when checking "
+        "whether an address is mapped.\n");
+    Report("HINT: Is mach_vm_region_recurse allowed by sandbox?\n");
+  }
+
+  return (kr == KERN_SUCCESS && addr >= address && addr < address + vmsize);
+}
+
 // FIXME implement on this platform.
 void GetMemoryProfile(fill_profile_f cb, uptr *stats) {}
 
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_mac.h b/compiler-rt/lib/sanitizer_common/sanitizer_mac.h
index b0e4ac7f40745..789dd8e4d8e9c 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_mac.h
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_mac.h
@@ -76,6 +76,8 @@ struct ThreadEventCallbacks {
 
 void InstallPthreadIntrospectionHook(const ThreadEventCallbacks &callbacks);
 
+bool IsAddressInMappedRegion(uptr addr);
+
 }  // namespace __sanitizer
 
 #endif  // SANITIZER_APPLE
diff --git a/compiler-rt/lib/tsan/rtl/tsan_platform_mac.cpp b/compiler-rt/lib/tsan/rtl/tsan_platform_mac.cpp
index eb344df168ab9..62ab0554df08e 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_platform_mac.cpp
+++ b/compiler-rt/lib/tsan/rtl/tsan_platform_mac.cpp
@@ -226,9 +226,20 @@ static void ThreadTerminateCallback(uptr thread) {
 void InitializePlatformEarly() {
 #  if !SANITIZER_GO && SANITIZER_IOS
   uptr max_vm = GetMaxUserVirtualAddress() + 1;
-  if (max_vm != HiAppMemEnd()) {
-    Printf("ThreadSanitizer: unsupported vm address limit %p, expected %p.\n",
-           (void *)max_vm, (void *)HiAppMemEnd());
+  if (max_vm < HiAppMemEnd()) {
+    Report(
+        "ThreadSanitizer: Unsupported virtual memory layout:\n\tVM address "
+        "limit = %p\n\tExpected %p.\n",
+        (void*)max_vm, (void*)HiAppMemEnd());
+    Die();
+  }
+  // In some configurations, the max_vm is expanded, but much of this space is
+  // already mapped. TSAN will not work in this configuration.
+  if (IsAddressInMappedRegion(HiAppMemEnd() - 1)) {
+    Report(
+        "ThreadSanitizer: Unsupported virtual memory layout: Address %p is "
+        "already mapped.\n",
+        (void*)(HiAppMemEnd() - 1));
     Die();
   }
 #endif
diff --git a/compiler-rt/test/asan/TestCases/Darwin/sandbox-vm-region-recurse.cpp b/compiler-rt/test/asan/TestCases/Darwin/sandbox-vm-region-recurse.cpp
new file mode 100644
index 0000000000000..c496d822a7fb8
--- /dev/null
+++ b/compiler-rt/test/asan/TestCases/Darwin/sandbox-vm-region-recurse.cpp
@@ -0,0 +1,33 @@
+// Check that if mach_vm_region_recurse is disallowed by sandbox, we report a message saying so.
+
+// RUN: %clangxx_asan -O0 %s -o %t
+// RUN: not %run sandbox-exec -p '(version 1)(allow default)(deny syscall-mig (kernel-mig-routine mach_vm_region_recurse))' %t 2>&1 | FileCheck --check-prefix=CHECK-DENY %s
+// RUN: not %run %t 2>&1 | FileCheck --check-prefix=CHECK-ALLOW %s
+// RUN: %clangxx_asan -O3 %s -o %t
+// RUN: not %run sandbox-exec -p '(version 1)(allow default)(deny syscall-mig (kernel-mig-routine mach_vm_region_recurse))' %t 2>&1 | FileCheck --check-prefix=CHECK-DENY %s
+// RUN: not %run %t 2>&1 | FileCheck --check-prefix=CHECK-ALLOW %s
+
+// sandbox-exec isn't available on iOS
+// UNSUPPORTED: ios
+
+// x86_64 does not use ASAN_SHADOW_OFFSET_DYNAMIC
+// UNSUPPORTED: x86_64-darwin || x86_64h-darwin
+
+#include <stdlib.h>
+
+int main() {
+  char *x = (char *)malloc(10 * sizeof(char));
+  free(x);
+  return x[5];
+  // CHECK-ALLOW: {{.*ERROR: AddressSanitizer: heap-use-after-free on address}}
+  // CHECK-DENY-NOT: {{.*ERROR: AddressSanitizer: heap-use-after-free on address}}
+  // CHECK-ALLOW: {{READ of size 1 at 0x.* thread T0}}
+  // CHECK-ALLOW: {{    #0 0x.* in main}}
+  // CHECK-ALLOW: {{freed by thread T0 here:}}
+  // CHECK-ALLOW: {{    #0 0x.* in free}}
+  // CHECK-ALLOW: {{    #1 0x.* in main}}
+  // CHECK-ALLOW: {{previously allocated by thread T0 here:}}
+  // CHECK-ALLOW: {{    #0 0x.* in malloc}}
+  // CHECK-ALLOW: {{    #1 0x.* in main}}
+  // CHECK-DENY: {{.*HINT: Ensure mach_vm_region_recurse is allowed under sandbox}}
+}
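
Editor's note (illustrative, not part of the patch): every new error path above hinges on mach_vm_region_recurse being reachable from inside the sandbox. The standalone sketch below uses the same call pattern as PrintVmmap and FindAvailableMemoryRange to walk the task's VM map and merge contiguous regions; running it under the sandbox-exec profile from the new test should make the very first call fail with KERN_DENIED. The file and binary names are hypothetical, and only Mach APIs already referenced in the patch are used.

// sandbox_vmmap_sketch.cpp (hypothetical, for local experimentation only)
//   clang++ sandbox_vmmap_sketch.cpp -o vmmap_sketch
//   sandbox-exec -p '(version 1)(allow default)(deny syscall-mig (kernel-mig-routine mach_vm_region_recurse))' ./vmmap_sketch
#include <cinttypes>
#include <cstdio>

#include <mach/mach.h>
#include <mach/mach_error.h>
#include <mach/mach_vm.h>

int main() {
  mach_vm_address_t address = 0;
  mach_vm_address_t last = 0, lastsz = 0;

  while (true) {
    mach_vm_size_t vmsize = 0;
    natural_t depth = 0;
    vm_region_submap_short_info_data_64_t vminfo;
    mach_msg_type_number_t count = VM_REGION_SUBMAP_SHORT_INFO_COUNT_64;
    kern_return_t kr =
        mach_vm_region_recurse(mach_task_self(), &address, &vmsize, &depth,
                               (vm_region_recurse_info_t)&vminfo, &count);

    if (kr == KERN_DENIED) {
      // The case the new HINT messages describe: the MIG routine was
      // filtered out by the sandbox profile.
      fprintf(stderr, "mach_vm_region_recurse denied: %s\n",
              mach_error_string(kr));
      return 1;
    }
    if (kr != KERN_SUCCESS) {
      // Typically KERN_INVALID_ADDRESS: no regions past `address`.
      // Flush the last merged range before exiting, as PrintVmmap does.
      if (lastsz)
        printf("[0x%016" PRIx64 ", 0x%016" PRIx64 ") size=0x%" PRIx64 "\n",
               (uint64_t)last, (uint64_t)(last + lastsz), (uint64_t)lastsz);
      break;
    }

    if (last + lastsz == address) {
      lastsz += vmsize;  // contiguous with the previous region; merge
    } else {
      if (lastsz)
        printf("[0x%016" PRIx64 ", 0x%016" PRIx64 ") size=0x%" PRIx64 "\n",
               (uint64_t)last, (uint64_t)(last + lastsz), (uint64_t)lastsz);
      last = address;
      lastsz = vmsize;
    }
    address += vmsize;
  }
  return 0;
}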