diff --git a/compiler-rt/lib/asan/asan_internal.h b/compiler-rt/lib/asan/asan_internal.h
index d4bfe996b664e7..cfb54927c6cf4f 100644
--- a/compiler-rt/lib/asan/asan_internal.h
+++ b/compiler-rt/lib/asan/asan_internal.h
@@ -118,8 +118,6 @@ void AppendToErrorMessageBuffer(const char *buffer);
 
 void *AsanDlSymNext(const char *sym);
 
-void ReserveShadowMemoryRange(uptr beg, uptr end, const char *name);
-
 // Returns `true` iff most of ASan init process should be skipped due to the
 // ASan library being loaded via `dlopen()`. Platforms may perform any
 // `dlopen()` specific initialization inside this function.
diff --git a/compiler-rt/lib/asan/asan_linux.cpp b/compiler-rt/lib/asan/asan_linux.cpp
index ce5e873dc51803..aa93bbd79d1324 100644
--- a/compiler-rt/lib/asan/asan_linux.cpp
+++ b/compiler-rt/lib/asan/asan_linux.cpp
@@ -87,25 +87,12 @@ void *AsanDoesNotSupportStaticLinkage() {
   return &_DYNAMIC;  // defined in link.h
 }
 
-static void UnmapFromTo(uptr from, uptr to) {
-  CHECK(to >= from);
-  if (to == from) return;
-  uptr res = internal_munmap(reinterpret_cast<void *>(from), to - from);
-  if (UNLIKELY(internal_iserror(res))) {
-    Report(
-        "ERROR: AddresSanitizer failed to unmap 0x%zx (%zd) bytes at address "
-        "%p\n",
-        to - from, to - from, from);
-    CHECK("unable to unmap" && 0);
-  }
-}
-
 #if ASAN_PREMAP_SHADOW
-uptr FindPremappedShadowStart() {
+uptr FindPremappedShadowStart(uptr shadow_size_bytes) {
   uptr granularity = GetMmapGranularity();
   uptr shadow_start = reinterpret_cast<uptr>(&__asan_shadow);
   uptr premap_shadow_size = PremapShadowSize();
-  uptr shadow_size = RoundUpTo(kHighShadowEnd, granularity);
+  uptr shadow_size = RoundUpTo(shadow_size_bytes, granularity);
   // We may have mapped too much. Release extra memory.
   UnmapFromTo(shadow_start + shadow_size, shadow_start + premap_shadow_size);
   return shadow_start;
@@ -113,25 +100,14 @@ uptr FindPremappedShadowStart() {
 #endif
 
 uptr FindDynamicShadowStart() {
+  uptr shadow_size_bytes = MemToShadowSize(kHighMemEnd);
 #if ASAN_PREMAP_SHADOW
   if (!PremapShadowFailed())
-    return FindPremappedShadowStart();
+    return FindPremappedShadowStart(shadow_size_bytes);
 #endif
 
-  uptr granularity = GetMmapGranularity();
-  uptr alignment = granularity * 8;
-  uptr left_padding = granularity;
-  uptr shadow_size = RoundUpTo(kHighShadowEnd, granularity);
-  uptr map_size = shadow_size + left_padding + alignment;
-
-  uptr map_start = (uptr)MmapNoAccess(map_size);
-  CHECK_NE(map_start, ~(uptr)0);
-
-  uptr shadow_start = RoundUpTo(map_start + left_padding, alignment);
-  UnmapFromTo(map_start, shadow_start - left_padding);
-  UnmapFromTo(shadow_start + shadow_size, map_start + map_size);
-
-  return shadow_start;
+  return MapDynamicShadow(shadow_size_bytes, SHADOW_SCALE,
+                          /*min_shadow_base_alignment*/ 0, kHighMemEnd);
 }
 
 void AsanApplyToGlobals(globals_op_fptr op, const void *needle) {
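
Commentary (not part of the patch): with this change ASan sizes its shadow from MemToShadowSize(kHighMemEnd) on every path, and the premap path only trims whatever the pre-reservation mapped beyond that. Below is a minimal standalone sketch of the trimming arithmetic with made-up numbers; the constants and the local RoundUpTo stand in for GetMmapGranularity(), PremapShadowSize() and the computed shadow size.

```cpp
// Illustrative sketch, not sanitizer code: how the premapped-shadow path
// decides what to keep and what to release back to the OS.
#include <cassert>
#include <cstdint>
#include <cstdio>

using uptr = uintptr_t;

static uptr RoundUpTo(uptr size, uptr boundary) {
  return (size + boundary - 1) & ~(boundary - 1);
}

int main() {
  const uptr granularity = 4096;                // page-size stand-in
  const uptr premap_shadow_size = 1ULL << 32;   // what was premapped
  const uptr needed_shadow_bytes = 3ULL << 30;  // MemToShadowSize(kHighMemEnd)

  uptr shadow_size = RoundUpTo(needed_shadow_bytes, granularity);
  assert(shadow_size <= premap_shadow_size);

  // The tail [shadow_start + shadow_size, shadow_start + premap_shadow_size)
  // is what UnmapFromTo() releases in FindPremappedShadowStart().
  printf("keep %zu bytes, release %zu bytes\n", (size_t)shadow_size,
         (size_t)(premap_shadow_size - shadow_size));
}
```
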
diff --git a/compiler-rt/lib/asan/asan_mac.cpp b/compiler-rt/lib/asan/asan_mac.cpp
index a8d3f5d3473c40..3182aacb0b5e90 100644
--- a/compiler-rt/lib/asan/asan_mac.cpp
+++ b/compiler-rt/lib/asan/asan_mac.cpp
@@ -55,46 +55,8 @@ void *AsanDoesNotSupportStaticLinkage() {
 }
 
 uptr FindDynamicShadowStart() {
-  uptr granularity = GetMmapGranularity();
-  uptr alignment = 8 * granularity;
-  uptr left_padding = granularity;
-  uptr space_size = kHighShadowEnd + left_padding;
-
-  uptr largest_gap_found = 0;
-  uptr max_occupied_addr = 0;
-  VReport(2, "FindDynamicShadowStart, space_size = %p\n", space_size);
-  uptr shadow_start =
-      FindAvailableMemoryRange(space_size, alignment, granularity,
-                               &largest_gap_found, &max_occupied_addr);
-  // If the shadow doesn't fit, restrict the address space to make it fit.
-  if (shadow_start == 0) {
-    VReport(
-        2,
-        "Shadow doesn't fit, largest_gap_found = %p, max_occupied_addr = %p\n",
-        largest_gap_found, max_occupied_addr);
-    uptr new_max_vm = RoundDownTo(largest_gap_found << SHADOW_SCALE, alignment);
-    if (new_max_vm < max_occupied_addr) {
-      Report("Unable to find a memory range for dynamic shadow.\n");
-      Report(
-          "space_size = %p, largest_gap_found = %p, max_occupied_addr = %p, "
-          "new_max_vm = %p\n",
-          space_size, largest_gap_found, max_occupied_addr, new_max_vm);
-      CHECK(0 && "cannot place shadow");
-    }
-    RestrictMemoryToMaxAddress(new_max_vm);
-    kHighMemEnd = new_max_vm - 1;
-    space_size = kHighShadowEnd + left_padding;
-    VReport(2, "FindDynamicShadowStart, space_size = %p\n", space_size);
-    shadow_start = FindAvailableMemoryRange(space_size, alignment, granularity,
-                                            nullptr, nullptr);
-    if (shadow_start == 0) {
-      Report("Unable to find a memory range after restricting VM.\n");
-      CHECK(0 && "cannot place shadow after restricting vm");
-    }
-  }
-  CHECK_NE((uptr)0, shadow_start);
-  CHECK(IsAligned(shadow_start, alignment));
-  return shadow_start;
+  return MapDynamicShadow(MemToShadowSize(kHighMemEnd), SHADOW_SCALE,
+                          /*min_shadow_base_alignment*/ 0, kHighMemEnd);
 }
 
 // No-op. Mac does not support static linkage anyway.
diff --git a/compiler-rt/lib/asan/asan_mapping.h b/compiler-rt/lib/asan/asan_mapping.h
index 41fb49ee46d460..c64c0335673140 100644
--- a/compiler-rt/lib/asan/asan_mapping.h
+++ b/compiler-rt/lib/asan/asan_mapping.h
@@ -304,6 +304,7 @@ extern uptr kHighMemEnd, kMidMemBeg, kMidMemEnd;  // Initialized in __asan_init.
 
 namespace __asan {
 
+static inline uptr MemToShadowSize(uptr size) { return size >> SHADOW_SCALE; }
 static inline bool AddrIsInLowMem(uptr a) {
   PROFILE_ASAN_MAPPING();
   return a <= kLowMemEnd;
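
Commentary (not part of the patch): the new MemToShadowSize() helper just divides by the shadow granularity, so the reservation size scales directly with kHighMemEnd. A rough back-of-the-envelope check, assuming the default SHADOW_SCALE of 3 and a 47-bit user address space (both are assumptions for the example):

```cpp
// Quick arithmetic check of size >> SHADOW_SCALE for a typical layout.
#include <cstdint>
#include <cstdio>

using uptr = uint64_t;
constexpr uptr kShadowScale = 3;  // stand-in for SHADOW_SCALE

constexpr uptr MemToShadowSize(uptr size) { return size >> kShadowScale; }

int main() {
  const uptr kHighMemEnd = (1ULL << 47) - 1;  // assumed 47-bit user space
  // Prints 16383, i.e. just under 16 TiB of address space reserved for shadow.
  printf("shadow reservation ~= %llu GiB\n",
         (unsigned long long)(MemToShadowSize(kHighMemEnd) >> 30));
}
```
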
diff --git a/compiler-rt/lib/asan/asan_premap_shadow.cpp b/compiler-rt/lib/asan/asan_premap_shadow.cpp
index 7835e99748ffa7..666bb9b34bd399 100644
--- a/compiler-rt/lib/asan/asan_premap_shadow.cpp
+++ b/compiler-rt/lib/asan/asan_premap_shadow.cpp
@@ -32,22 +32,8 @@ uptr PremapShadowSize() {
 // Returns an address aligned to 8 pages, such that one page on the left and
 // PremapShadowSize() bytes on the right of it are mapped r/o.
 uptr PremapShadow() {
-  uptr granularity = GetMmapGranularity();
-  uptr alignment = granularity * 8;
-  uptr left_padding = granularity;
-  uptr shadow_size = PremapShadowSize();
-  uptr map_size = shadow_size + left_padding + alignment;
-
-  uptr map_start = (uptr)MmapNoAccess(map_size);
-  CHECK_NE(map_start, ~(uptr)0);
-
-  uptr shadow_start = RoundUpTo(map_start + left_padding, alignment);
-  uptr shadow_end = shadow_start + shadow_size;
-  internal_munmap(reinterpret_cast<void *>(map_start),
-                  shadow_start - left_padding - map_start);
-  internal_munmap(reinterpret_cast<void *>(shadow_end),
-                  map_start + map_size - shadow_end);
-  return shadow_start;
+  return MapDynamicShadow(PremapShadowSize(), /*mmap_alignment_scale*/ 3,
+                          /*min_shadow_base_alignment*/ 0, kHighMemEnd);
 }
 
 bool PremapShadowFailed() {
diff --git a/compiler-rt/lib/asan/asan_rtl.cpp b/compiler-rt/lib/asan/asan_rtl.cpp
index 463bfa02f9f16a..115733cdaa48e1 100644
--- a/compiler-rt/lib/asan/asan_rtl.cpp
+++ b/compiler-rt/lib/asan/asan_rtl.cpp
@@ -319,7 +319,7 @@ static void InitializeHighMemEnd() {
   kHighMemEnd = GetMaxUserVirtualAddress();
   // Increase kHighMemEnd to make sure it's properly
   // aligned together with kHighMemBeg:
-  kHighMemEnd |= SHADOW_GRANULARITY * GetMmapGranularity() - 1;
+  kHighMemEnd |= (GetMmapGranularity() << SHADOW_SCALE) - 1;
 #endif  // !ASAN_FIXED_MAPPING
   CHECK_EQ((kHighMemBeg % GetMmapGranularity()), 0);
 #endif  // !SANITIZER_MYRIAD2
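
Commentary (not part of the patch): the asan_rtl.cpp hunk only rewrites the alignment mask in terms of SHADOW_SCALE; since SHADOW_GRANULARITY is defined as 1 << SHADOW_SCALE, the value is unchanged. A compile-time check of that equivalence, using placeholder constants:

```cpp
// Placeholder constants stand in for SHADOW_SCALE, SHADOW_GRANULARITY and
// GetMmapGranularity(); the point is only that the two masks are equal.
#include <cstdint>

using uptr = uint64_t;
constexpr uptr kShadowScale = 3;
constexpr uptr kShadowGranularity = 1ULL << kShadowScale;
constexpr uptr kMmapGranularity = 0x10000;  // e.g. 64K allocation granularity

static_assert(kShadowGranularity * kMmapGranularity - 1 ==
                  (kMmapGranularity << kShadowScale) - 1,
              "old and new kHighMemEnd masks agree");

int main() { return 0; }
```
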
ABORTING.\n"); - DumpProcessMap(); - Die(); + __sanitizer::ProtectGap(addr, size, kZeroBaseShadowStart, + kZeroBaseMaxShadowStart); } static void MaybeReportLinuxPIEBug() { @@ -99,8 +63,6 @@ void InitializeShadowMemory() { // |kDefaultShadowSentinel|. bool full_shadow_is_available = false; if (shadow_start == kDefaultShadowSentinel) { - __asan_shadow_memory_dynamic_address = 0; - CHECK_EQ(0, kLowShadowBeg); shadow_start = FindDynamicShadowStart(); if (SANITIZER_LINUX) full_shadow_is_available = true; } diff --git a/compiler-rt/lib/asan/asan_win.cpp b/compiler-rt/lib/asan/asan_win.cpp index 03feddbe86b44c..fe635c2d5b6b49 100644 --- a/compiler-rt/lib/asan/asan_win.cpp +++ b/compiler-rt/lib/asan/asan_win.cpp @@ -247,15 +247,8 @@ void *AsanDoesNotSupportStaticLinkage() { } uptr FindDynamicShadowStart() { - uptr granularity = GetMmapGranularity(); - uptr alignment = 8 * granularity; - uptr left_padding = granularity; - uptr space_size = kHighShadowEnd + left_padding; - uptr shadow_start = FindAvailableMemoryRange(space_size, alignment, - granularity, nullptr, nullptr); - CHECK_NE((uptr)0, shadow_start); - CHECK(IsAligned(shadow_start, alignment)); - return shadow_start; + return MapDynamicShadow(MemToShadowSize(kHighMemEnd), SHADOW_SCALE, + /*min_shadow_base_alignment*/ 0, kHighMemEnd); } void AsanCheckDynamicRTPrereqs() {} diff --git a/compiler-rt/lib/hwasan/hwasan.cpp b/compiler-rt/lib/hwasan/hwasan.cpp index d67a88d455efff..11b4d3891bc2cf 100644 --- a/compiler-rt/lib/hwasan/hwasan.cpp +++ b/compiler-rt/lib/hwasan/hwasan.cpp @@ -286,8 +286,6 @@ void __hwasan_init() { // initialized when InitInstrumentation() was called. GetCurrentThread()->InitRandomState(); - MadviseShadow(); - SetPrintfAndReportCallback(AppendToErrorMessageBuffer); // This may call libc -> needs initialized shadow. AndroidLogInit(); diff --git a/compiler-rt/lib/hwasan/hwasan.h b/compiler-rt/lib/hwasan/hwasan.h index 8cbd9e74e33506..b8b7a1865e8606 100644 --- a/compiler-rt/lib/hwasan/hwasan.h +++ b/compiler-rt/lib/hwasan/hwasan.h @@ -75,7 +75,6 @@ extern int hwasan_report_count; bool InitShadow(); void InitPrctl(); void InitThreads(); -void MadviseShadow(); void InitializeInterceptors(); void HwasanAllocatorInit(); diff --git a/compiler-rt/lib/hwasan/hwasan_dynamic_shadow.cpp b/compiler-rt/lib/hwasan/hwasan_dynamic_shadow.cpp index a04751f44a3112..12730b29bae367 100644 --- a/compiler-rt/lib/hwasan/hwasan_dynamic_shadow.cpp +++ b/compiler-rt/lib/hwasan/hwasan_dynamic_shadow.cpp @@ -24,47 +24,6 @@ // The code in this file needs to run in an unrelocated binary. It should not // access any external symbol, including its own non-hidden globals. -namespace __hwasan { - -static void UnmapFromTo(uptr from, uptr to) { - if (to == from) - return; - CHECK(to >= from); - uptr res = internal_munmap(reinterpret_cast(from), to - from); - if (UNLIKELY(internal_iserror(res))) { - Report("ERROR: %s failed to unmap 0x%zx (%zd) bytes at address %p\n", - SanitizerToolName, to - from, to - from, from); - CHECK("unable to unmap" && 0); - } -} - -// Returns an address aligned to kShadowBaseAlignment, such that -// 2**kShadowBaseAlingment on the left and shadow_size_bytes bytes on the right -// of it are mapped no access. 
diff --git a/compiler-rt/lib/hwasan/hwasan_dynamic_shadow.cpp b/compiler-rt/lib/hwasan/hwasan_dynamic_shadow.cpp
index a04751f44a3112..12730b29bae367 100644
--- a/compiler-rt/lib/hwasan/hwasan_dynamic_shadow.cpp
+++ b/compiler-rt/lib/hwasan/hwasan_dynamic_shadow.cpp
@@ -24,47 +24,6 @@
 // The code in this file needs to run in an unrelocated binary. It should not
 // access any external symbol, including its own non-hidden globals.
 
-namespace __hwasan {
-
-static void UnmapFromTo(uptr from, uptr to) {
-  if (to == from)
-    return;
-  CHECK(to >= from);
-  uptr res = internal_munmap(reinterpret_cast<void *>(from), to - from);
-  if (UNLIKELY(internal_iserror(res))) {
-    Report("ERROR: %s failed to unmap 0x%zx (%zd) bytes at address %p\n",
-           SanitizerToolName, to - from, to - from, from);
-    CHECK("unable to unmap" && 0);
-  }
-}
-
-// Returns an address aligned to kShadowBaseAlignment, such that
-// 2**kShadowBaseAlingment on the left and shadow_size_bytes bytes on the right
-// of it are mapped no access.
-static uptr MapDynamicShadow(uptr shadow_size_bytes) {
-  const uptr granularity = GetMmapGranularity();
-  const uptr min_alignment = granularity << kShadowScale;
-  const uptr alignment = 1ULL << kShadowBaseAlignment;
-  CHECK_GE(alignment, min_alignment);
-
-  const uptr left_padding = 1ULL << kShadowBaseAlignment;
-  const uptr shadow_size =
-      RoundUpTo(shadow_size_bytes, granularity);
-  const uptr map_size = shadow_size + left_padding + alignment;
-
-  const uptr map_start = (uptr)MmapNoAccess(map_size);
-  CHECK_NE(map_start, ~(uptr)0);
-
-  const uptr shadow_start = RoundUpTo(map_start + left_padding, alignment);
-
-  UnmapFromTo(map_start, shadow_start - left_padding);
-  UnmapFromTo(shadow_start + shadow_size, map_start + map_size);
-
-  return shadow_start;
-}
-
-}  // namespace __hwasan
-
 #if SANITIZER_ANDROID
 extern "C" {
 
@@ -82,7 +41,8 @@ static uptr PremapShadowSize() {
 }
 
 static uptr PremapShadow() {
-  return MapDynamicShadow(PremapShadowSize());
+  return MapDynamicShadow(PremapShadowSize(), kShadowScale,
+                          kShadowBaseAlignment, kHighMemEnd);
 }
 
 static bool IsPremapShadowAvailable() {
@@ -146,7 +106,8 @@ void InitShadowGOT() {
 uptr FindDynamicShadowStart(uptr shadow_size_bytes) {
   if (IsPremapShadowAvailable())
    return FindPremappedShadowStart(shadow_size_bytes);
-  return MapDynamicShadow(shadow_size_bytes);
+  return MapDynamicShadow(shadow_size_bytes, kShadowScale, kShadowBaseAlignment,
+                          kHighMemEnd);
 }
 
 }  // namespace __hwasan
@@ -156,7 +117,8 @@ namespace __hwasan {
 void InitShadowGOT() {}
 
 uptr FindDynamicShadowStart(uptr shadow_size_bytes) {
-  return MapDynamicShadow(shadow_size_bytes);
+  return MapDynamicShadow(shadow_size_bytes, kShadowScale, kShadowBaseAlignment,
+                          kHighMemEnd);
 }
 
 }  // namespace __hwasan
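
Commentary (not part of the patch): the HWASan-local MapDynamicShadow() is replaced by the generic one, which derives its alignment and left padding from shadow_scale and min_shadow_base_alignment instead of hard-coding kShadowBaseAlignment. A standalone illustration of that computation; the concrete values below (shadow scale 4, base alignment 32) are assumptions for the example, not taken from the patch.

```cpp
// How the shared helper picks the stricter of the two alignment constraints.
#include <algorithm>
#include <cstdint>
#include <cstdio>

using uptr = uint64_t;

int main() {
  const uptr granularity = 4096;               // GetMmapGranularity() stand-in
  const uptr shadow_scale = 4;                 // assumed HWASan kShadowScale
  const uptr min_shadow_base_alignment = 32;   // assumed kShadowBaseAlignment

  const uptr alignment = std::max(granularity << shadow_scale,
                                  uptr(1) << min_shadow_base_alignment);
  const uptr left_padding =
      std::max(granularity, uptr(1) << min_shadow_base_alignment);

  // With these numbers both come out to 1 << 32, matching the old
  // HWASan-only helper; ASan passes 0, so it falls back to
  // granularity << SHADOW_SCALE and a single padding page.
  printf("alignment = 0x%llx, left_padding = 0x%llx\n",
         (unsigned long long)alignment, (unsigned long long)left_padding);
}
```
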
ABORTING.\n", (void *)addr, - (void *)(addr + size)); - DumpProcessMap(); - Die(); + __sanitizer::ProtectGap(addr, size, kZeroBaseShadowStart, + kZeroBaseMaxShadowStart); } -static uptr kLowMemStart; -static uptr kLowMemEnd; -static uptr kLowShadowEnd; -static uptr kLowShadowStart; -static uptr kHighShadowStart; -static uptr kHighShadowEnd; -static uptr kHighMemStart; -static uptr kHighMemEnd; +uptr kLowMemStart; +uptr kLowMemEnd; +uptr kLowShadowEnd; +uptr kLowShadowStart; +uptr kHighShadowStart; +uptr kHighShadowEnd; +uptr kHighMemStart; +uptr kHighMemEnd; static void PrintRange(uptr start, uptr end, const char *name) { Printf("|| [%p, %p] || %.*s ||\n", (void *)start, (void *)end, 10, name); @@ -242,24 +210,12 @@ void InitThreads() { uptr thread_space_end = __hwasan_shadow_memory_dynamic_address - guard_page_size; ReserveShadowMemoryRange(thread_space_start, thread_space_end - 1, - "hwasan threads"); + "hwasan threads", /*madvise_shadow*/ false); ProtectGap(thread_space_end, __hwasan_shadow_memory_dynamic_address - thread_space_end); InitThreadList(thread_space_start, thread_space_end - thread_space_start); } -static void MadviseShadowRegion(uptr beg, uptr end) { - uptr size = end - beg + 1; - SetShadowRegionHugePageMode(beg, size); - if (common_flags()->use_madv_dontdump) - DontDumpShadowMemory(beg, size); -} - -void MadviseShadow() { - MadviseShadowRegion(kLowShadowStart, kLowShadowEnd); - MadviseShadowRegion(kHighShadowStart, kHighShadowEnd); -} - bool MemIsApp(uptr p) { CHECK(GetTagFromPointer(p) == 0); return p >= kHighMemStart || (p >= kLowMemStart && p <= kLowMemEnd); diff --git a/compiler-rt/lib/hwasan/hwasan_mapping.h b/compiler-rt/lib/hwasan/hwasan_mapping.h index a86ad7ca803602..c149687bdfa60e 100644 --- a/compiler-rt/lib/hwasan/hwasan_mapping.h +++ b/compiler-rt/lib/hwasan/hwasan_mapping.h @@ -39,6 +39,15 @@ constexpr uptr kShadowAlignment = 1ULL << kShadowScale; namespace __hwasan { +extern uptr kLowMemStart; +extern uptr kLowMemEnd; +extern uptr kLowShadowEnd; +extern uptr kLowShadowStart; +extern uptr kHighShadowStart; +extern uptr kHighShadowEnd; +extern uptr kHighMemStart; +extern uptr kHighMemEnd; + inline uptr MemToShadow(uptr untagged_addr) { return (untagged_addr >> kShadowScale) + __hwasan_shadow_memory_dynamic_address; diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_common.h b/compiler-rt/lib/sanitizer_common/sanitizer_common.h index 07b307a602c975..72e92e7ad70ea8 100644 --- a/compiler-rt/lib/sanitizer_common/sanitizer_common.h +++ b/compiler-rt/lib/sanitizer_common/sanitizer_common.h @@ -121,6 +121,30 @@ bool MprotectReadOnly(uptr addr, uptr size); void MprotectMallocZones(void *addr, int prot); +// Get the max address, taking into account alignment due to the mmap +// granularity and shadow size. +uptr GetHighMemEnd(uptr shadow_scale); + +// Maps shadow_size_bytes of shadow memory and returns shadow address. It will +// be aligned to the mmap granularity * 2^shadow_scale, or to +// 2^min_shadow_base_alignment if that is larger. The returned address will +// have max(2^min_shadow_base_alignment, mmap granularity) on the left, and +// shadow_size_bytes bytes on the right, mapped no access. +// The high_mem_end may be updated if the original shadow size doesn't fit. +uptr MapDynamicShadow(uptr shadow_size_bytes, uptr shadow_scale, + uptr min_shadow_base_alignment, uptr &high_mem_end); + +// Reserve memory range [beg, end]. If madvise_shadow is true then apply +// madvise (e.g. hugepages, core dumping) requested by options. 
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_common.h b/compiler-rt/lib/sanitizer_common/sanitizer_common.h
index 07b307a602c975..72e92e7ad70ea8 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_common.h
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_common.h
@@ -121,6 +121,30 @@ bool MprotectReadOnly(uptr addr, uptr size);
 
 void MprotectMallocZones(void *addr, int prot);
 
+// Get the max address, taking into account alignment due to the mmap
+// granularity and shadow size.
+uptr GetHighMemEnd(uptr shadow_scale);
+
+// Maps shadow_size_bytes of shadow memory and returns shadow address. It will
+// be aligned to the mmap granularity * 2^shadow_scale, or to
+// 2^min_shadow_base_alignment if that is larger. The returned address will
+// have max(2^min_shadow_base_alignment, mmap granularity) on the left, and
+// shadow_size_bytes bytes on the right, mapped no access.
+// The high_mem_end may be updated if the original shadow size doesn't fit.
+uptr MapDynamicShadow(uptr shadow_size_bytes, uptr shadow_scale,
+                      uptr min_shadow_base_alignment, uptr &high_mem_end);
+
+// Reserve memory range [beg, end]. If madvise_shadow is true then apply
+// madvise (e.g. hugepages, core dumping) requested by options.
+void ReserveShadowMemoryRange(uptr beg, uptr end, const char *name,
+                              bool madvise_shadow = true);
+
+// Protect size bytes of memory starting at addr. Also try to protect
+// several pages at the start of the address space as specified by
+// zero_base_shadow_start, at most up to the size or zero_base_max_shadow_start.
+void ProtectGap(uptr addr, uptr size, uptr zero_base_shadow_start,
+                uptr zero_base_max_shadow_start);
+
 // Find an available address space.
 uptr FindAvailableMemoryRange(uptr size, uptr alignment, uptr left_padding,
                               uptr *largest_gap_found, uptr *max_occupied_addr);
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_common_libcdep.cpp b/compiler-rt/lib/sanitizer_common/sanitizer_common_libcdep.cpp
index 0c918ebb4a9d65..ddd688bb2dca47 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_common_libcdep.cpp
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_common_libcdep.cpp
@@ -139,6 +139,55 @@ uptr ReservedAddressRange::InitAligned(uptr size, uptr align,
   return start;
 }
 
+// Reserve memory range [beg, end].
+// We need to use inclusive range because end+1 may not be representable.
+void ReserveShadowMemoryRange(uptr beg, uptr end, const char *name,
+                              bool madvise_shadow) {
+  CHECK_EQ((beg % GetMmapGranularity()), 0);
+  CHECK_EQ(((end + 1) % GetMmapGranularity()), 0);
+  uptr size = end - beg + 1;
+  DecreaseTotalMmap(size);  // Don't count the shadow against mmap_limit_mb.
+  if (madvise_shadow ? !MmapFixedSuperNoReserve(beg, size, name)
+                     : !MmapFixedNoReserve(beg, size, name)) {
+    Report(
+        "ReserveShadowMemoryRange failed while trying to map 0x%zx bytes. "
+        "Perhaps you're using ulimit -v\n",
+        size);
+    Abort();
+  }
+  if (madvise_shadow && common_flags()->use_madv_dontdump)
+    DontDumpShadowMemory(beg, size);
+}
+
+void ProtectGap(uptr addr, uptr size, uptr zero_base_shadow_start,
+                uptr zero_base_max_shadow_start) {
+  if (!size)
+    return;
+  void *res = MmapFixedNoAccess(addr, size, "shadow gap");
+  if (addr == (uptr)res)
+    return;
+  // A few pages at the start of the address space can not be protected.
+  // But we really want to protect as much as possible, to prevent this memory
+  // being returned as a result of a non-FIXED mmap().
+  if (addr == zero_base_shadow_start) {
+    uptr step = GetMmapGranularity();
+    while (size > step && addr < zero_base_max_shadow_start) {
+      addr += step;
+      size -= step;
+      void *res = MmapFixedNoAccess(addr, size, "shadow gap");
+      if (addr == (uptr)res)
+        return;
+    }
+  }
+
+  Report(
+      "ERROR: Failed to protect the shadow gap. "
+      "%s cannot proceed correctly. ABORTING.\n",
+      SanitizerToolName);
+  DumpProcessMap();
+  Die();
+}
+
 }  // namespace __sanitizer
 
 SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_sandbox_on_notify,
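
Commentary (not part of the patch): the shared __sanitizer::ProtectGap() keeps ASan's retry behaviour but parameterizes the zero-base handling, so each tool passes its own kZeroBaseShadowStart / kZeroBaseMaxShadowStart. Below is a simplified standalone sketch of that retry policy; TryProtect() is a stand-in for MmapFixedNoAccess() succeeding at the requested address, and the bounds are example values.

```cpp
// Retry policy sketch: skip unprotectable leading granules, but only when the
// gap starts at the tool's zero base, and only up to zero_base_max.
#include <cstdint>
#include <cstdio>

using uptr = uint64_t;
constexpr uptr kGranularity = 0x10000;  // GetMmapGranularity() stand-in

// Pretend the first 3 granules of the address space cannot be protected.
static bool TryProtect(uptr addr) { return addr >= 3 * kGranularity; }

static bool ProtectGapSketch(uptr addr, uptr size, uptr zero_base_start,
                             uptr zero_base_max) {
  if (!size) return true;
  if (TryProtect(addr)) return true;
  if (addr != zero_base_start) return false;  // only retry from the zero base
  uptr step = kGranularity;
  while (size > step && addr < zero_base_max) {
    addr += step;
    size -= step;
    if (TryProtect(addr)) return true;
  }
  return false;  // the real code reports the failure and dies here
}

int main() {
  // HWASan-style bounds: zero base 0, retry at most up to 1 << 18.
  printf("%s\n", ProtectGapSketch(0, 1 << 20, 0, 1 << 18) ? "ok" : "fail");
}
```
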
ABORTING.\n", + SanitizerToolName); + DumpProcessMap(); + Die(); +} + } // namespace __sanitizer SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_sandbox_on_notify, diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_linux_libcdep.cpp b/compiler-rt/lib/sanitizer_common/sanitizer_linux_libcdep.cpp index 4d17c9686e4ed2..d4e747d74ff392 100644 --- a/compiler-rt/lib/sanitizer_common/sanitizer_linux_libcdep.cpp +++ b/compiler-rt/lib/sanitizer_common/sanitizer_linux_libcdep.cpp @@ -841,6 +841,41 @@ void ReExec() { } #endif // !SANITIZER_OPENBSD +static void UnmapFromTo(uptr from, uptr to) { + if (to == from) + return; + CHECK(to >= from); + uptr res = internal_munmap(reinterpret_cast(from), to - from); + if (UNLIKELY(internal_iserror(res))) { + Report("ERROR: %s failed to unmap 0x%zx (%zd) bytes at address %p\n", + SanitizerToolName, to - from, to - from, (void *)from); + CHECK("unable to unmap" && 0); + } +} + +uptr MapDynamicShadow(uptr shadow_size_bytes, uptr shadow_scale, + uptr min_shadow_base_alignment, + UNUSED uptr &high_mem_end) { + const uptr granularity = GetMmapGranularity(); + const uptr alignment = + Max(granularity << shadow_scale, 1ULL << min_shadow_base_alignment); + const uptr left_padding = + Max(granularity, 1ULL << min_shadow_base_alignment); + + const uptr shadow_size = RoundUpTo(shadow_size_bytes, granularity); + const uptr map_size = shadow_size + left_padding + alignment; + + const uptr map_start = (uptr)MmapNoAccess(map_size); + CHECK_NE(map_start, ~(uptr)0); + + const uptr shadow_start = RoundUpTo(map_start + left_padding, alignment); + + UnmapFromTo(map_start, shadow_start - left_padding); + UnmapFromTo(shadow_start + shadow_size, map_start + map_size); + + return shadow_start; +} + } // namespace __sanitizer #endif diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_mac.cpp b/compiler-rt/lib/sanitizer_common/sanitizer_mac.cpp index eb9c662190e729..883786e867e71d 100644 --- a/compiler-rt/lib/sanitizer_common/sanitizer_mac.cpp +++ b/compiler-rt/lib/sanitizer_common/sanitizer_mac.cpp @@ -1099,6 +1099,53 @@ uptr GetMaxVirtualAddress() { return GetMaxUserVirtualAddress(); } +uptr MapDynamicShadow(uptr shadow_size_bytes, uptr shadow_scale, + uptr min_shadow_base_alignment, uptr &high_mem_end) { + const uptr granularity = GetMmapGranularity(); + const uptr alignment = + Max(granularity << shadow_scale, 1ULL << min_shadow_base_alignment); + const uptr left_padding = + Max(granularity, 1ULL << min_shadow_base_alignment); + + uptr space_size = shadow_size_bytes + left_padding; + + uptr largest_gap_found = 0; + uptr max_occupied_addr = 0; + VReport(2, "FindDynamicShadowStart, space_size = %p\n", space_size); + uptr shadow_start = + FindAvailableMemoryRange(space_size, alignment, granularity, + &largest_gap_found, &max_occupied_addr); + // If the shadow doesn't fit, restrict the address space to make it fit. 
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_win.cpp b/compiler-rt/lib/sanitizer_common/sanitizer_win.cpp
index fca15beb61612d..53a537d3984754 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_win.cpp
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_win.cpp
@@ -348,6 +348,22 @@ bool DontDumpShadowMemory(uptr addr, uptr length) {
   return true;
 }
 
+uptr MapDynamicShadow(uptr shadow_size_bytes, uptr shadow_scale,
+                      uptr min_shadow_base_alignment,
+                      UNUSED uptr &high_mem_end) {
+  const uptr granularity = GetMmapGranularity();
+  const uptr alignment =
+      Max<uptr>(granularity << shadow_scale, 1ULL << min_shadow_base_alignment);
+  const uptr left_padding =
+      Max<uptr>(granularity, 1ULL << min_shadow_base_alignment);
+  uptr space_size = shadow_size_bytes + left_padding;
+  uptr shadow_start = FindAvailableMemoryRange(space_size, alignment,
+                                               granularity, nullptr, nullptr);
+  CHECK_NE((uptr)0, shadow_start);
+  CHECK(IsAligned(shadow_start, alignment));
+  return shadow_start;
+}
+
 uptr FindAvailableMemoryRange(uptr size, uptr alignment, uptr left_padding,
                               uptr *largest_gap_found,
                               uptr *max_occupied_addr) {