[esan] EfficiencySanitizer shadow memory
Summary:
Adds shadow memory mapping support common to all tools to the new
EfficiencySanitizer ("esan") family of tools.  This includes:

+ Shadow memory layout and mapping support for 64-bit Linux for any
  power-of-2 scale-down (1x, 2x, 4x, 8x, 16x, etc.), ensuring that
  shadow(shadow(address)) does not overlap shadow or application
  memory (see the sketch after this list).

+ Mmap interception to ensure the application does not map on top of
  our shadow memory.

+ Init-time sanity checks for shadow regions.

+ A test of the mmap conflict mechanism.
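
For orientation, the Linux x86_64 translation added in esan_shadow.h (shown
below) reduces to a mask, an add, and a shift. The following is a minimal
standalone sketch using the mask and offset constants from that header; the
helper name exampleAppToShadow and the use of plain uint64_t are illustrative
only, the real implementation is the ShadowMapping class below.

#include <cstdint>

// Sketch of the esan app-to-shadow translation:
//   shadow(app) = ((app & Mask) + Offset) >> Scale
// Mask and offsets are the Linux x86_64 values from esan_shadow.h.
inline uint64_t exampleAppToShadow(uint64_t App, unsigned Scale) {
  const uint64_t Mask = 0x00000fffffffffffull;
  // Scales 0-2 use special-cased offsets so that shadow(shadow(addr))
  // stays disjoint from both application and shadow memory.
  const uint64_t Offsets[3] = {0x0000120000000000ull, 0x0000220000000000ull,
                               0x0000440000000000ull};
  const uint64_t Offset = Scale <= 2 ? Offsets[Scale] : (Offsets[0] << Scale);
  return ((App & Mask) + Offset) >> Scale;
}

For example, exampleAppToShadow(0x00007f0000001000, 0) yields
0x0000210000001000, which lies inside the shadow region that esan_shadow.h
lists for the "libraries + stack" application region.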

Reviewers: aizatsky, filcab

Subscribers: filcab, kubabrecka, llvm-commits, vitalybuka, eugenis, kcc, zhaoqin

Differential Revision: http://reviews.llvm.org/D19921

llvm-svn: 269198
derekbruening committed May 11, 2016
1 parent ee20294 commit 1658c08
Showing 5 changed files with 363 additions and 0 deletions.
66 changes: 66 additions & 0 deletions compiler-rt/lib/esan/esan.cpp
@@ -14,6 +14,7 @@

#include "esan.h"
#include "esan_interface_internal.h"
#include "esan_shadow.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_flag_parser.h"
#include "sanitizer_common/sanitizer_flags.h"
@@ -27,6 +28,7 @@ namespace __esan {

bool EsanIsInitialized;
ToolType WhichTool;
ShadowMapping Mapping;

static const char EsanOptsEnv[] = "ESAN_OPTIONS";

@@ -59,6 +61,69 @@ void processRangeAccess(uptr PC, uptr Addr, int Size, bool IsWrite) {
  }
}

#if SANITIZER_DEBUG
static bool verifyShadowScheme() {
  // Sanity checks for our shadow mapping scheme.
  for (int Scale = 0; Scale < 8; ++Scale) {
    Mapping.initialize(Scale);
    uptr AppStart, AppEnd;
    for (int i = 0; getAppRegion(i, &AppStart, &AppEnd); ++i) {
      DCHECK(isAppMem(AppStart));
      DCHECK(!isAppMem(AppStart - 1));
      DCHECK(isAppMem(AppEnd - 1));
      DCHECK(!isAppMem(AppEnd));
      DCHECK(!isShadowMem(AppStart));
      DCHECK(!isShadowMem(AppEnd - 1));
      DCHECK(isShadowMem(appToShadow(AppStart)));
      DCHECK(isShadowMem(appToShadow(AppEnd - 1)));
      // Double-shadow checks.
      DCHECK(!isShadowMem(appToShadow(appToShadow(AppStart))));
      DCHECK(!isShadowMem(appToShadow(appToShadow(AppEnd - 1))));
    }
    // Ensure no shadow regions overlap each other.
    uptr ShadowAStart, ShadowBStart, ShadowAEnd, ShadowBEnd;
    for (int i = 0; getShadowRegion(i, &ShadowAStart, &ShadowAEnd); ++i) {
      for (int j = 0; getShadowRegion(j, &ShadowBStart, &ShadowBEnd); ++j) {
        DCHECK(i == j || ShadowAStart >= ShadowBEnd ||
               ShadowAEnd <= ShadowBStart);
      }
    }
  }
  return true;
}
#endif

static void initializeShadow() {
  DCHECK(verifyShadowScheme());

  if (WhichTool == ESAN_CacheFrag)
    Mapping.initialize(2); // 4B:1B, so 4 to 1 == >>2.
  else
    UNREACHABLE("unknown tool shadow mapping");

  VPrintf(1, "Shadow scale=%d offset=%p\n", Mapping.Scale, Mapping.Offset);

  uptr ShadowStart, ShadowEnd;
  for (int i = 0; getShadowRegion(i, &ShadowStart, &ShadowEnd); ++i) {
    VPrintf(1, "Shadow #%d: [%zx-%zx) (%zuGB)\n", i, ShadowStart, ShadowEnd,
            (ShadowEnd - ShadowStart) >> 30);

    uptr Map = (uptr)MmapFixedNoReserve(ShadowStart, ShadowEnd - ShadowStart,
                                        "shadow");
    if (Map != ShadowStart) {
      Printf("FATAL: EfficiencySanitizer failed to map its shadow memory.\n");
      Die();
    }

    if (common_flags()->no_huge_pages_for_shadow)
      NoHugePagesInRegion(ShadowStart, ShadowEnd - ShadowStart);
    if (common_flags()->use_madv_dontdump)
      DontDumpShadowMemory(ShadowStart, ShadowEnd - ShadowStart);

    // TODO: Call MmapNoAccess() on in-between regions.
  }
}

static void initializeFlags() {
  // Once we add our own flags we'll parse them here.
  // For now the common ones are sufficient.
@@ -95,6 +160,7 @@ void initializeLibrary(ToolType Tool) {
    Die();
  }

  initializeShadow();
  initializeInterceptors();

  EsanIsInitialized = true;
67 changes: 67 additions & 0 deletions compiler-rt/lib/esan/esan_interceptors.cpp
@@ -13,13 +13,27 @@
//===----------------------------------------------------------------------===//

#include "esan.h"
#include "esan_shadow.h"
#include "interception/interception.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_libc.h"
#include "sanitizer_common/sanitizer_stacktrace.h"

using namespace __esan; // NOLINT

// FIXME: if this gets more complex as more platforms are added we may
// want to split pieces into separate platform-specific files.
#if SANITIZER_LINUX
// Sanitizer runtimes in general want to avoid including system headers.
// We define the few constants we need here:
const int EINVAL = 22; // from /usr/include/asm-generic/errno-base.h
const int MAP_FIXED = 0x10; // from /usr/include/sys/mman.h
extern "C" int *__errno_location();
#define errno (*__errno_location())
#else
#error Other platforms are not yet supported.
#endif

#define CUR_PC() (StackTrace::GetCurrentPc())

//===----------------------------------------------------------------------===//
@@ -388,6 +402,56 @@ INTERCEPTOR(int, rmdir, char *path) {
  return REAL(rmdir)(path);
}

//===----------------------------------------------------------------------===//
// Shadow-related interceptors
//===----------------------------------------------------------------------===//

// These are candidates for sharing with all sanitizers if shadow memory
// support is also standardized.

static bool fixMmapAddr(void **addr, SIZE_T sz, int flags) {
  if (*addr) {
    uptr AppStart, AppEnd;
    bool SingleApp = false;
    for (int i = 0; getAppRegion(i, &AppStart, &AppEnd); ++i) {
      // App regions are half-open, so the last valid byte is AppEnd - 1.
      if ((uptr)*addr >= AppStart && (uptr)*addr + sz - 1 < AppEnd) {
        SingleApp = true;
        break;
      }
    }
    if (!SingleApp) {
      VPrintf(1, "mmap conflict: [%p-%p) is not in an app region\n",
              *addr, (uptr)*addr + sz);
      if (flags & MAP_FIXED) {
        errno = EINVAL;
        return false;
      } else {
        *addr = 0;
      }
    }
  }
  return true;
}

INTERCEPTOR(void *, mmap, void *addr, SIZE_T sz, int prot, int flags,
            int fd, OFF_T off) {
  if (!fixMmapAddr(&addr, sz, flags))
    return (void *)-1;
  return REAL(mmap)(addr, sz, prot, flags, fd, off);
}

#if SANITIZER_LINUX
INTERCEPTOR(void *, mmap64, void *addr, SIZE_T sz, int prot, int flags,
            int fd, OFF64_T off) {
  if (!fixMmapAddr(&addr, sz, flags))
    return (void *)-1;
  return REAL(mmap64)(addr, sz, prot, flags, fd, off);
}
#define ESAN_MAYBE_INTERCEPT_MMAP64 INTERCEPT_FUNCTION(mmap64)
#else
#define ESAN_MAYBE_INTERCEPT_MMAP64
#endif

namespace __esan {

void initializeInterceptors() {
@@ -411,6 +475,9 @@ void initializeInterceptors() {
  INTERCEPT_FUNCTION(puts);
  INTERCEPT_FUNCTION(rmdir);

  INTERCEPT_FUNCTION(mmap);
  ESAN_MAYBE_INTERCEPT_MMAP64;

  // TODO(bruening): we should intercept calloc() and other memory allocation
  // routines that zero memory and update our shadow memory appropriately.

197 changes: 197 additions & 0 deletions compiler-rt/lib/esan/esan_shadow.h
@@ -0,0 +1,197 @@
//===-- esan_shadow.h -------------------------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of EfficiencySanitizer, a family of performance tuners.
//
// Shadow memory mappings for the esan run-time.
//===----------------------------------------------------------------------===//

#ifndef ESAN_SHADOW_H
#define ESAN_SHADOW_H

#include <sanitizer_common/sanitizer_platform.h>

#if SANITIZER_WORDSIZE != 64
#error Only 64-bit is supported
#endif

namespace __esan {

#if SANITIZER_LINUX && defined(__x86_64__)
// Linux x86_64
//
// Application memory falls into these 5 regions (ignoring the corner case
// of PIE with a non-zero PT_LOAD base):
//
// [0x00000000'00000000, 0x00000100'00000000) non-PIE + heap
// [0x00005500'00000000, 0x00005700'00000000) PIE
// [0x00007f00'00000000, 0x00007fff'ff600000) libraries + stack, part 1
// [0x00007fff'ff601000, 0x00008000'00000000) libraries + stack, part 2
// [0xffffffff'ff600000, 0xffffffff'ff601000) vsyscall
//
// Although we can ignore the vsyscall for the most part as there are few data
// references there (other sanitizers ignore it), we enforce a gap inside the
// library region to distinguish the vsyscall's shadow, considering this gap to
// be an invalid app region.
//
// We disallow application memory outside of those 5 regions.
//
// Our shadow memory is scaled from a 1:1 mapping and supports a scale
// specified at library initialization time that can be any power-of-2
// scaledown (1x, 2x, 4x, 8x, 16x, etc.).
//
// We model our shadow memory after Umbra, a library used by the Dr. Memory
// tool: https://github.com/DynamoRIO/drmemory/blob/master/umbra/umbra_x64.c.
// We use Umbra's scheme as it was designed to support different
// offsets, it supports two different shadow mappings (which we may want to
// use for future tools), and it ensures that the shadow of a shadow will
// not overlap either shadow memory or application memory.
//
// This formula translates from application memory to shadow memory:
//
// shadow(app) = ((app & 0x00000fff'ffffffff) + offset) >> scale
//
// Where the offset for 1:1 is 0x00001200'00000000. For other scales, the
// offset is shifted left by the scale, except for scales of 1 and 2 where
// it must be tweaked in order to pass the double-shadow test
// (see the "shadow(shadow)" comments below):
//   scale == 0: 0x00001200'00000000
//   scale == 1: 0x00002200'00000000
//   scale == 2: 0x00004400'00000000
//   scale >= 3: (0x00001200'00000000 << scale)
//
// Do not pass in the open-ended end value to the formula as it will fail.
//
// The resulting shadow memory regions for a 0 scaling are:
//
// [0x00001200'00000000, 0x00001300'00000000)
// [0x00001700'00000000, 0x00001900'00000000)
// [0x00002100'00000000, 0x000021ff'ff600000)
// [0x000021ff'ff601000, 0x00002200'00000000)
// [0x000021ff'ff600000, 0x000021ff'ff601000)
//
// We also want to ensure that a wild access by the application into the shadow
// regions will not corrupt our own shadow memory. shadow(shadow) ends up
// disjoint from shadow(app):
//
// [0x00001400'00000000, 0x00001500'00000000)
// [0x00001900'00000000, 0x00001b00'00000000)
// [0x00001300'00000000, 0x000013ff'ff600000)
// [0x000013ff'ff601000, 0x00001400'00000000)
// [0x000013ff'ff600000, 0x000013ff'ff601000)
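//
// Worked example (illustrative) at the 1:1 scaling: for the application
// address 0x00007f12'34560000,
//   (0x00007f12'34560000 & Mask) + Offset = 0x00002112'34560000,
// which falls in the 3rd shadow region above; applying the formula to that
// shadow address in turn yields 0x00001312'34560000, which falls in the 3rd
// shadow(shadow) range and lies outside every application and shadow region.
// (Mask and Offset here are the scale-0 values defined in ShadowMapping below.)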

struct ApplicationRegion {
  uptr Start;
  uptr End;
  bool ShadowMergedWithPrev;
};

static const struct ApplicationRegion AppRegions[] = {
  {0x0000000000000000ull, 0x0000010000000000u, false},
  {0x0000550000000000u,   0x0000570000000000u, false},
  // We make one shadow mapping to hold the shadow regions for all 3 of these
  // app regions, as the mappings interleave, and the gap between the 3rd and
  // 4th scales down below a page.
  {0x00007f0000000000u,   0x00007fffff600000u, false},
  {0x00007fffff601000u,   0x0000800000000000u, true},
  {0xffffffffff600000u,   0xffffffffff601000u, true},
};
static const u32 NumAppRegions = sizeof(AppRegions)/sizeof(AppRegions[0]);

class ShadowMapping {
 public:
  static const uptr Mask = 0x00000fffffffffffu;
  // The scale and offset vary by tool.
  uptr Scale;
  uptr Offset;
  void initialize(uptr ShadowScale) {
    static const uptr OffsetArray[3] = {
      0x0000120000000000u,
      0x0000220000000000u,
      0x0000440000000000u,
    };
    Scale = ShadowScale;
    if (Scale <= 2)
      Offset = OffsetArray[Scale];
    else
      Offset = OffsetArray[0] << Scale;
  }
};
extern ShadowMapping Mapping;
#else
// We'll want to use templatized functions over the ShadowMapping once
// we support more platforms.
#error Platform not supported
#endif

static inline bool getAppRegion(u32 i, uptr *Start, uptr *End) {
  if (i >= NumAppRegions)
    return false;
  *Start = AppRegions[i].Start;
  *End = AppRegions[i].End;
  return true;
}

ALWAYS_INLINE
bool isAppMem(uptr Mem) {
  for (u32 i = 0; i < NumAppRegions; ++i) {
    if (Mem >= AppRegions[i].Start && Mem < AppRegions[i].End)
      return true;
  }
  return false;
}

ALWAYS_INLINE
uptr appToShadow(uptr App) {
  DCHECK(isAppMem(App));
  return (((App & ShadowMapping::Mask) + Mapping.Offset) >> Mapping.Scale);
}

static inline bool getShadowRegion(u32 i, uptr *Start, uptr *End) {
  if (i >= NumAppRegions)
    return false;
  u32 UnmergedShadowCount = 0;
  u32 AppIdx;
  for (AppIdx = 0; AppIdx < NumAppRegions; ++AppIdx) {
    if (!AppRegions[AppIdx].ShadowMergedWithPrev) {
      if (UnmergedShadowCount == i)
        break;
      UnmergedShadowCount++;
    }
  }
  if (AppIdx >= NumAppRegions || UnmergedShadowCount != i)
    return false;
  *Start = appToShadow(AppRegions[AppIdx].Start);
  // The formula fails for the end itself.
  *End = appToShadow(AppRegions[AppIdx].End - 1) + 1;
  // Merge with adjacent shadow regions:
  for (++AppIdx; AppIdx < NumAppRegions; ++AppIdx) {
    if (!AppRegions[AppIdx].ShadowMergedWithPrev)
      break;
    *Start = Min(*Start, appToShadow(AppRegions[AppIdx].Start));
    *End = Max(*End, appToShadow(AppRegions[AppIdx].End - 1) + 1);
  }
  return true;
}

ALWAYS_INLINE
bool isShadowMem(uptr Mem) {
  // We assume this is not used on any critical performance path and so there's
  // no need to hardcode the mapping results.
  for (uptr i = 0; i < NumAppRegions; ++i) {
    // The open-ended region end cannot be passed to appToShadow (the formula
    // fails for it), so compute the shadow of the last valid byte and add one.
    if (Mem >= appToShadow(AppRegions[i].Start) &&
        Mem < appToShadow(AppRegions[i].End - 1) + 1)
      return true;
  }
  return false;
}

} // namespace __esan

#endif /* ESAN_SHADOW_H */
