146 changes: 13 additions & 133 deletions compiler-rt/lib/tsan/rtl/tsan_interface_java.cc
@@ -22,54 +22,17 @@

using namespace __tsan; // NOLINT

namespace __tsan {

const uptr kHeapShadow = 0x300000000000ull;
const uptr kHeapAlignment = 8;
const jptr kHeapAlignment = 8;

struct BlockDesc {
bool begin;
Mutex mtx;
SyncVar *head;

BlockDesc()
: mtx(MutexTypeJavaMBlock, StatMtxJavaMBlock)
, head() {
CHECK_EQ(begin, false);
begin = true;
}

~BlockDesc() {
CHECK_EQ(begin, true);
begin = false;
ThreadState *thr = cur_thread();
SyncVar *s = head;
while (s) {
SyncVar *s1 = s->next;
StatInc(thr, StatSyncDestroyed);
s->mtx.Lock();
s->mtx.Unlock();
thr->mset.Remove(s->GetId());
DestroyAndFree(s);
s = s1;
}
}
};
namespace __tsan {

struct JavaContext {
const uptr heap_begin;
const uptr heap_size;
BlockDesc *heap_shadow;

JavaContext(jptr heap_begin, jptr heap_size)
: heap_begin(heap_begin)
, heap_size(heap_size) {
uptr size = heap_size / kHeapAlignment * sizeof(BlockDesc);
heap_shadow = (BlockDesc*)MmapFixedNoReserve(kHeapShadow, size);
if ((uptr)heap_shadow != kHeapShadow) {
Printf("ThreadSanitizer: failed to mmap Java heap shadow\n");
Die();
}
}
};

@@ -93,63 +56,6 @@ class ScopedJavaFunc {
static u64 jctx_buf[sizeof(JavaContext) / sizeof(u64) + 1];
static JavaContext *jctx;

static BlockDesc *getblock(uptr addr) {
uptr i = (addr - jctx->heap_begin) / kHeapAlignment;
return &jctx->heap_shadow[i];
}

static uptr USED getmem(BlockDesc *b) {
uptr i = b - jctx->heap_shadow;
uptr p = jctx->heap_begin + i * kHeapAlignment;
CHECK_GE(p, jctx->heap_begin);
CHECK_LT(p, jctx->heap_begin + jctx->heap_size);
return p;
}

static BlockDesc *getblockbegin(uptr addr) {
for (BlockDesc *b = getblock(addr);; b--) {
CHECK_GE(b, jctx->heap_shadow);
if (b->begin)
return b;
}
return 0;
}

SyncVar* GetJavaSync(ThreadState *thr, uptr pc, uptr addr,
bool write_lock, bool create) {
if (jctx == 0 || addr < jctx->heap_begin
|| addr >= jctx->heap_begin + jctx->heap_size)
return 0;
BlockDesc *b = getblockbegin(addr);
DPrintf("#%d: GetJavaSync %p->%p\n", thr->tid, addr, b);
Lock l(&b->mtx);
SyncVar *s = b->head;
for (; s; s = s->next) {
if (s->addr == addr) {
DPrintf("#%d: found existing sync for %p\n", thr->tid, addr);
break;
}
}
if (s == 0 && create) {
DPrintf("#%d: creating new sync for %p\n", thr->tid, addr);
s = ctx->synctab.Create(thr, pc, addr);
s->next = b->head;
b->head = s;
}
if (s) {
if (write_lock)
s->mtx.Lock();
else
s->mtx.ReadLock();
}
return s;
}

SyncVar* GetAndRemoveJavaSync(ThreadState *thr, uptr pc, uptr addr) {
// We do not destroy Java mutexes other than in __tsan_java_free().
return 0;
}

} // namespace __tsan

#define SCOPED_JAVA_FUNC(func) \
@@ -192,8 +98,7 @@ void __tsan_java_alloc(jptr ptr, jptr size) {
CHECK_GE(ptr, jctx->heap_begin);
CHECK_LE(ptr + size, jctx->heap_begin + jctx->heap_size);

BlockDesc *b = getblock(ptr);
new(b) BlockDesc();
OnUserAlloc(thr, pc, ptr, size, false);
}

void __tsan_java_free(jptr ptr, jptr size) {
@@ -206,12 +111,7 @@ void __tsan_java_free(jptr ptr, jptr size) {
CHECK_GE(ptr, jctx->heap_begin);
CHECK_LE(ptr + size, jctx->heap_begin + jctx->heap_size);

BlockDesc *beg = getblock(ptr);
BlockDesc *end = getblock(ptr + size);
for (BlockDesc *b = beg; b != end; b++) {
if (b->begin)
b->~BlockDesc();
}
ctx->metamap.FreeRange(thr, pc, ptr, size);
}

void __tsan_java_move(jptr src, jptr dst, jptr size) {
@@ -230,35 +130,15 @@ void __tsan_java_move(jptr src, jptr dst, jptr size) {

// Assuming it's not running concurrently with threads that do
// memory accesses and mutex operations (stop-the-world phase).
{ // NOLINT
BlockDesc *s = getblock(src);
BlockDesc *d = getblock(dst);
BlockDesc *send = getblock(src + size);
for (; s != send; s++, d++) {
CHECK_EQ(d->begin, false);
if (s->begin) {
DPrintf("#%d: moving block %p->%p\n", thr->tid, getmem(s), getmem(d));
new(d) BlockDesc;
d->head = s->head;
for (SyncVar *sync = d->head; sync; sync = sync->next) {
uptr newaddr = sync->addr - src + dst;
DPrintf("#%d: moving sync %p->%p\n", thr->tid, sync->addr, newaddr);
sync->addr = newaddr;
}
s->head = 0;
s->~BlockDesc();
}
}
}

{ // NOLINT
u64 *s = (u64*)MemToShadow(src);
u64 *d = (u64*)MemToShadow(dst);
u64 *send = (u64*)MemToShadow(src + size);
for (; s != send; s++, d++) {
*d = *s;
*s = 0;
}
ctx->metamap.MoveMemory(src, dst, size);

// Move shadow.
u64 *s = (u64*)MemToShadow(src);
u64 *d = (u64*)MemToShadow(dst);
u64 *send = (u64*)MemToShadow(src + size);
for (; s != send; s++, d++) {
*d = *s;
*s = 0;
}
}

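A minimal usage sketch of the interface this file implements, with signatures as declared in tsan_interface_java.h; the 8-byte offset mirrors the updated tests below and presumably keeps the Java heap's first meta cell from colliding with the meta cell holding the enclosing malloc block's descriptor:

#include "tsan_interface_java.h"
#include <stdlib.h>

int main() {
  const jptr kHeapSize = 1024 * 1024;
  // Offset by 8 so the Java heap starts on its own 8-byte meta cell.
  jptr heap = (jptr)malloc(kHeapSize + 8) + 8;
  __tsan_java_init(heap, kHeapSize);
  __tsan_java_alloc(heap, 64);             // object is allocated
  __tsan_java_move(heap, heap + 128, 64);  // GC moves it (stop-the-world phase)
  __tsan_java_free(heap + 128, 64);        // object is reclaimed
  return __tsan_java_fini();
}
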
103 changes: 29 additions & 74 deletions compiler-rt/lib/tsan/rtl/tsan_mman.cc
@@ -29,32 +29,6 @@ extern "C" void WEAK __tsan_free_hook(void *ptr) {

namespace __tsan {

COMPILER_CHECK(sizeof(MBlock) == 16);

void MBlock::Lock() {
atomic_uintptr_t *a = reinterpret_cast<atomic_uintptr_t*>(this);
uptr v = atomic_load(a, memory_order_relaxed);
for (int iter = 0;; iter++) {
if (v & 1) {
if (iter < 10)
proc_yield(20);
else
internal_sched_yield();
v = atomic_load(a, memory_order_relaxed);
continue;
}
if (atomic_compare_exchange_weak(a, &v, v | 1, memory_order_acquire))
break;
}
}

void MBlock::Unlock() {
atomic_uintptr_t *a = reinterpret_cast<atomic_uintptr_t*>(this);
uptr v = atomic_load(a, memory_order_relaxed);
DCHECK(v & 1);
atomic_store(a, v & ~1, memory_order_relaxed);
}

struct MapUnmapCallback {
void OnMap(uptr p, uptr size) const { }
void OnUnmap(uptr p, uptr size) const {
@@ -96,7 +70,7 @@ static void SignalUnsafeCall(ThreadState *thr, uptr pc) {
ScopedReport rep(ReportTypeSignalUnsafe);
if (!IsFiredSuppression(ctx, rep, stack)) {
rep.AddStack(&stack, true);
OutputReport(ctx, rep);
OutputReport(thr, rep);
}
}

@@ -106,43 +80,36 @@ void *user_alloc(ThreadState *thr, uptr pc, uptr sz, uptr align) {
void *p = allocator()->Allocate(&thr->alloc_cache, sz, align);
if (p == 0)
return 0;
MBlock *b = new(allocator()->GetMetaData(p)) MBlock;
b->Init(sz, thr->tid, CurrentStackId(thr, pc));
if (ctx && ctx->initialized) {
if (thr->ignore_reads_and_writes == 0)
MemoryRangeImitateWrite(thr, pc, (uptr)p, sz);
else
MemoryResetRange(thr, pc, (uptr)p, sz);
}
DPrintf("#%d: alloc(%zu) = %p\n", thr->tid, sz, p);
if (ctx && ctx->initialized)
OnUserAlloc(thr, pc, (uptr)p, sz, true);
SignalUnsafeCall(thr, pc);
return p;
}

void user_free(ThreadState *thr, uptr pc, void *p) {
CHECK_NE(p, (void*)0);
DPrintf("#%d: free(%p)\n", thr->tid, p);
MBlock *b = (MBlock*)allocator()->GetMetaData(p);
if (b->ListHead()) {
MBlock::ScopedLock l(b);
for (SyncVar *s = b->ListHead(); s;) {
SyncVar *res = s;
s = s->next;
StatInc(thr, StatSyncDestroyed);
res->mtx.Lock();
res->mtx.Unlock();
DestroyAndFree(res);
}
b->ListReset();
}
if (ctx && ctx->initialized) {
if (thr->ignore_reads_and_writes == 0)
MemoryRangeFreed(thr, pc, (uptr)p, b->Size());
}
if (ctx && ctx->initialized)
OnUserFree(thr, pc, (uptr)p, true);
allocator()->Deallocate(&thr->alloc_cache, p);
SignalUnsafeCall(thr, pc);
}

void OnUserAlloc(ThreadState *thr, uptr pc, uptr p, uptr sz, bool write) {
DPrintf("#%d: alloc(%zu) = %p\n", thr->tid, sz, p);
ctx->metamap.AllocBlock(thr, pc, p, sz);
if (write && thr->ignore_reads_and_writes == 0)
MemoryRangeImitateWrite(thr, pc, (uptr)p, sz);
else
MemoryResetRange(thr, pc, (uptr)p, sz);
}

void OnUserFree(ThreadState *thr, uptr pc, uptr p, bool write) {
CHECK_NE(p, (void*)0);
uptr sz = ctx->metamap.FreeBlock(thr, pc, p);
DPrintf("#%d: free(%p, %zu)\n", thr->tid, p, sz);
if (write && thr->ignore_reads_and_writes == 0)
MemoryRangeFreed(thr, pc, (uptr)p, sz);
}

void *user_realloc(ThreadState *thr, uptr pc, void *p, uptr sz) {
void *p2 = 0;
// FIXME: Handle "shrinking" more efficiently,
@@ -152,9 +119,8 @@ void *user_realloc(ThreadState *thr, uptr pc, void *p, uptr sz) {
if (p2 == 0)
return 0;
if (p) {
MBlock *b = user_mblock(thr, p);
CHECK_NE(b, 0);
internal_memcpy(p2, p, min(b->Size(), sz));
uptr oldsz = user_alloc_usable_size(thr, pc, p);
internal_memcpy(p2, p, min(oldsz, sz));
}
}
if (p)
@@ -165,17 +131,8 @@
uptr user_alloc_usable_size(ThreadState *thr, uptr pc, void *p) {
if (p == 0)
return 0;
MBlock *b = (MBlock*)allocator()->GetMetaData(p);
return b ? b->Size() : 0;
}

MBlock *user_mblock(ThreadState *thr, void *p) {
CHECK_NE(p, 0);
Allocator *a = allocator();
void *b = a->GetBlockBegin(p);
if (b == 0)
return 0;
return (MBlock*)a->GetMetaData(b);
MBlock *b = ctx->metamap.GetBlock((uptr)p);
return b ? b->siz : 0;
}

void invoke_malloc_hook(void *ptr, uptr size) {
@@ -247,16 +204,14 @@ bool __tsan_get_ownership(void *p) {
uptr __tsan_get_allocated_size(void *p) {
if (p == 0)
return 0;
p = allocator()->GetBlockBegin(p);
if (p == 0)
return 0;
MBlock *b = (MBlock*)allocator()->GetMetaData(p);
return b->Size();
MBlock *b = ctx->metamap.GetBlock((uptr)p);
return b->siz;
}

void __tsan_on_thread_idle() {
ThreadState *thr = cur_thread();
allocator()->SwallowCache(&thr->alloc_cache);
internal_allocator()->SwallowCache(&thr->internal_alloc_cache);
ctx->metamap.OnThreadIdle(thr);
}
} // extern "C"
4 changes: 0 additions & 4 deletions compiler-rt/lib/tsan/rtl/tsan_mman.h
@@ -32,9 +32,6 @@ void user_free(ThreadState *thr, uptr pc, void *p);
void *user_realloc(ThreadState *thr, uptr pc, void *p, uptr sz);
void *user_alloc_aligned(ThreadState *thr, uptr pc, uptr sz, uptr align);
uptr user_alloc_usable_size(ThreadState *thr, uptr pc, void *p);
// Given the pointer p into a valid allocated block,
// returns the descriptor of the block.
MBlock *user_mblock(ThreadState *thr, void *p);

// Invoking malloc/free hooks that may be installed by the user.
void invoke_malloc_hook(void *ptr, uptr size);
Expand Down Expand Up @@ -62,7 +59,6 @@ enum MBlockType {
MBlockSuppression,
MBlockExpectRace,
MBlockSignal,
MBlockFD,
MBlockJmpBuf,

// This must be the last.
18 changes: 15 additions & 3 deletions compiler-rt/lib/tsan/rtl/tsan_mutex.cc
@@ -31,13 +31,13 @@ static MutexType CanLockTab[MutexTypeCount][MutexTypeCount] = {
/*0 MutexTypeInvalid*/ {},
/*1 MutexTypeTrace*/ {MutexTypeLeaf},
/*2 MutexTypeThreads*/ {MutexTypeReport},
/*3 MutexTypeReport*/ {MutexTypeSyncTab, MutexTypeSyncVar,
/*3 MutexTypeReport*/ {MutexTypeSyncVar,
MutexTypeMBlock, MutexTypeJavaMBlock},
/*4 MutexTypeSyncVar*/ {MutexTypeDDetector},
/*5 MutexTypeSyncTab*/ {MutexTypeSyncVar},
/*5 MutexTypeSyncTab*/ {}, // unused
/*6 MutexTypeSlab*/ {MutexTypeLeaf},
/*7 MutexTypeAnnotations*/ {},
/*8 MutexTypeAtExit*/ {MutexTypeSyncTab},
/*8 MutexTypeAtExit*/ {MutexTypeSyncVar},
/*9 MutexTypeMBlock*/ {MutexTypeSyncVar},
/*10 MutexTypeJavaMBlock*/ {MutexTypeSyncVar},
/*11 MutexTypeDDetector*/ {},
@@ -161,8 +161,20 @@ void InternalDeadlockDetector::Unlock(MutexType t) {
CHECK(locked_[t]);
locked_[t] = 0;
}

void InternalDeadlockDetector::CheckNoLocks() {
for (int i = 0; i != MutexTypeCount; i++) {
CHECK_EQ(locked_[i], 0);
}
}
#endif

void CheckNoLocks(ThreadState *thr) {
#if TSAN_DEBUG && !TSAN_GO
thr->internal_deadlock_detector.CheckNoLocks();
#endif
}

const uptr kUnlocked = 0;
const uptr kWriteLock = 1;
const uptr kReadLock = 2;
5 changes: 5 additions & 0 deletions compiler-rt/lib/tsan/rtl/tsan_mutex.h
@@ -71,13 +71,18 @@ class InternalDeadlockDetector {
InternalDeadlockDetector();
void Lock(MutexType t);
void Unlock(MutexType t);
void CheckNoLocks();
private:
u64 seq_;
u64 locked_[MutexTypeCount];
};

void InitializeMutex();

// Checks that the current thread does not hold any runtime locks
// (e.g. when returning from an interceptor).
void CheckNoLocks(ThreadState *thr);

} // namespace __tsan

#endif // TSAN_MUTEX_H
41 changes: 31 additions & 10 deletions compiler-rt/lib/tsan/rtl/tsan_platform.h
@@ -16,7 +16,9 @@
C++ linux memory layout:
0000 0000 0000 - 03c0 0000 0000: protected
03c0 0000 0000 - 1000 0000 0000: shadow
1000 0000 0000 - 6000 0000 0000: protected
1000 0000 0000 - 3000 0000 0000: protected
3000 0000 0000 - 4000 0000 0000: metainfo (memory blocks and sync objects)
4000 0000 0000 - 6000 0000 0000: protected
6000 0000 0000 - 6200 0000 0000: traces
6200 0000 0000 - 7d00 0000 0000: -
7d00 0000 0000 - 7e00 0000 0000: heap
@@ -27,7 +29,9 @@ C++ COMPAT linux memory layout:
0400 0000 0000 - 1000 0000 0000: shadow
1000 0000 0000 - 2900 0000 0000: protected
2900 0000 0000 - 2c00 0000 0000: modules
2c00 0000 0000 - 6000 0000 0000: -
2c00 0000 0000 - 3000 0000 0000: -
3000 0000 0000 - 4000 0000 0000: metainfo (memory blocks and sync objects)
4000 0000 0000 - 6000 0000 0000: -
6000 0000 0000 - 6200 0000 0000: traces
6200 0000 0000 - 7d00 0000 0000: -
7d00 0000 0000 - 7e00 0000 0000: heap
@@ -40,7 +44,9 @@ Go linux and darwin memory layout:
00c0 0000 0000 - 00e0 0000 0000: heap
00e0 0000 0000 - 1000 0000 0000: -
1000 0000 0000 - 1380 0000 0000: shadow
1460 0000 0000 - 6000 0000 0000: -
1460 0000 0000 - 2000 0000 0000: -
3000 0000 0000 - 4000 0000 0000: metainfo (memory blocks and sync objects)
4000 0000 0000 - 6000 0000 0000: -
6000 0000 0000 - 6200 0000 0000: traces
6200 0000 0000 - 7fff ffff ffff: -
@@ -51,7 +57,8 @@ Go windows memory layout:
00e0 0000 0000 - 0100 0000 0000: -
0100 0000 0000 - 0560 0000 0000: shadow
0560 0000 0000 - 0760 0000 0000: traces
0760 0000 0000 - 07ff ffff ffff: -
0760 0000 0000 - 07d0 0000 0000: metainfo (memory blocks and sync objects)
07d0 0000 0000 - 07ff ffff ffff: -
*/

#ifndef TSAN_PLATFORM_H
@@ -68,20 +75,28 @@ static const uptr kLinuxAppMemBeg = 0x000000000000ULL;
static const uptr kLinuxAppMemEnd = 0x04dfffffffffULL;
# if SANITIZER_WINDOWS
static const uptr kLinuxShadowMsk = 0x010000000000ULL;
# else
static const uptr kMetaShadow = 0x076000000000ULL;
static const uptr kMetaSize = 0x007000000000ULL;
# else // if SANITIZER_WINDOWS
static const uptr kLinuxShadowMsk = 0x200000000000ULL;
# endif
static const uptr kMetaShadow = 0x300000000000ULL;
static const uptr kMetaSize = 0x100000000000ULL;
# endif // if SANITIZER_WINDOWS
#else // defined(TSAN_GO)
static const uptr kMetaShadow = 0x300000000000ULL;
static const uptr kMetaSize = 0x100000000000ULL;
// TSAN_COMPAT_SHADOW is intended for COMPAT virtual memory layout,
// when memory addresses are of the 0x2axxxxxxxxxx form.
// The option is enabled with 'setarch x86_64 -L'.
#elif defined(TSAN_COMPAT_SHADOW) && TSAN_COMPAT_SHADOW
# if defined(TSAN_COMPAT_SHADOW) && TSAN_COMPAT_SHADOW
static const uptr kLinuxAppMemBeg = 0x290000000000ULL;
static const uptr kLinuxAppMemEnd = 0x7fffffffffffULL;
static const uptr kAppMemGapBeg = 0x2c0000000000ULL;
static const uptr kAppMemGapEnd = 0x7d0000000000ULL;
#else
# else
static const uptr kLinuxAppMemBeg = 0x7cf000000000ULL;
static const uptr kLinuxAppMemEnd = 0x7fffffffffffULL;
# endif
#endif

static const uptr kLinuxAppMemMsk = 0x7c0000000000ULL;
@@ -96,10 +111,16 @@ const uptr kTraceMemSize = 0x020000000000ULL;
// This has to be a macro to allow constant initialization of constants below.
#ifndef TSAN_GO
#define MemToShadow(addr) \
(((addr) & ~(kLinuxAppMemMsk | (kShadowCell - 1))) * kShadowCnt)
((((uptr)addr) & ~(kLinuxAppMemMsk | (kShadowCell - 1))) * kShadowCnt)
#define MemToMeta(addr) \
(u32*)(((((uptr)addr) & ~(kLinuxAppMemMsk | (kMetaShadowCell - 1))) \
/ kMetaShadowCell * kMetaShadowSize) | kMetaShadow)
#else
#define MemToShadow(addr) \
((((addr) & ~(kShadowCell - 1)) * kShadowCnt) | kLinuxShadowMsk)
(((((uptr)addr) & ~(kShadowCell - 1)) * kShadowCnt) | kLinuxShadowMsk)
#define MemToMeta(addr) \
(u32*)(((((uptr)addr) & ~(kMetaShadowCell - 1)) \
/ kMetaShadowCell * kMetaShadowSize) | kMetaShadow)
#endif

static const uptr kLinuxShadowBeg = MemToShadow(kLinuxAppMemBeg);
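
To make the MemToMeta arithmetic above concrete, here is a standalone sketch for the non-Go Linux layout; kMetaShadowCell = 8 and kMetaShadowSize = 4 are assumptions (their definitions fall outside this diff), consistent with the 2:1 meta shadow noted in tsan_rtl.cc below:

#include <cstdint>
#include <cstdio>

const uint64_t kLinuxAppMemMsk = 0x7c0000000000ULL;
const uint64_t kMetaShadow     = 0x300000000000ULL;
const uint64_t kMetaShadowCell = 8;  // assumed: one meta cell covers 8 app bytes
const uint64_t kMetaShadowSize = 4;  // assumed: 4 bytes (one u32) of meta per cell

uint64_t MemToMeta(uint64_t addr) {
  return ((addr & ~(kLinuxAppMemMsk | (kMetaShadowCell - 1)))
          / kMetaShadowCell * kMetaShadowSize) | kMetaShadow;
}

int main() {
  uint64_t a = 0x7d0000001230ULL;  // an address in the heap range
  // Prints "7d0000001230 -> 308000000918": inside the 3000-4000 metainfo range.
  printf("%llx -> %llx\n", (unsigned long long)a,
         (unsigned long long)MemToMeta(a));
}
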
95 changes: 67 additions & 28 deletions compiler-rt/lib/tsan/rtl/tsan_platform_linux.cc
@@ -61,34 +61,49 @@ namespace __tsan {

const uptr kPageSize = 4096;

enum {
MemTotal = 0,
MemShadow = 1,
MemMeta = 2,
MemFile = 3,
MemMmap = 4,
MemTrace = 5,
MemHeap = 6,
MemOther = 7,
MemCount = 8,
};

void FillProfileCallback(uptr start, uptr rss, bool file,
uptr *mem, uptr stats_size) {
CHECK_EQ(7, stats_size);
mem[6] += rss; // total
mem[MemTotal] += rss;
start >>= 40;
if (start < 0x10) // shadow
mem[0] += rss;
else if (start >= 0x20 && start < 0x30) // compat modules
mem[file ? 1 : 2] += rss;
else if (start >= 0x7e) // modules
mem[file ? 1 : 2] += rss;
else if (start >= 0x60 && start < 0x62) // traces
mem[3] += rss;
else if (start >= 0x7d && start < 0x7e) // heap
mem[4] += rss;
else // other
mem[5] += rss;
if (start < 0x10)
mem[MemShadow] += rss;
else if (start >= 0x20 && start < 0x30)
mem[file ? MemFile : MemMmap] += rss;
else if (start >= 0x30 && start < 0x40)
mem[MemMeta] += rss;
else if (start >= 0x7e)
mem[file ? MemFile : MemMmap] += rss;
else if (start >= 0x60 && start < 0x62)
mem[MemTrace] += rss;
else if (start >= 0x7d && start < 0x7e)
mem[MemHeap] += rss;
else
mem[MemOther] += rss;
}

void WriteMemoryProfile(char *buf, uptr buf_size) {
uptr mem[7] = {};
uptr mem[MemCount] = {};
__sanitizer::GetMemoryProfile(FillProfileCallback, mem, 7);
char *buf_pos = buf;
char *buf_end = buf + buf_size;
buf_pos += internal_snprintf(buf_pos, buf_end - buf_pos,
"RSS %zd MB: shadow:%zd file:%zd mmap:%zd trace:%zd heap:%zd other:%zd\n",
mem[6] >> 20, mem[0] >> 20, mem[1] >> 20, mem[2] >> 20,
mem[3] >> 20, mem[4] >> 20, mem[5] >> 20);
"RSS %zd MB: shadow:%zd meta:%zd file:%zd mmap:%zd"
" trace:%zd heap:%zd other:%zd\n",
mem[MemTotal] >> 20, mem[MemShadow] >> 20, mem[MemMeta] >> 20,
mem[MemFile] >> 20, mem[MemMmap] >> 20, mem[MemTrace] >> 20,
mem[MemHeap] >> 20, mem[MemOther] >> 20);
struct mallinfo mi = __libc_mallinfo();
buf_pos += internal_snprintf(buf_pos, buf_end - buf_pos,
"mallinfo: arena=%d mmap=%d fordblks=%d keepcost=%d\n",
@@ -123,9 +138,7 @@ static void ProtectRange(uptr beg, uptr end) {
Die();
}
}
#endif

#ifndef TSAN_GO
// Mark shadow for .rodata sections with the special kShadowRodata marker.
// Accesses to .rodata can't race, so this saves time, memory and trace space.
static void MapRodata() {
@@ -184,6 +197,7 @@ static void MapRodata() {
}

void InitializeShadowMemory() {
// Map memory shadow.
uptr shadow = (uptr)MmapFixedNoReserve(kLinuxShadowBeg,
kLinuxShadowEnd - kLinuxShadowBeg);
if (shadow != kLinuxShadowBeg) {
@@ -192,23 +206,48 @@
"to link with -pie (%p, %p).\n", shadow, kLinuxShadowBeg);
Die();
}
DPrintf("memory shadow: %zx-%zx (%zuGB)\n",
kLinuxShadowBeg, kLinuxShadowEnd,
(kLinuxShadowEnd - kLinuxShadowBeg) >> 30);

// Map meta shadow.
if (MemToMeta(kLinuxAppMemBeg) < (u32*)kMetaShadow) {
Printf("ThreadSanitizer: bad meta shadow (%p -> %p < %p)\n",
kLinuxAppMemBeg, MemToMeta(kLinuxAppMemBeg), kMetaShadow);
Die();
}
if (MemToMeta(kLinuxAppMemEnd) >= (u32*)(kMetaShadow + kMetaSize)) {
Printf("ThreadSanitizer: bad meta shadow (%p -> %p >= %p)\n",
kLinuxAppMemEnd, MemToMeta(kLinuxAppMemEnd), kMetaShadow + kMetaSize);
Die();
}
uptr meta = (uptr)MmapFixedNoReserve(kMetaShadow, kMetaSize);
if (meta != kMetaShadow) {
Printf("FATAL: ThreadSanitizer can not mmap the shadow memory\n");
Printf("FATAL: Make sure to compile with -fPIE and "
"to link with -pie (%p, %p).\n", meta, kMetaShadow);
Die();
}
DPrintf("meta shadow: %zx-%zx (%zuGB)\n",
kMetaShadow, kMetaShadow + kMetaSize, kMetaSize >> 30);

// Protect gaps.
const uptr kClosedLowBeg = 0x200000;
const uptr kClosedLowEnd = kLinuxShadowBeg - 1;
const uptr kClosedMidBeg = kLinuxShadowEnd + 1;
const uptr kClosedMidEnd = min(kLinuxAppMemBeg, kTraceMemBegin);
const uptr kClosedMidEnd = min(min(kLinuxAppMemBeg, kTraceMemBegin),
kMetaShadow);

ProtectRange(kClosedLowBeg, kClosedLowEnd);
ProtectRange(kClosedMidBeg, kClosedMidEnd);
DPrintf("kClosedLow %zx-%zx (%zuGB)\n",
VPrintf(2, "kClosedLow %zx-%zx (%zuGB)\n",
kClosedLowBeg, kClosedLowEnd, (kClosedLowEnd - kClosedLowBeg) >> 30);
DPrintf("kLinuxShadow %zx-%zx (%zuGB)\n",
kLinuxShadowBeg, kLinuxShadowEnd,
(kLinuxShadowEnd - kLinuxShadowBeg) >> 30);
DPrintf("kClosedMid %zx-%zx (%zuGB)\n",
VPrintf(2, "kClosedMid %zx-%zx (%zuGB)\n",
kClosedMidBeg, kClosedMidEnd, (kClosedMidEnd - kClosedMidBeg) >> 30);
DPrintf("kLinuxAppMem %zx-%zx (%zuGB)\n",
VPrintf(2, "app mem: %zx-%zx (%zuGB)\n",
kLinuxAppMemBeg, kLinuxAppMemEnd,
(kLinuxAppMemEnd - kLinuxAppMemBeg) >> 30);
DPrintf("stack %zx\n", (uptr)&shadow);
VPrintf(2, "stack: %zx\n", (uptr)&shadow);

MapRodata();
}
22 changes: 21 additions & 1 deletion compiler-rt/lib/tsan/rtl/tsan_rtl.cc
@@ -131,6 +131,7 @@ static void BackgroundThread(void *arg) {

fd_t mprof_fd = kInvalidFd;
if (flags()->profile_memory && flags()->profile_memory[0]) {
// FIXME(dvyukov): support stdout/stderr
InternalScopedBuffer<char> filename(4096);
internal_snprintf(filename.data(), filename.size(), "%s.%d",
flags()->profile_memory, (int)internal_getpid());
@@ -144,6 +145,7 @@
}

u64 last_flush = NanoTime();
u64 last_rss_check = NanoTime();
uptr last_rss = 0;
for (int i = 0;
atomic_load(&ctx->stop_background_thread, memory_order_relaxed) == 0;
@@ -160,7 +162,9 @@
last_flush = NanoTime();
}
}
if (flags()->memory_limit_mb > 0) {
// GetRSS can be expensive on huge programs, so don't do it every 100ms.
if (flags()->memory_limit_mb > 0 && last_rss_check + 1000 * kMs2Ns < now) {
last_rss_check = now;
uptr rss = GetRSS();
uptr limit = uptr(flags()->memory_limit_mb) << 20;
if (flags()->verbosity > 0) {
@@ -222,6 +226,22 @@ void MapShadow(uptr addr, uptr size) {
// so we can get away with unaligned mapping.
// CHECK_EQ(addr, addr & ~((64 << 10) - 1)); // windows wants 64K alignment
MmapFixedNoReserve(MemToShadow(addr), size * kShadowMultiplier);

// Meta shadow is 2:1, so tread carefully.
static uptr mapped_meta_end = 0;
uptr meta_begin = (uptr)MemToMeta(addr);
uptr meta_end = (uptr)MemToMeta(addr + size);
// windows wants 64K alignment
meta_begin = RoundDownTo(meta_begin, 64 << 10);
meta_end = RoundUpTo(meta_end, 64 << 10);
if (meta_end <= mapped_meta_end)
return;
if (meta_begin < mapped_meta_end)
meta_begin = mapped_meta_end;
MmapFixedNoReserve(meta_begin, meta_end - meta_begin);
mapped_meta_end = meta_end;
DPrintf("mapped meta shadow for (%p-%p) at (%p-%p)\n",
addr, addr+size, meta_begin, meta_end);
}
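
The mapping above only ever extends a high-water mark over the meta shadow; a toy model of that logic (RoundDownTo/RoundUpTo reimplemented so the sketch stands alone):

#include <cstdint>
#include <cstdio>

uint64_t RoundDownTo(uint64_t x, uint64_t b) { return x & ~(b - 1); }
uint64_t RoundUpTo(uint64_t x, uint64_t b) { return RoundDownTo(x + b - 1, b); }

static uint64_t mapped_meta_end = 0;

void MapMetaRange(uint64_t meta_begin, uint64_t meta_end) {
  meta_begin = RoundDownTo(meta_begin, 64 << 10);  // 64K alignment for windows
  meta_end = RoundUpTo(meta_end, 64 << 10);
  if (meta_end <= mapped_meta_end)
    return;                        // range already covered
  if (meta_begin < mapped_meta_end)
    meta_begin = mapped_meta_end;  // map only the uncovered tail
  printf("mmap [%llx, %llx)\n", (unsigned long long)meta_begin,
         (unsigned long long)meta_end);
  mapped_meta_end = meta_end;
}

int main() {
  MapMetaRange(0x300000000000ULL, 0x300000020000ULL);  // maps two 64K pages
  MapMetaRange(0x300000010000ULL, 0x300000030000ULL);  // maps only the third
}
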

void MapThreadTrace(uptr addr, uptr size) {
86 changes: 9 additions & 77 deletions compiler-rt/lib/tsan/rtl/tsan_rtl.h
@@ -44,84 +44,14 @@
#include "tsan_platform.h"
#include "tsan_mutexset.h"
#include "tsan_ignoreset.h"
#include "tsan_stack_trace.h"

#if SANITIZER_WORDSIZE != 64
# error "ThreadSanitizer is supported only on 64-bit platforms"
#endif

namespace __tsan {

// Descriptor of user's memory block.
struct MBlock {
/*
u64 mtx : 1; // must be first
u64 lst : 44;
u64 stk : 31; // on word boundary
u64 tid : kTidBits;
u64 siz : 128 - 1 - 31 - 44 - kTidBits; // 39
*/
u64 raw[2];

void Init(uptr siz, u32 tid, u32 stk) {
raw[0] = raw[1] = 0;
raw[1] |= (u64)siz << ((1 + 44 + 31 + kTidBits) % 64);
raw[1] |= (u64)tid << ((1 + 44 + 31) % 64);
raw[0] |= (u64)stk << (1 + 44);
raw[1] |= (u64)stk >> (64 - 44 - 1);
DCHECK_EQ(Size(), siz);
DCHECK_EQ(Tid(), tid);
DCHECK_EQ(StackId(), stk);
}

u32 Tid() const {
return GetLsb(raw[1] >> ((1 + 44 + 31) % 64), kTidBits);
}

uptr Size() const {
return raw[1] >> ((1 + 31 + 44 + kTidBits) % 64);
}

u32 StackId() const {
return (raw[0] >> (1 + 44)) | GetLsb(raw[1] << (64 - 44 - 1), 31);
}

SyncVar *ListHead() const {
return (SyncVar*)(GetLsb(raw[0] >> 1, 44) << 3);
}

void ListPush(SyncVar *v) {
SyncVar *lst = ListHead();
v->next = lst;
u64 x = (u64)v ^ (u64)lst;
x = (x >> 3) << 1;
raw[0] ^= x;
DCHECK_EQ(ListHead(), v);
}

SyncVar *ListPop() {
SyncVar *lst = ListHead();
SyncVar *nxt = lst->next;
lst->next = 0;
u64 x = (u64)lst ^ (u64)nxt;
x = (x >> 3) << 1;
raw[0] ^= x;
DCHECK_EQ(ListHead(), nxt);
return lst;
}

void ListReset() {
SyncVar *lst = ListHead();
u64 x = (u64)lst;
x = (x >> 3) << 1;
raw[0] ^= x;
DCHECK_EQ(ListHead(), 0);
}

void Lock();
void Unlock();
typedef GenericScopedLock<MBlock> ScopedLock;
};

#ifndef TSAN_GO
#if defined(TSAN_COMPAT_SHADOW) && TSAN_COMPAT_SHADOW
const uptr kAllocatorSpace = 0x7d0000000000ULL;
Expand All @@ -131,7 +61,7 @@ const uptr kAllocatorSpace = 0x7d0000000000ULL;
const uptr kAllocatorSize = 0x10000000000ULL; // 1T.

struct MapUnmapCallback;
typedef SizeClassAllocator64<kAllocatorSpace, kAllocatorSize, sizeof(MBlock),
typedef SizeClassAllocator64<kAllocatorSpace, kAllocatorSize, 0,
DefaultSizeClassMap, MapUnmapCallback> PrimaryAllocator;
typedef SizeClassAllocatorLocalCache<PrimaryAllocator> AllocatorCache;
typedef LargeMmapAllocator<MapUnmapCallback> SecondaryAllocator;
@@ -457,6 +387,9 @@ struct ThreadState {
bool in_signal_handler;
SignalContext *signal_ctx;

DenseSlabAllocCache block_cache;
DenseSlabAllocCache sync_cache;

#ifndef TSAN_GO
u32 last_sleep_stack_id;
ThreadClock last_sleep_clock;
@@ -530,7 +463,7 @@ struct Context {
bool initialized;
bool after_multithreaded_fork;

SyncTab synctab;
MetaMap metamap;

Mutex report_mtx;
int nreported;
@@ -628,7 +561,7 @@ void ForkParentAfter(ThreadState *thr, uptr pc);
void ForkChildAfter(ThreadState *thr, uptr pc);

void ReportRace(ThreadState *thr);
bool OutputReport(Context *ctx, const ScopedReport &srep);
bool OutputReport(ThreadState *thr, const ScopedReport &srep);
bool IsFiredSuppression(Context *ctx,
const ScopedReport &srep,
const StackTrace &trace);
@@ -657,9 +590,8 @@ void PrintCurrentStackSlow(); // uses libunwind
void Initialize(ThreadState *thr);
int Finalize(ThreadState *thr);

SyncVar* GetJavaSync(ThreadState *thr, uptr pc, uptr addr,
bool write_lock, bool create);
SyncVar* GetAndRemoveJavaSync(ThreadState *thr, uptr pc, uptr addr);
void OnUserAlloc(ThreadState *thr, uptr pc, uptr p, uptr sz, bool write);
void OnUserFree(ThreadState *thr, uptr pc, uptr p, bool write);

void MemoryAccess(ThreadState *thr, uptr pc, uptr addr,
int kAccessSizeLog, bool kAccessIsWrite, bool kIsAtomic);
69 changes: 44 additions & 25 deletions compiler-rt/lib/tsan/rtl/tsan_rtl_mutex.cc
@@ -59,7 +59,7 @@ static void ReportMutexMisuse(ThreadState *thr, uptr pc, ReportType typ,
trace.ObtainCurrent(thr, pc);
rep.AddStack(&trace, true);
rep.AddLocation(addr, 1);
OutputReport(ctx, rep);
OutputReport(thr, rep);
}

void MutexCreate(ThreadState *thr, uptr pc, uptr addr,
@@ -72,10 +72,12 @@ void MutexCreate(ThreadState *thr, uptr pc, uptr addr,
MemoryWrite(thr, pc, addr, kSizeLog1);
thr->is_freeing = false;
}
SyncVar *s = ctx->synctab.GetOrCreateAndLock(thr, pc, addr, true);
SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
s->is_rw = rw;
s->is_recursive = recursive;
s->is_linker_init = linker_init;
if (kCppMode && s->creation_stack_id == 0)
s->creation_stack_id = CurrentStackId(thr, pc);
s->mtx.Unlock();
}

@@ -88,45 +90,62 @@ void MutexDestroy(ThreadState *thr, uptr pc, uptr addr) {
if (IsGlobalVar(addr))
return;
#endif
SyncVar *s = ctx->synctab.GetAndRemove(thr, pc, addr);
if (s == 0)
return;
if (flags()->detect_deadlocks) {
Callback cb(thr, pc);
ctx->dd->MutexDestroy(&cb, &s->dd);
}
if (IsAppMem(addr)) {
CHECK(!thr->is_freeing);
thr->is_freeing = true;
MemoryWrite(thr, pc, addr, kSizeLog1);
thr->is_freeing = false;
}
SyncVar *s = ctx->metamap.GetIfExistsAndLock(addr);
if (s == 0)
return;
if (flags()->detect_deadlocks) {
Callback cb(thr, pc);
ctx->dd->MutexDestroy(&cb, &s->dd);
ctx->dd->MutexInit(&cb, &s->dd);
}
bool unlock_locked = false;
if (flags()->report_destroy_locked
&& s->owner_tid != SyncVar::kInvalidTid
&& !s->is_broken) {
s->is_broken = true;
unlock_locked = true;
}
u64 mid = s->GetId();
u32 last_lock = s->last_lock;
if (!unlock_locked)
s->Reset(); // must not reset it before the report is printed
s->mtx.Unlock();
if (unlock_locked) {
ThreadRegistryLock l(ctx->thread_registry);
ScopedReport rep(ReportTypeMutexDestroyLocked);
rep.AddMutex(s);
rep.AddMutex(mid);
StackTrace trace;
trace.ObtainCurrent(thr, pc);
rep.AddStack(&trace);
FastState last(s->last_lock);
FastState last(last_lock);
RestoreStack(last.tid(), last.epoch(), &trace, 0);
rep.AddStack(&trace, true);
rep.AddLocation(s->addr, 1);
OutputReport(ctx, rep);
rep.AddLocation(addr, 1);
OutputReport(thr, rep);
}
if (unlock_locked) {
SyncVar *s = ctx->metamap.GetIfExistsAndLock(addr);
if (s != 0) {
s->Reset();
s->mtx.Unlock();
}
}
thr->mset.Remove(s->GetId());
DestroyAndFree(s);
thr->mset.Remove(mid);
// s will be destroyed and freed in MetaMap::FreeBlock.
}

void MutexLock(ThreadState *thr, uptr pc, uptr addr, int rec, bool try_lock) {
DPrintf("#%d: MutexLock %zx rec=%d\n", thr->tid, addr, rec);
CHECK_GT(rec, 0);
if (IsAppMem(addr))
MemoryReadAtomic(thr, pc, addr, kSizeLog1);
SyncVar *s = ctx->synctab.GetOrCreateAndLock(thr, pc, addr, true);
SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
thr->fast_state.IncrementEpoch();
TraceAddEvent(thr, thr->fast_state, EventTypeLock, s->GetId());
bool report_double_lock = false;
@@ -170,7 +189,7 @@ int MutexUnlock(ThreadState *thr, uptr pc, uptr addr, bool all) {
DPrintf("#%d: MutexUnlock %zx all=%d\n", thr->tid, addr, all);
if (IsAppMem(addr))
MemoryReadAtomic(thr, pc, addr, kSizeLog1);
SyncVar *s = ctx->synctab.GetOrCreateAndLock(thr, pc, addr, true);
SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
thr->fast_state.IncrementEpoch();
TraceAddEvent(thr, thr->fast_state, EventTypeUnlock, s->GetId());
int rec = 0;
@@ -213,7 +232,7 @@ void MutexReadLock(ThreadState *thr, uptr pc, uptr addr, bool trylock) {
StatInc(thr, StatMutexReadLock);
if (IsAppMem(addr))
MemoryReadAtomic(thr, pc, addr, kSizeLog1);
SyncVar *s = ctx->synctab.GetOrCreateAndLock(thr, pc, addr, false);
SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, false);
thr->fast_state.IncrementEpoch();
TraceAddEvent(thr, thr->fast_state, EventTypeRLock, s->GetId());
bool report_bad_lock = false;
@@ -248,7 +267,7 @@ void MutexReadUnlock(ThreadState *thr, uptr pc, uptr addr) {
StatInc(thr, StatMutexReadUnlock);
if (IsAppMem(addr))
MemoryReadAtomic(thr, pc, addr, kSizeLog1);
SyncVar *s = ctx->synctab.GetOrCreateAndLock(thr, pc, addr, true);
SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
thr->fast_state.IncrementEpoch();
TraceAddEvent(thr, thr->fast_state, EventTypeRUnlock, s->GetId());
bool report_bad_unlock = false;
@@ -279,7 +298,7 @@ void MutexReadOrWriteUnlock(ThreadState *thr, uptr pc, uptr addr) {
DPrintf("#%d: MutexReadOrWriteUnlock %zx\n", thr->tid, addr);
if (IsAppMem(addr))
MemoryReadAtomic(thr, pc, addr, kSizeLog1);
SyncVar *s = ctx->synctab.GetOrCreateAndLock(thr, pc, addr, true);
SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
bool write = true;
bool report_bad_unlock = false;
if (s->owner_tid == SyncVar::kInvalidTid) {
@@ -324,7 +343,7 @@ void MutexReadOrWriteUnlock(ThreadState *thr, uptr pc, uptr addr) {

void MutexRepair(ThreadState *thr, uptr pc, uptr addr) {
DPrintf("#%d: MutexRepair %zx\n", thr->tid, addr);
SyncVar *s = ctx->synctab.GetOrCreateAndLock(thr, pc, addr, true);
SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
s->owner_tid = SyncVar::kInvalidTid;
s->recursion = 0;
s->mtx.Unlock();
@@ -334,7 +353,7 @@ void Acquire(ThreadState *thr, uptr pc, uptr addr) {
DPrintf("#%d: Acquire %zx\n", thr->tid, addr);
if (thr->ignore_sync)
return;
SyncVar *s = ctx->synctab.GetOrCreateAndLock(thr, pc, addr, false);
SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, false);
AcquireImpl(thr, pc, &s->clock);
s->mtx.ReadUnlock();
}
Expand All @@ -361,7 +380,7 @@ void Release(ThreadState *thr, uptr pc, uptr addr) {
DPrintf("#%d: Release %zx\n", thr->tid, addr);
if (thr->ignore_sync)
return;
SyncVar *s = ctx->synctab.GetOrCreateAndLock(thr, pc, addr, true);
SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
thr->fast_state.IncrementEpoch();
// Can't increment epoch w/o writing to the trace as well.
TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
@@ -373,7 +392,7 @@ void ReleaseStore(ThreadState *thr, uptr pc, uptr addr) {
DPrintf("#%d: ReleaseStore %zx\n", thr->tid, addr);
if (thr->ignore_sync)
return;
SyncVar *s = ctx->synctab.GetOrCreateAndLock(thr, pc, addr, true);
SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
thr->fast_state.IncrementEpoch();
// Can't increment epoch w/o writing to the trace as well.
TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
@@ -465,7 +484,7 @@ void ReportDeadlock(ThreadState *thr, uptr pc, DDReport *r) {
rep.AddStack(&stacks[i], true);
}
}
OutputReport(ctx, rep);
OutputReport(thr, rep);
}

} // namespace __tsan
40 changes: 27 additions & 13 deletions compiler-rt/lib/tsan/rtl/tsan_rtl_report.cc
@@ -179,7 +180,8 @@ void ScopedReport::AddMemoryAccess(uptr addr, Shadow s,
mop->write = s.IsWrite();
mop->atomic = s.IsAtomic();
mop->stack = SymbolizeStack(*stack);
mop->stack->suppressable = true;
if (mop->stack)
mop->stack->suppressable = true;
for (uptr i = 0; i < mset->Size(); i++) {
MutexSet::Desc d = mset->Get(i);
u64 mid = this->AddMutex(d.id);
@@ -279,7 +280,7 @@ u64 ScopedReport::AddMutex(u64 id) {
u64 uid = 0;
u64 mid = id;
uptr addr = SyncVar::SplitId(id, &uid);
SyncVar *s = ctx->synctab.GetIfExistsAndLock(addr, false);
SyncVar *s = ctx->metamap.GetIfExistsAndLock(addr);
// Check that the mutex is still alive.
// Another mutex can be created at the same address,
// so check uid as well.
@@ -290,7 +291,7 @@
AddDeadMutex(id);
}
if (s)
s->mtx.ReadUnlock();
s->mtx.Unlock();
return mid;
}

@@ -330,21 +331,26 @@ void ScopedReport::AddLocation(uptr addr, uptr size) {
return;
}
MBlock *b = 0;
if (allocator()->PointerIsMine((void*)addr)
&& (b = user_mblock(0, (void*)addr))) {
ThreadContext *tctx = FindThreadByTidLocked(b->Tid());
Allocator *a = allocator();
if (a->PointerIsMine((void*)addr)) {
void *block_begin = a->GetBlockBegin((void*)addr);
if (block_begin)
b = ctx->metamap.GetBlock((uptr)block_begin);
}
if (b != 0) {
ThreadContext *tctx = FindThreadByTidLocked(b->tid);
void *mem = internal_alloc(MBlockReportLoc, sizeof(ReportLocation));
ReportLocation *loc = new(mem) ReportLocation();
rep_->locs.PushBack(loc);
loc->type = ReportLocationHeap;
loc->addr = (uptr)allocator()->GetBlockBegin((void*)addr);
loc->size = b->Size();
loc->tid = tctx ? tctx->tid : b->Tid();
loc->size = b->siz;
loc->tid = tctx ? tctx->tid : b->tid;
loc->name = 0;
loc->file = 0;
loc->line = 0;
loc->stack = 0;
loc->stack = SymbolizeStackId(b->StackId());
loc->stack = SymbolizeStackId(b->stk);
if (tctx)
AddThread(tctx);
return;
@@ -500,7 +506,7 @@ static void AddRacyStacks(ThreadState *thr, const StackTrace (&traces)[2],
}
}

bool OutputReport(Context *ctx, const ScopedReport &srep) {
bool OutputReport(ThreadState *thr, const ScopedReport &srep) {
atomic_store(&ctx->last_symbolize_time_ns, NanoTime(), memory_order_relaxed);
const ReportDesc *rep = srep.GetReport();
Suppression *supp = 0;
@@ -517,8 +523,14 @@ bool OutputReport(Context *ctx, const ScopedReport &srep) {
FiredSuppression s = {srep.GetReport()->typ, suppress_pc, supp};
ctx->fired_suppressions.push_back(s);
}
if (OnReport(rep, suppress_pc != 0))
return false;
{
bool old_is_freeing = thr->is_freeing;
thr->is_freeing = false;
bool suppressed = OnReport(rep, suppress_pc != 0);
thr->is_freeing = old_is_freeing;
if (suppressed)
return false;
}
PrintReport(rep);
ctx->nreported++;
if (flags()->halt_on_error)
@@ -616,6 +628,8 @@ static bool RaceBetweenAtomicAndFree(ThreadState *thr) {
}

void ReportRace(ThreadState *thr) {
CheckNoLocks(thr);

// Symbolizer makes lots of intercepted calls. If we try to process them,
// at best it will cause deadlocks on internal mutexes.
ScopedIgnoreInterceptors ignore;
@@ -700,7 +714,7 @@ void ReportRace(ThreadState *thr) {
}
#endif

if (!OutputReport(ctx, rep))
if (!OutputReport(thr, rep))
return;

AddRacyStacks(thr, traces, addr_min, addr_max);
2 changes: 1 addition & 1 deletion compiler-rt/lib/tsan/rtl/tsan_rtl_thread.cc
@@ -207,7 +207,7 @@ void ThreadFinalize(ThreadState *thr) {
ScopedReport rep(ReportTypeThreadLeak);
rep.AddThread(leaks[i].tctx, true);
rep.SetCount(leaks[i].count);
OutputReport(ctx, rep);
OutputReport(thr, rep);
}
#endif
}
112 changes: 112 additions & 0 deletions compiler-rt/lib/tsan/rtl/tsan_stack_trace.cc
@@ -0,0 +1,112 @@
//===-- tsan_stack_trace.cc -----------------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//
//#include "sanitizer_common/sanitizer_placement_new.h"
#include "tsan_stack_trace.h"
#include "tsan_rtl.h"
#include "tsan_mman.h"

namespace __tsan {

StackTrace::StackTrace()
: n_()
, s_()
, c_() {
}

StackTrace::StackTrace(uptr *buf, uptr cnt)
: n_()
, s_(buf)
, c_(cnt) {
CHECK_NE(buf, 0);
CHECK_NE(cnt, 0);
}

StackTrace::~StackTrace() {
Reset();
}

void StackTrace::Reset() {
if (s_ && !c_) {
CHECK_NE(n_, 0);
internal_free(s_);
s_ = 0;
}
n_ = 0;
}

void StackTrace::Init(const uptr *pcs, uptr cnt) {
Reset();
if (cnt == 0)
return;
if (c_) {
CHECK_NE(s_, 0);
CHECK_LE(cnt, c_);
} else {
s_ = (uptr*)internal_alloc(MBlockStackTrace, cnt * sizeof(s_[0]));
}
n_ = cnt;
internal_memcpy(s_, pcs, cnt * sizeof(s_[0]));
}

void StackTrace::ObtainCurrent(ThreadState *thr, uptr toppc) {
Reset();
n_ = thr->shadow_stack_pos - thr->shadow_stack;
if (n_ + !!toppc == 0)
return;
uptr start = 0;
if (c_) {
CHECK_NE(s_, 0);
if (n_ + !!toppc > c_) {
start = n_ - c_ + !!toppc;
n_ = c_ - !!toppc;
}
} else {
// Cap potentially huge stacks.
if (n_ + !!toppc > kTraceStackSize) {
start = n_ - kTraceStackSize + !!toppc;
n_ = kTraceStackSize - !!toppc;
}
s_ = (uptr*)internal_alloc(MBlockStackTrace,
(n_ + !!toppc) * sizeof(s_[0]));
}
for (uptr i = 0; i < n_; i++)
s_[i] = thr->shadow_stack[start + i];
if (toppc) {
s_[n_] = toppc;
n_++;
}
}

void StackTrace::CopyFrom(const StackTrace& other) {
Reset();
Init(other.Begin(), other.Size());
}

bool StackTrace::IsEmpty() const {
return n_ == 0;
}

uptr StackTrace::Size() const {
return n_;
}

uptr StackTrace::Get(uptr i) const {
CHECK_LT(i, n_);
return s_[i];
}

const uptr *StackTrace::Begin() const {
return s_;
}

} // namespace __tsan
54 changes: 54 additions & 0 deletions compiler-rt/lib/tsan/rtl/tsan_stack_trace.h
@@ -0,0 +1,54 @@
//===-- tsan_stack_trace.h --------------------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//
#ifndef TSAN_STACK_TRACE_H
#define TSAN_STACK_TRACE_H

//#include "sanitizer_common/sanitizer_atomic.h"
//#include "sanitizer_common/sanitizer_common.h"
//#include "sanitizer_common/sanitizer_deadlock_detector_interface.h"
#include "tsan_defs.h"
//#include "tsan_clock.h"
//#include "tsan_mutex.h"
//#include "tsan_dense_alloc.h"

namespace __tsan {

class StackTrace {
public:
StackTrace();
  // Initializes the object in "static mode";
// in this mode it never calls malloc/free but uses the provided buffer.
StackTrace(uptr *buf, uptr cnt);
~StackTrace();
void Reset();

void Init(const uptr *pcs, uptr cnt);
void ObtainCurrent(ThreadState *thr, uptr toppc);
bool IsEmpty() const;
uptr Size() const;
uptr Get(uptr i) const;
const uptr *Begin() const;
void CopyFrom(const StackTrace& other);

private:
uptr n_;
uptr *s_;
const uptr c_;

StackTrace(const StackTrace&);
void operator = (const StackTrace&);
};

} // namespace __tsan

#endif // TSAN_STACK_TRACE_H
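
A short sketch of the two construction modes above (the function name here is illustrative, not from the patch):

void StackTraceModesExample() {
  uptr pcs[3] = {0x400123, 0x400456, 0x400789};

  uptr buf[8];
  StackTrace static_trace(buf, 8);  // "static mode": writes into buf, never allocates
  static_trace.Init(pcs, 3);        // copies pcs into buf (requires cnt <= 8)
  CHECK_EQ(static_trace.Get(0), pcs[0]);

  StackTrace dyn_trace;             // dynamic mode
  dyn_trace.Init(pcs, 3);           // allocates a 3-element buffer via internal_alloc
  dyn_trace.Reset();                // frees it; in static mode Reset frees nothing
}
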
407 changes: 153 additions & 254 deletions compiler-rt/lib/tsan/rtl/tsan_sync.cc

Large diffs are not rendered by default.

87 changes: 32 additions & 55 deletions compiler-rt/lib/tsan/rtl/tsan_sync.h
@@ -16,46 +16,21 @@
#include "sanitizer_common/sanitizer_atomic.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_deadlock_detector_interface.h"
#include "tsan_clock.h"
#include "tsan_defs.h"
#include "tsan_clock.h"
#include "tsan_mutex.h"
#include "tsan_dense_alloc.h"

namespace __tsan {

class StackTrace {
public:
StackTrace();
  // Initializes the object in "static mode";
// in this mode it never calls malloc/free but uses the provided buffer.
StackTrace(uptr *buf, uptr cnt);
~StackTrace();
void Reset();

void Init(const uptr *pcs, uptr cnt);
void ObtainCurrent(ThreadState *thr, uptr toppc);
bool IsEmpty() const;
uptr Size() const;
uptr Get(uptr i) const;
const uptr *Begin() const;
void CopyFrom(const StackTrace& other);

private:
uptr n_;
uptr *s_;
const uptr c_;

StackTrace(const StackTrace&);
void operator = (const StackTrace&);
};

struct SyncVar {
explicit SyncVar(uptr addr, u64 uid);
SyncVar();

static const int kInvalidTid = -1;

uptr addr; // overwritten by DenseSlabAlloc freelist
Mutex mtx;
uptr addr;
const u64 uid; // Globally unique id.
u64 uid; // Globally unique id.
u32 creation_stack_id;
int owner_tid; // Set only by exclusive owners.
u64 last_lock;
@@ -64,13 +39,16 @@ struct SyncVar {
bool is_recursive;
bool is_broken;
bool is_linker_init;
SyncVar *next; // In SyncTab hashtable.
u32 next; // in MetaMap
DDMutex dd;
SyncClock read_clock; // Used for rw mutexes only.
// The clock is placed last, so that it is situated on a different cache line
// with the mtx. This reduces contention for hot sync objects.
SyncClock clock;

void Init(ThreadState *thr, uptr pc, uptr addr, u64 uid);
void Reset();

u64 GetId() const {
// 47 lsb is addr, then 14 bits is low part of uid, then 3 zero bits.
return GetLsb((u64)addr | (uid << 47), 61);
@@ -85,40 +63,39 @@
}
};
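
A self-contained model of the packing in GetId above; the unpacking side mirrors SyncVar::SplitId, whose body is folded out of this hunk, so treat it as an assumption matching the comment (47 address bits, then 14 uid bits, 61 bits total):

#include <cstdint>
#include <cassert>

uint64_t GetLsb(uint64_t v, int bits) { return v & (((uint64_t)1 << bits) - 1); }

uint64_t PackId(uint64_t addr, uint64_t uid) {
  return GetLsb(addr | (uid << 47), 61);         // mirrors SyncVar::GetId
}

uint64_t UnpackId(uint64_t id, uint64_t *uid) {  // assumed SplitId behavior
  *uid = id >> 47;
  return GetLsb(id, 47);
}

int main() {
  uint64_t uid = 0x2a, addr = 0x7d0000001230ULL;  // user addresses fit in 47 bits
  uint64_t id = PackId(addr, uid);
  uint64_t uid2 = 0;
  assert(UnpackId(id, &uid2) == addr);  // address round-trips
  assert(uid2 == GetLsb(uid, 14));      // low 14 bits of the uid round-trip
  return 0;
}
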

class SyncTab {
/* MetaMap allows mapping arbitrary user pointers onto various descriptors.
Currently it maps pointers to heap block descriptors and sync var descs.
It uses 1/2 direct shadow, see tsan_platform.h.
*/
class MetaMap {
public:
SyncTab();
~SyncTab();
MetaMap();

void AllocBlock(ThreadState *thr, uptr pc, uptr p, uptr sz);
uptr FreeBlock(ThreadState *thr, uptr pc, uptr p);
void FreeRange(ThreadState *thr, uptr pc, uptr p, uptr sz);
MBlock* GetBlock(uptr p);

SyncVar* GetOrCreateAndLock(ThreadState *thr, uptr pc,
uptr addr, bool write_lock);
SyncVar* GetIfExistsAndLock(uptr addr, bool write_lock);
SyncVar* GetIfExistsAndLock(uptr addr);

// If the SyncVar does not exist, returns 0.
SyncVar* GetAndRemove(ThreadState *thr, uptr pc, uptr addr);
void MoveMemory(uptr src, uptr dst, uptr sz);

SyncVar* Create(ThreadState *thr, uptr pc, uptr addr);
void OnThreadIdle(ThreadState *thr);

private:
struct Part {
Mutex mtx;
SyncVar *val;
char pad[kCacheLineSize - sizeof(Mutex) - sizeof(SyncVar*)]; // NOLINT
Part();
};

// FIXME: Implement something more sane.
static const int kPartCount = 1009;
Part tab_[kPartCount];
static const u32 kFlagMask = 3 << 30;
static const u32 kFlagBlock = 1 << 30;
static const u32 kFlagSync = 2 << 30;
typedef DenseSlabAlloc<MBlock, 1<<16, 1<<12> BlockAlloc;
typedef DenseSlabAlloc<SyncVar, 1<<16, 1<<10> SyncAlloc;
BlockAlloc block_alloc_;
SyncAlloc sync_alloc_;
atomic_uint64_t uid_gen_;

int PartIdx(uptr addr);

SyncVar* GetAndLock(ThreadState *thr, uptr pc,
uptr addr, bool write_lock, bool create);

SyncTab(const SyncTab&); // Not implemented.
void operator = (const SyncTab&); // Not implemented.
SyncVar* GetAndLock(ThreadState *thr, uptr pc, uptr addr, bool write_lock,
bool create);
};

} // namespace __tsan
2 changes: 1 addition & 1 deletion compiler-rt/lib/tsan/rtl/tsan_trace.h
@@ -15,7 +15,7 @@

#include "tsan_defs.h"
#include "tsan_mutex.h"
#include "tsan_sync.h"
#include "tsan_stack_trace.h"
#include "tsan_mutexset.h"

namespace __tsan {
6 changes: 5 additions & 1 deletion compiler-rt/lib/tsan/rtl/tsan_vector.h
@@ -78,6 +78,10 @@ class Vector {
}

void Resize(uptr size) {
if (size == 0) {
end_ = begin_;
return;
}
uptr old_size = Size();
EnsureSize(size);
if (old_size < size) {
@@ -100,7 +104,7 @@
return;
}
uptr cap0 = last_ - begin_;
uptr cap = 2 * cap0;
uptr cap = cap0 * 5 / 4; // 25% growth
if (cap == 0)
cap = 16;
if (cap < size)
55 changes: 55 additions & 0 deletions compiler-rt/lib/tsan/tests/unit/tsan_dense_alloc_test.cc
@@ -0,0 +1,55 @@
//===-- tsan_dense_alloc_test.cc ------------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//
#include "tsan_dense_alloc.h"
#include "tsan_rtl.h"
#include "tsan_mman.h"
#include "gtest/gtest.h"

#include <stdlib.h>
#include <stdint.h>
#include <map>

namespace __tsan {

TEST(DenseSlabAlloc, Basic) {
typedef DenseSlabAlloc<int, 128, 128> Alloc;
typedef Alloc::Cache Cache;
typedef Alloc::IndexT IndexT;
const int N = 1000;

Alloc alloc;
Cache cache;
alloc.InitCache(&cache);

IndexT blocks[N];
for (int ntry = 0; ntry < 3; ntry++) {
for (int i = 0; i < N; i++) {
IndexT idx = alloc.Alloc(&cache);
blocks[i] = idx;
EXPECT_NE(idx, 0);
int *v = alloc.Map(idx);
*v = i;
}

for (int i = 0; i < N; i++) {
IndexT idx = blocks[i];
int *v = alloc.Map(idx);
EXPECT_EQ(*v, i);
alloc.Free(&cache, idx);
}

alloc.FlushCache(&cache);
}
}

} // namespace __tsan
16 changes: 2 additions & 14 deletions compiler-rt/lib/tsan/tests/unit/tsan_mman_test.cc
@@ -51,20 +51,8 @@ TEST(Mman, User) {
char *p2 = (char*)user_alloc(thr, pc, 20);
EXPECT_NE(p2, (char*)0);
EXPECT_NE(p2, p);
MBlock *b = user_mblock(thr, p);
EXPECT_NE(b, (MBlock*)0);
EXPECT_EQ(b->Size(), (uptr)10);
MBlock *b2 = user_mblock(thr, p2);
EXPECT_NE(b2, (MBlock*)0);
EXPECT_EQ(b2->Size(), (uptr)20);
for (int i = 0; i < 10; i++) {
p[i] = 42;
EXPECT_EQ(b, user_mblock(thr, p + i));
}
for (int i = 0; i < 20; i++) {
((char*)p2)[i] = 42;
EXPECT_EQ(b2, user_mblock(thr, p2 + i));
}
EXPECT_EQ(user_alloc_usable_size(thr, pc, p), (uptr)10);
EXPECT_EQ(user_alloc_usable_size(thr, pc, p2), (uptr)20);
user_free(thr, pc, p);
user_free(thr, pc, p2);
}
127 changes: 87 additions & 40 deletions compiler-rt/lib/tsan/tests/unit/tsan_sync_test.cc
@@ -12,53 +12,100 @@
//===----------------------------------------------------------------------===//
#include "tsan_sync.h"
#include "tsan_rtl.h"
#include "tsan_mman.h"
#include "gtest/gtest.h"

#include <stdlib.h>
#include <stdint.h>
#include <map>

namespace __tsan {

TEST(Sync, Table) {
const uintptr_t kIters = 512*1024;
const uintptr_t kRange = 10000;
TEST(MetaMap, Basic) {
ThreadState *thr = cur_thread();
MetaMap *m = &ctx->metamap;
u64 block[1] = {}; // fake malloc block
m->AllocBlock(thr, 0, (uptr)&block[0], 1 * sizeof(u64));
MBlock *mb = m->GetBlock((uptr)&block[0]);
EXPECT_NE(mb, (MBlock*)0);
EXPECT_EQ(mb->siz, 1 * sizeof(u64));
EXPECT_EQ(mb->tid, thr->tid);
uptr sz = m->FreeBlock(thr, 0, (uptr)&block[0]);
EXPECT_EQ(sz, 1 * sizeof(u64));
mb = m->GetBlock((uptr)&block[0]);
EXPECT_EQ(mb, (MBlock*)0);
}

TEST(MetaMap, FreeRange) {
ThreadState *thr = cur_thread();
MetaMap *m = &ctx->metamap;
u64 block[4] = {}; // fake malloc block
m->AllocBlock(thr, 0, (uptr)&block[0], 1 * sizeof(u64));
m->AllocBlock(thr, 0, (uptr)&block[1], 3 * sizeof(u64));
MBlock *mb1 = m->GetBlock((uptr)&block[0]);
EXPECT_EQ(mb1->siz, 1 * sizeof(u64));
MBlock *mb2 = m->GetBlock((uptr)&block[1]);
EXPECT_EQ(mb2->siz, 3 * sizeof(u64));
m->FreeRange(thr, 0, (uptr)&block[0], 4 * sizeof(u64));
mb1 = m->GetBlock((uptr)&block[0]);
EXPECT_EQ(mb1, (MBlock*)0);
mb2 = m->GetBlock((uptr)&block[1]);
EXPECT_EQ(mb2, (MBlock*)0);
}

TEST(MetaMap, Sync) {
ThreadState *thr = cur_thread();
uptr pc = 0;
MetaMap *m = &ctx->metamap;
u64 block[4] = {}; // fake malloc block
m->AllocBlock(thr, 0, (uptr)&block[0], 4 * sizeof(u64));
SyncVar *s1 = m->GetIfExistsAndLock((uptr)&block[0]);
EXPECT_EQ(s1, (SyncVar*)0);
s1 = m->GetOrCreateAndLock(thr, 0, (uptr)&block[0], true);
EXPECT_NE(s1, (SyncVar*)0);
EXPECT_EQ(s1->addr, (uptr)&block[0]);
s1->mtx.Unlock();
SyncVar *s2 = m->GetOrCreateAndLock(thr, 0, (uptr)&block[1], false);
EXPECT_NE(s2, (SyncVar*)0);
EXPECT_EQ(s2->addr, (uptr)&block[1]);
s2->mtx.ReadUnlock();
m->FreeBlock(thr, 0, (uptr)&block[0]);
s1 = m->GetIfExistsAndLock((uptr)&block[0]);
EXPECT_EQ(s1, (SyncVar*)0);
s2 = m->GetIfExistsAndLock((uptr)&block[1]);
EXPECT_EQ(s2, (SyncVar*)0);
m->OnThreadIdle(thr);
}

SyncTab tab;
SyncVar *golden[kRange] = {};
unsigned seed = 0;
for (uintptr_t i = 0; i < kIters; i++) {
uintptr_t addr = rand_r(&seed) % (kRange - 1) + 1;
if (rand_r(&seed) % 2) {
// Get or add.
SyncVar *v = tab.GetOrCreateAndLock(thr, pc, addr, true);
EXPECT_TRUE(golden[addr] == 0 || golden[addr] == v);
EXPECT_EQ(v->addr, addr);
golden[addr] = v;
v->mtx.Unlock();
} else {
// Remove.
SyncVar *v = tab.GetAndRemove(thr, pc, addr);
EXPECT_EQ(golden[addr], v);
if (v) {
EXPECT_EQ(v->addr, addr);
golden[addr] = 0;
DestroyAndFree(v);
}
}
}
for (uintptr_t addr = 0; addr < kRange; addr++) {
if (golden[addr] == 0)
continue;
SyncVar *v = tab.GetAndRemove(thr, pc, addr);
EXPECT_EQ(v, golden[addr]);
EXPECT_EQ(v->addr, addr);
DestroyAndFree(v);
}
TEST(MetaMap, MoveMemory) {
ThreadState *thr = cur_thread();
MetaMap *m = &ctx->metamap;
u64 block1[4] = {}; // fake malloc block
u64 block2[4] = {}; // fake malloc block
m->AllocBlock(thr, 0, (uptr)&block1[0], 3 * sizeof(u64));
m->AllocBlock(thr, 0, (uptr)&block1[3], 1 * sizeof(u64));
SyncVar *s1 = m->GetOrCreateAndLock(thr, 0, (uptr)&block1[0], true);
s1->mtx.Unlock();
SyncVar *s2 = m->GetOrCreateAndLock(thr, 0, (uptr)&block1[1], true);
s2->mtx.Unlock();
m->MoveMemory((uptr)&block1[0], (uptr)&block2[0], 4 * sizeof(u64));
MBlock *mb1 = m->GetBlock((uptr)&block1[0]);
EXPECT_EQ(mb1, (MBlock*)0);
MBlock *mb2 = m->GetBlock((uptr)&block1[3]);
EXPECT_EQ(mb2, (MBlock*)0);
mb1 = m->GetBlock((uptr)&block2[0]);
EXPECT_NE(mb1, (MBlock*)0);
EXPECT_EQ(mb1->siz, 3 * sizeof(u64));
mb2 = m->GetBlock((uptr)&block2[3]);
EXPECT_NE(mb2, (MBlock*)0);
EXPECT_EQ(mb2->siz, 1 * sizeof(u64));
s1 = m->GetIfExistsAndLock((uptr)&block1[0]);
EXPECT_EQ(s1, (SyncVar*)0);
s2 = m->GetIfExistsAndLock((uptr)&block1[1]);
EXPECT_EQ(s2, (SyncVar*)0);
s1 = m->GetIfExistsAndLock((uptr)&block2[0]);
EXPECT_NE(s1, (SyncVar*)0);
EXPECT_EQ(s1->addr, (uptr)&block2[0]);
s1->mtx.Unlock();
s2 = m->GetIfExistsAndLock((uptr)&block2[1]);
EXPECT_NE(s2, (SyncVar*)0);
EXPECT_EQ(s2->addr, (uptr)&block2[1]);
s2->mtx.Unlock();
m->FreeRange(thr, 0, (uptr)&block2[0], 4 * sizeof(u64));
}

} // namespace __tsan
12 changes: 9 additions & 3 deletions compiler-rt/test/tsan/java_alloc.cc
@@ -19,14 +19,20 @@ void *Thread(void *p) {
}

int main() {
jptr jheap = (jptr)malloc(kHeapSize);
jptr jheap = (jptr)malloc(kHeapSize + 8) + 8;
__tsan_java_init(jheap, kHeapSize);
pthread_t th;
pthread_create(&th, 0, Thread, (void*)(jheap + kHeapSize / 4));
stress(jheap);
pthread_join(th, 0);
printf("OK\n");
return __tsan_java_fini();
if (__tsan_java_fini() != 0) {
printf("FAILED\n");
return 1;
}
printf("DONE\n");
return 0;
}

// CHECK-NOT: WARNING: ThreadSanitizer: data race
// CHECK-NOT: FAILED
// CHECK: DONE
2 changes: 1 addition & 1 deletion compiler-rt/test/tsan/java_lock_rec_race.cc
@@ -25,7 +25,7 @@ void *Thread(void *p) {

int main() {
int const kHeapSize = 1024 * 1024;
void *jheap = malloc(kHeapSize);
void *jheap = (char*)malloc(kHeapSize + 8) + 8;
__tsan_java_init((jptr)jheap, kHeapSize);
const int kBlockSize = 16;
__tsan_java_alloc((jptr)jheap, kBlockSize);
2 changes: 1 addition & 1 deletion compiler-rt/test/tsan/java_race.cc
@@ -8,7 +8,7 @@ void *Thread(void *p) {

int main() {
int const kHeapSize = 1024 * 1024;
void *jheap = malloc(kHeapSize);
void *jheap = (char*)malloc(kHeapSize + 8) + 8;
__tsan_java_init((jptr)jheap, kHeapSize);
const int kBlockSize = 16;
__tsan_java_alloc((jptr)jheap, kBlockSize);
2 changes: 1 addition & 1 deletion compiler-rt/test/tsan/java_race_move.cc
@@ -12,7 +12,7 @@ void *Thread(void *p) {

int main() {
int const kHeapSize = 1024 * 1024;
void *jheap = malloc(kHeapSize);
void *jheap = (char*)malloc(kHeapSize + 8) + 8;
__tsan_java_init((jptr)jheap, kHeapSize);
const int kBlockSize = 64;
int const kMove = 1024;
11 changes: 6 additions & 5 deletions compiler-rt/test/tsan/mutexset7.cc
@@ -13,12 +13,13 @@ void *Thread1(void *x) {
}

void *Thread2(void *x) {
pthread_mutex_t mtx;
pthread_mutex_init(&mtx, 0);
pthread_mutex_lock(&mtx);
pthread_mutex_t *mtx = new pthread_mutex_t;
pthread_mutex_init(mtx, 0);
pthread_mutex_lock(mtx);
Global--;
pthread_mutex_unlock(&mtx);
pthread_mutex_destroy(&mtx);
pthread_mutex_unlock(mtx);
pthread_mutex_destroy(mtx);
delete mtx;
return NULL;
}

26 changes: 0 additions & 26 deletions compiler-rt/test/tsan/oob_race.cc

This file was deleted.