Skip to content

Commit

Permalink
[tsan] First commit of ThreadSanitizer (TSan) run-time library.
Browse files Browse the repository at this point in the history
Algorithm description: http://code.google.com/p/thread-sanitizer/wiki/ThreadSanitizerAlgorithm

Status:
The tool is known to work on large real-life applications, but still has quite a few rough edges.
Nothing is guaranteed yet.

The tool works on x86_64 Linux.
Support for 64-bit MacOS 10.7+ is planned for late 2012.
Support for 32-bit OSes is doable, but problematic and not yet planned.

Further commits coming:
  - tests
  - makefiles
  - documentation
  - clang driver patch

The code was previously developed at http://code.google.com/p/data-race-test/source/browse/trunk/v2/
by Dmitry Vyukov and Kostya Serebryany with contributions from
Timur Iskhodzhanov, Alexander Potapenko, Alexey Samsonov and Evgeniy Stepanov.

llvm-svn: 156542
  • Loading branch information
kcc committed May 10, 2012
1 parent 1a65397 commit 4ad375f
Show file tree
Hide file tree
Showing 45 changed files with 7,978 additions and 0 deletions.
47 changes: 47 additions & 0 deletions compiler-rt/lib/tsan/rtl/tsan_allocator.cc
Original file line number Diff line number Diff line change
@@ -0,0 +1,47 @@
//===-- tsan_allocator.cc ---------------------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//
#include "tsan_allocator.h"

// Provisional implementation.
extern "C" void *__libc_malloc(__tsan::uptr size);
extern "C" void __libc_free(void *ptr);

namespace __tsan {

u64 kBlockMagic = 0x6A6CB03ABCEBC041ull;

void AllocInit() {
}

void *Alloc(uptr sz) {
void *p = __libc_malloc(sz + sizeof(u64));
((u64*)p)[0] = kBlockMagic;
return (char*)p + sizeof(u64);
}

void Free(void *p) {
CHECK_NE(p, (char*)0);
p = (char*)p - sizeof(u64);
CHECK_EQ(((u64*)p)[0], kBlockMagic);
((u64*)p)[0] = 0;
__libc_free(p);
}

void *AllocBlock(void *p) {
CHECK_NE(p, (void*)0);
u64 *pp = (u64*)((uptr)p & ~0x7);
for (; pp[0] != kBlockMagic; pp--) {}
return pp + 1;
}

} // namespace __tsan
29 changes: 29 additions & 0 deletions compiler-rt/lib/tsan/rtl/tsan_allocator.h
Original file line number Diff line number Diff line change
@@ -0,0 +1,29 @@
//===-- tsan_allocator.h ----------------------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//
#ifndef TSAN_ALLOCATOR_H
#define TSAN_ALLOCATOR_H

#include "tsan_defs.h"

namespace __tsan {

void AllocInit();
void *Alloc(uptr sz);
void Free(void *p); // Does not accept NULL.
// Given the pointer p into a valid allocated block,
// returns a pointer to the beginning of the block.
void *AllocBlock(void *p);

} // namespace __tsan

#endif // TSAN_ALLOCATOR_H
140 changes: 140 additions & 0 deletions compiler-rt/lib/tsan/rtl/tsan_atomic.h
Original file line number Diff line number Diff line change
@@ -0,0 +1,140 @@
//===-- tsan_atomic.h -------------------------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
// Atomic operations. For now implies IA-32/Intel64.
//===----------------------------------------------------------------------===//

#ifndef TSAN_ATOMIC_H
#define TSAN_ATOMIC_H

#include "tsan_defs.h"

namespace __tsan {

const int kCacheLineSize = 64;

enum memory_order {
memory_order_relaxed = 1 << 0,
memory_order_consume = 1 << 1,
memory_order_acquire = 1 << 2,
memory_order_release = 1 << 3,
memory_order_acq_rel = 1 << 4,
memory_order_seq_cst = 1 << 5,
};

struct atomic_uint32_t {
typedef u32 Type;
volatile Type val_dont_use;
};

struct atomic_uint64_t {
typedef u64 Type;
volatile Type val_dont_use;
};

struct atomic_uintptr_t {
typedef uptr Type;
volatile Type val_dont_use;
};

INLINE void atomic_signal_fence(memory_order) {
__asm__ __volatile__("" ::: "memory");
}

INLINE void atomic_thread_fence(memory_order) {
__asm__ __volatile__("mfence" ::: "memory");
}

INLINE void proc_yield(int cnt) {
__asm__ __volatile__("" ::: "memory");
for (int i = 0; i < cnt; i++)
__asm__ __volatile__("pause");
__asm__ __volatile__("" ::: "memory");
}

template<typename T>
INLINE typename T::Type atomic_load(
const volatile T *a, memory_order mo) {
DCHECK(mo & (memory_order_relaxed | memory_order_consume
| memory_order_acquire | memory_order_seq_cst));
DCHECK(!((uptr)a % sizeof(*a)));
typename T::Type v;
if (mo == memory_order_relaxed) {
v = a->val_dont_use;
} else {
atomic_signal_fence(memory_order_seq_cst);
v = a->val_dont_use;
atomic_signal_fence(memory_order_seq_cst);
}
return v;
}

template<typename T>
INLINE void atomic_store(volatile T *a, typename T::Type v, memory_order mo) {
DCHECK(mo & (memory_order_relaxed | memory_order_release
| memory_order_seq_cst));
DCHECK(!((uptr)a % sizeof(*a)));
if (mo == memory_order_relaxed) {
a->val_dont_use = v;
} else {
atomic_signal_fence(memory_order_seq_cst);
a->val_dont_use = v;
atomic_signal_fence(memory_order_seq_cst);
}
if (mo == memory_order_seq_cst)
atomic_thread_fence(memory_order_seq_cst);
}

template<typename T>
INLINE typename T::Type atomic_fetch_add(volatile T *a,
typename T::Type v, memory_order mo) {
(void)mo;
DCHECK(!((uptr)a % sizeof(*a)));
return __sync_fetch_and_add(&a->val_dont_use, v);
}

template<typename T>
INLINE typename T::Type atomic_fetch_sub(volatile T *a,
typename T::Type v, memory_order mo) {
(void)mo;
DCHECK(!((uptr)a % sizeof(*a)));
return __sync_fetch_and_add(&a->val_dont_use, -v);
}

INLINE uptr atomic_exchange(volatile atomic_uintptr_t *a, uptr v,
memory_order mo) {
__asm__ __volatile__("xchg %1, %0" : "+r"(v), "+m"(*a) : : "memory", "cc");
return v;
}

template<typename T>
INLINE bool atomic_compare_exchange_strong(volatile T *a,
typename T::Type *cmp,
typename T::Type xchg,
memory_order mo) {
typedef typename T::Type Type;
Type cmpv = *cmp;
Type prev = __sync_val_compare_and_swap(&a->val_dont_use, cmpv, xchg);
if (prev == cmpv)
return true;
*cmp = prev;
return false;
}

INLINE bool atomic_compare_exchange_weak(volatile atomic_uintptr_t *a,
uptr *cmp, uptr xchg,
memory_order mo) {
return atomic_compare_exchange_strong(a, cmp, xchg, mo);
}

} // namespace __tsan

#endif // TSAN_ATOMIC_H
99 changes: 99 additions & 0 deletions compiler-rt/lib/tsan/rtl/tsan_clock.cc
Original file line number Diff line number Diff line change
@@ -0,0 +1,99 @@
//===-- tsan_clock.cc -------------------------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//
#include "tsan_clock.h"
#include "tsan_rtl.h"

// It's possible to optimize clock operations for some important cases
// so that they are O(1). The cases include singletons, once's, local mutexes.
// First, SyncClock must be re-implemented to allow indexing by tid.
// It need not be a full vector clock, though. For example it may
// be a multi-level table.
// Then, each slot in SyncClock must contain a dirty bit (it's united with
// the clock value, so no space increase). The acquire algorithm looks
// as follows:
// void acquire(thr, tid, thr_clock, sync_clock) {
// if (!sync_clock[tid].dirty)
// return; // No new info to acquire.
// // This handles constant reads of singleton pointers and
// // stop-flags.
// acquire_impl(thr_clock, sync_clock); // As usual, O(N).
// sync_clock[tid].dirty = false;
// sync_clock.dirty_count--;
// }
// The release operation looks as follows:
// void release(thr, tid, thr_clock, sync_clock) {
// // thr->sync_cache is a simple fixed-size hash-based cache that holds
// // several previous sync_clock's.
// if (thr->sync_cache[sync_clock] >= thr->last_acquire_epoch) {
// // The thread did no acquire operations since last release on this clock.
// // So update only the thread's slot (other slots can't possibly change).
// sync_clock[tid].clock = thr->epoch;
// if (sync_clock.dirty_count == sync_clock.cnt
// || (sync_clock.dirty_count == sync_clock.cnt - 1
// && sync_clock[tid].dirty == false))
// // All dirty flags are set, bail out.
// return;
// set all dirty bits, but preserve the thread's bit. // O(N)
// update sync_clock.dirty_count;
// return;
// }
// release_impl(thr_clock, sync_clock); // As usual, O(N).
// set all dirty bits, but preserve the thread's bit.
// // The previous step is combined with release_impl(), so that
// // we scan the arrays only once.
// update sync_clock.dirty_count;
// }

namespace __tsan {

ThreadClock::ThreadClock() {
nclk_ = 0;
for (uptr i = 0; i < (uptr)kMaxTid; i++)
clk_[i] = 0;
}

void ThreadClock::acquire(const SyncClock *src) {
DCHECK(nclk_ <= kMaxTid);
DCHECK(src->clk_.Size() <= kMaxTid);

const uptr nclk = src->clk_.Size();
if (nclk == 0)
return;
nclk_ = max(nclk_, nclk);
for (uptr i = 0; i < nclk; i++) {
if (clk_[i] < src->clk_[i])
clk_[i] = src->clk_[i];
}
}

void ThreadClock::release(SyncClock *dst) const {
DCHECK(nclk_ <= kMaxTid);
DCHECK(dst->clk_.Size() <= kMaxTid);

if (dst->clk_.Size() < nclk_)
dst->clk_.Resize(nclk_);
for (uptr i = 0; i < nclk_; i++) {
if (dst->clk_[i] < clk_[i])
dst->clk_[i] = clk_[i];
}
}

void ThreadClock::acq_rel(SyncClock *dst) {
acquire(dst);
release(dst);
}

SyncClock::SyncClock()
: clk_(MBlockClock) {
}
} // namespace __tsan
79 changes: 79 additions & 0 deletions compiler-rt/lib/tsan/rtl/tsan_clock.h
Original file line number Diff line number Diff line change
@@ -0,0 +1,79 @@
//===-- tsan_clock.h --------------------------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//
#ifndef TSAN_CLOCK_H
#define TSAN_CLOCK_H

#include "tsan_defs.h"
#include "tsan_vector.h"

namespace __tsan {

// The clock that lives in sync variables (mutexes, atomics, etc).
class SyncClock {
public:
SyncClock();

uptr size() const {
return clk_.Size();
}

void Reset() {
clk_.Reset();
}

private:
Vector<u64> clk_;
friend struct ThreadClock;
};

// The clock that lives in threads.
struct ThreadClock {
public:
ThreadClock();

u64 get(int tid) const {
DCHECK(tid < kMaxTid);
return clk_[tid];
}

void set(int tid, u64 v) {
DCHECK(tid < kMaxTid);
DCHECK(v >= clk_[tid]);
clk_[tid] = v;
if ((int)nclk_ <= tid)
nclk_ = tid + 1;
}

void tick(int tid) {
DCHECK(tid < kMaxTid);
clk_[tid]++;
if ((int)nclk_ <= tid)
nclk_ = tid + 1;
}

uptr size() const {
return nclk_;
}

void acquire(const SyncClock *src);
void release(SyncClock *dst) const;
void acq_rel(SyncClock *dst);

private:
uptr nclk_;
u64 clk_[kMaxTid];
};

} // namespace __tsan

#endif // TSAN_CLOCK_H
Loading

0 comments on commit 4ad375f

Please sign in to comment.