race-free version with specific fake locks.
John Mellor-Crummey committed Jan 7, 2018
1 parent 1c6863f commit 43dd5eb
Showing 14 changed files with 240 additions and 126 deletions.
common/h/IBSTree-fast.h (72 changes: 47 additions & 25 deletions)
@@ -47,6 +47,7 @@
#include <boost/thread/shared_mutex.hpp>
#include <boost/thread/shared_lock_guard.hpp>

#include <race-detector-annotations.h>

namespace Dyninst
{
@@ -105,59 +106,76 @@ namespace Dyninst
template <class ITYPE>
void IBSTree_fast<ITYPE>::insert(ITYPE* entry)
{
// suppress race reports from shared lock guard initialization inside boost
race_detector_fake_lock_acquire(race_detector_fake_lock(*this));
{
boost::lock_guard<IBSTree_fast<ITYPE> > u(*this);
// find in overlapping first
std::set<ITYPE*> dummy;
if(overlapping_intervals.find(entry, dummy))
{
overlapping_intervals.insert(entry);
} else {
typename interval_set::iterator lower =
unique_intervals.upper_bound(entry->low());
// lower.high first >= entry.low
if(lower != unique_intervals.end() && (**lower == *entry)) return;
typename interval_set::iterator upper = lower;
while(upper != unique_intervals.end() &&
(*upper)->low() <= entry->high())
{
overlapping_intervals.insert(*upper);
++upper;
}
if(upper != lower)
{
unique_intervals.erase(lower, upper);
overlapping_intervals.insert(entry);
}
else
{
unique_intervals.insert(entry);
}
}
}
race_detector_fake_lock_release(race_detector_fake_lock(*this));
}
template <class ITYPE>
void IBSTree_fast<ITYPE>::remove(ITYPE* entry)
{
// race_detector_fake_lock_acquire(race_detector_fake_lock(*this));
{
boost::lock_guard<IBSTree_fast<ITYPE> > u(*this);
overlapping_intervals.remove(entry);
typename interval_set::iterator found = unique_intervals.find(entry->high());
if(found != unique_intervals.end() && *found == entry) unique_intervals.erase(found);
}
// race_detector_fake_lock_release(race_detector_fake_lock(*this));
}
template<class ITYPE>
int IBSTree_fast<ITYPE>::find(interval_type X, std::set<ITYPE*> &results) const
{
int count = 0;
// suppress race reports from shared lock guard initialization inside boost
race_detector_fake_lock_acquire(race_detector_fake_lock(*this));
do
{
boost::shared_lock_guard<const IBSTree_fast<ITYPE> > g(*this);
int num_old_results = results.size();

int num_overlapping = overlapping_intervals.find(X, results);
if(num_overlapping > 0) { count = num_overlapping; break; }

typename interval_set::const_iterator found_unique = unique_intervals.upper_bound(X);
if(found_unique != unique_intervals.end())
{
if((*found_unique)->low() > X) { count = 0; break; }
results.insert(*found_unique);
}
count = results.size() - num_old_results;
} while (0);
race_detector_fake_lock_release(race_detector_fake_lock(*this));
return count;
}
template <typename ITYPE>
int IBSTree_fast<ITYPE>::find(ITYPE* I, std::set<ITYPE*>&results) const
@@ -209,9 +227,13 @@ namespace Dyninst
template <typename ITYPE>
void IBSTree_fast<ITYPE>::clear()
{
// race_detector_fake_lock_acquire(race_detector_fake_lock(*this));
{
boost::lock_guard<IBSTree_fast<ITYPE> > u(*this);
overlapping_intervals.clear();
unique_intervals.clear();
}
// race_detector_fake_lock_release(race_detector_fake_lock(*this));
}

}
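
The control-flow change in find() above is worth noting: the old early returns would have skipped the paired fake-lock release, so the body is now wrapped in a do { ... } while (0) and every early exit becomes a break. A minimal sketch of the same idiom, where table and guarded_lookup are hypothetical stand-ins rather than Dyninst code:

#include <set>
#include <race-detector-annotations.h>

static std::set<int> table;   // hypothetical shared structure standing in for the interval trees

int guarded_lookup(int key)
{
    int count = 0;
    race_detector_fake_lock_acquire(race_detector_fake_lock(table));
    do {
        if (table.find(key) == table.end()) { count = 0; break; }   // was: return 0;
        count = 1;                                                  // was: return 1;
    } while (0);
    race_detector_fake_lock_release(race_detector_fake_lock(table));
    return count;
}
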
common/h/race-detector-annotations.h (10 changes: 6 additions & 4 deletions)
@@ -8,16 +8,18 @@ extern "C" {
#endif

#ifdef ENABLE_RACE_DETECTION
COMMON_EXPORT void race_detector_fake_lock_acquire(void *fake_lock);
COMMON_EXPORT void race_detector_fake_lock_release(void *fake_lock);
COMMON_EXPORT void race_detector_forget_access_history(void *loc, unsigned int nbytes);

#else
#define race_detector_fake_lock_acquire(fake_lock)
#define race_detector_fake_lock_release(fake_lock)
#define race_detector_forget_access_history(loc, nbytes)
#endif

#define race_detector_fake_lock(lock) ((void *) &lock)

#ifdef __cplusplus
}
#endif
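
A minimal usage sketch of the revised interface (counter and counter_mutex are illustrative, not Dyninst symbols): the fake lock is just the address of whatever object names the protected data, so every annotated section that passes the same object appears to the race detector to hold the same lock, while the real mutual exclusion still comes from an ordinary lock.

#include <mutex>
#include <race-detector-annotations.h>

static std::mutex counter_mutex;   // the real lock (illustrative)
static long counter = 0;           // shared data the annotations describe

void bump_counter()
{
    race_detector_fake_lock_acquire(race_detector_fake_lock(counter));
    {
        std::lock_guard<std::mutex> g(counter_mutex);   // actual mutual exclusion
        ++counter;
    }
    race_detector_fake_lock_release(race_detector_fake_lock(counter));
}

When ENABLE_RACE_DETECTION is not defined, the acquire and release calls are macros that expand to nothing, so the annotations add no overhead to ordinary builds.
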
common/src/arch-x86.C (2 changes: 2 additions & 0 deletions)
@@ -8770,7 +8770,9 @@ ia32_instruction &ia32_decode(unsigned int capa, const unsigned char *addr, ia32
default: break;
}

#if STUB_OUT_TO_AVOID_RACE
gotit->id = newID;
#endif
}

instruct.entry = gotit;
common/src/mcs-lock.C (10 changes: 5 additions & 5 deletions)
@@ -33,7 +33,7 @@
void
mcs_lock(mcs_lock_t &l, mcs_node_t &me)
{
race_detector_fake_lock_acquire(&l);

//--------------------------------------------------------------------
// initialize my queue node
@@ -80,7 +80,7 @@ mcs_lock(mcs_lock_t &l, mcs_node_t &me)
bool
mcs_trylock(mcs_lock_t &l, mcs_node_t &me)
{
race_detector_fake_lock_acquire(&l);
//--------------------------------------------------------------------
// initialize my queue node
//--------------------------------------------------------------------
@@ -100,7 +100,7 @@ mcs_trylock(mcs_lock_t &l, mcs_node_t &me)
std::memory_order_acq_rel,
std::memory_order_relaxed);
if (!locked) {
race_detector_fake_lock_release(&l);
}
return locked;
}
@@ -131,7 +131,7 @@ mcs_unlock(mcs_lock_t &l, mcs_node_t &me)
// I removed myself from the queue; I will never have a
// successor, so I'm done
//------------------------------------------------------------------
race_detector_fake_lock_release(&l);
return;
}

Expand All @@ -143,5 +143,5 @@ mcs_unlock(mcs_lock_t &l, mcs_node_t &me)
}

atomic_store_explicit(&successor->blocked, false, std::memory_order_release);
race_detector_fake_lock_release(&l);
}
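
For reference, a sketch of the calling convention the annotations above decorate (queue_lock, shared_value, and update_shared_value are hypothetical, and the header name and lock initialization are assumed rather than shown in this diff): each caller supplies its own queue node, typically on its stack, and the same node must be passed to the matching unlock.

#include "mcs-lock.h"   // assumed location of the declarations used above

static mcs_lock_t queue_lock;   // hypothetical shared lock (initialization per mcs-lock.h omitted)
static long shared_value = 0;   // hypothetical data it protects

void update_shared_value(long v)
{
    mcs_node_t me;                // per-caller queue node, lives on this thread's stack
    mcs_lock(queue_lock, me);     // enqueue and spin until this node reaches the head
    shared_value = v;             // critical section
    mcs_unlock(queue_lock, me);   // pass the lock to the queued successor, if any
}

mcs_trylock has the same shape but returns false instead of waiting, which is why its failure path above releases the fake lock immediately.
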
common/src/pfq-rwlock.C (4 changes: 2 additions & 2 deletions)
@@ -80,7 +80,7 @@ pfq_rwlock_init(pfq_rwlock_t &l)
void
pfq_rwlock_read_lock(pfq_rwlock_t &l)
{
race_detector_fake_lock_acquire(&l);
uint32_t ticket = atomic_fetch_add_explicit(&l.rin, READER_INCREMENT, std::memory_order_acq_rel);

if (ticket & WRITER_PRESENT) {
@@ -102,7 +102,7 @@ pfq_rwlock_read_unlock(pfq_rwlock_t &l)
if (ticket == atomic_load_explicit(&l.last, std::memory_order_acquire))
atomic_store_explicit(&l.whead->blocked, false, std::memory_order_release);
}
race_detector_fake_lock_release(&l);
}


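
Reader-side usage, for context (rw, shared_table, and read_entry are hypothetical, and the header name is assumed): the read lock and unlock calls now bracket the critical section with a fake lock named after the lock object itself.

#include "pfq-rwlock.h"   // assumed location of the declarations used above

static pfq_rwlock_t rw;        // hypothetical shared lock
static int shared_table[64];   // hypothetical data it protects

void init_table()
{
    pfq_rwlock_init(rw);        // one-time setup, as declared above
}

int read_entry(int i)
{
    pfq_rwlock_read_lock(rw);     // also enters the fake lock &rw for the race detector
    int v = shared_table[i];      // read-side critical section
    pfq_rwlock_read_unlock(rw);   // leaves the fake lock on the way out
    return v;
}
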
common/src/race-detector-annotations.C (18 changes: 4 additions & 14 deletions)
@@ -46,35 +46,25 @@



//****************************************************************************
// public operations
//****************************************************************************


void
race_detector_fake_lock_acquire(void *fake_lock)
{
#ifdef __INTEL_COMPILER
__cilkscreen_acquire_lock(fake_lock);
#endif
}


void
race_detector_fake_lock_release(void *fake_lock)
{
#ifdef __INTEL_COMPILER
__cilkscreen_release_lock(fake_lock);
#endif
}

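
The point of threading a fake_lock argument through (and dropping the single static placeholder) is that unrelated critical sections no longer appear to share one global lock: the detector links only the sections that name the same object, rather than treating every annotated section in the process as protected by one shared placeholder. A sketch of the distinction, with list_a and list_b as hypothetical stand-ins for two unrelated shared structures:

#include <race-detector-annotations.h>

static int list_a;   // hypothetical shared structure #1
static int list_b;   // hypothetical shared structure #2

void touch_a()
{
    race_detector_fake_lock_acquire(race_detector_fake_lock(list_a));
    /* ... operate on the data behind list_a ... */
    race_detector_fake_lock_release(race_detector_fake_lock(list_a));
}

void touch_b()
{
    race_detector_fake_lock_acquire(race_detector_fake_lock(list_b));   // a distinct fake lock
    /* ... operate on the data behind list_b ... */
    race_detector_fake_lock_release(race_detector_fake_lock(list_b));
}
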
common/src/singleton_object_pool.h (14 changes: 8 additions & 6 deletions)
@@ -38,6 +38,8 @@
#include "dthread.h"
#include "race-detector-annotations.h"

#define fake_singleton_pool_lock \
race_detector_fake_lock(singleton_object_pool::malloc)

// This is only safe for objects with nothrow constructors...
template <typename T, typename Alloc = boost::default_user_allocator_new_delete>
@@ -48,18 +48,18 @@ class singleton_object_pool

inline static void free(T* free_me)
{
race_detector_fake_lock_acquire(fake_singleton_pool_lock);
parent_t::free(free_me);
race_detector_forget_access_history(free_me, sizeof(T));
race_detector_fake_lock_release(fake_singleton_pool_lock);
}

inline static T* malloc()
{
race_detector_fake_lock_acquire(fake_singleton_pool_lock);
void* buf = parent_t::malloc();
race_detector_forget_access_history(buf, sizeof(T));
race_detector_fake_lock_release(fake_singleton_pool_lock);
return reinterpret_cast<T*>(buf);
}

@@ -136,9 +138,9 @@ class singleton_object_pool
inline static void destroy(T* const kill_me)
{
if(is_from(kill_me)) {
race_detector_fake_lock_acquire(fake_singleton_pool_lock);
kill_me->~T();
race_detector_fake_lock_release(fake_singleton_pool_lock);
free(kill_me);
}
}
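
The forget_access_history calls above matter because the pool recycles storage: without clearing the detector's history for a block, writes by the next owner of the same bytes could be paired with accesses by the previous owner and reported as races. A sketch of the allocate/construct/destroy cycle (Insn and recycle_example are hypothetical; the include path is assumed, and placement new stands in for whatever construction helper callers normally use):

#include <new>
#include "singleton_object_pool.h"   // assumed include path for the pool above

struct Insn { int id; };             // hypothetical pooled type with a nothrow constructor

void recycle_example()
{
    Insn *i = singleton_object_pool<Insn>::malloc();   // access history for these bytes is wiped here
    new (i) Insn{42};                                  // construct in place
    singleton_object_pool<Insn>::destroy(i);           // runs ~Insn and returns the block to the pool
}
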
dataflowAPI/src/debug_dataflow.C (4 changes: 2 additions & 2 deletions)
@@ -64,7 +64,7 @@ static int check_debug_flag(int &flag)
// use a fake lock to suppress reports about races associated with
// concurrent setting and checking of the initialized flag by
// code generated for the implementation of call_once.
race_detector_fake_lock_acquire(race_detector_fake_lock(df_debug_slicing));
static std::once_flag initialized;

#if defined(_MSC_VER)
@@ -101,7 +101,7 @@ static int check_debug_flag(int &flag)

});

race_detector_fake_lock_release(race_detector_fake_lock(df_debug_slicing));

#if defined(_MSC_VER)
#pragma warning(pop)
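
A condensed sketch of the check_debug_flag pattern above (my_debug_flag, MY_DEBUG_VAR, and check_my_debug_flag are hypothetical): the flag is initialized exactly once via std::call_once, and the fake lock keeps the detector from flagging the generated once-flag bookkeeping as racy.

#include <cstdlib>
#include <mutex>
#include <race-detector-annotations.h>

static int my_debug_flag = 0;

static int check_my_debug_flag()
{
    race_detector_fake_lock_acquire(race_detector_fake_lock(my_debug_flag));
    static std::once_flag initialized;
    std::call_once(initialized, []() {
        my_debug_flag = (std::getenv("MY_DEBUG_VAR") != nullptr);   // hypothetical variable name
    });
    race_detector_fake_lock_release(race_detector_fake_lock(my_debug_flag));
    return my_debug_flag;
}
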
parseAPI/src/ParseData.C (19 changes: 13 additions & 6 deletions)
@@ -40,9 +40,13 @@ using namespace Dyninst::ParseAPI;

void ParseFrame::set_status(Status s)
{
race_detector_fake_lock_acquire(race_detector_fake_lock(_status));
{
boost::lock_guard<ParseFrame> g(*this);
_status.store(s);
_pd->setFrameStatus(codereg,func->addr(),s);
}
race_detector_fake_lock_release(race_detector_fake_lock(_status));
}

ParseWorkElem * ParseFrame::mkWork(
@@ -111,7 +115,9 @@ StandardParseData::findFuncs(CodeRegion * /* cr */, Address start,
int StandardParseData::findBlocks(CodeRegion * /* cr */, Address addr,
set<Block *> & blocks)
{
race_detector_fake_lock_acquire(race_detector_fake_lock(_rdata));
int ret = _rdata.findBlocks(addr,blocks);
race_detector_fake_lock_release(race_detector_fake_lock(_rdata));
return ret;
}

@@ -148,11 +154,12 @@
void
StandardParseData::remove_frame(ParseFrame * pf)
{
race_detector_fake_lock_acquire(race_detector_fake_lock(_rdata));
{
tbb::concurrent_hash_map<Address, ParseFrame*>::accessor a;
if(_rdata.frame_map.find(a, pf->func->addr())) _rdata.frame_map.erase(a);
}
race_detector_fake_lock_release(race_detector_fake_lock(_rdata));
}

ParseFrame *
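
The remove_frame change above applies the same recipe to a container that is already thread-safe: tbb::concurrent_hash_map does its own locking through the accessor, but the race detector does not necessarily understand TBB's internal synchronization, so the operation is wrapped in a fake lock named after the map. A reduced sketch (frames and drop_frame are hypothetical):

#include <tbb/concurrent_hash_map.h>
#include <race-detector-annotations.h>

static tbb::concurrent_hash_map<int, int> frames;   // hypothetical map standing in for frame_map

void drop_frame(int key)
{
    race_detector_fake_lock_acquire(race_detector_fake_lock(frames));
    {
        tbb::concurrent_hash_map<int, int>::accessor a;
        if (frames.find(a, key)) frames.erase(a);   // erase while the accessor holds the element
    }
    race_detector_fake_lock_release(race_detector_fake_lock(frames));
}
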
