Cache: Fix deadlocking problem.

First, we made the mutex acquisition order:

Line -> Cache -> and then only weakly attempt (try_lock) other lines when invalidating.

We also force the user to release the handle manually.

This is required because all of our access patterns hand out a pointer
just outside of a mutex; without holding the lock until release, some
other thread could show up and delete the data behind our pointer.
commit d35093e35c3a8a23ecf605a0b8643914c84c6fc3 (1 parent: 509aee3)
Authored by Zack Moratto
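
To make the caller-facing change concrete, here is a hedged usage sketch. BlockGenerator is the test generator from TestCache.cxx; its constructor argument, the exact signature of Cache::insert, and do_something_with are assumptions for illustration, not code from this commit.

// Hypothetical caller of the new Handle semantics.
vw::Cache cache( 2*1024*1024 );                               // 2 MB budget (illustrative)
vw::Cache::Handle<BlockGenerator> h = cache.insert( BlockGenerator(0) );

do_something_with( *h );   // dereferencing generates the value and LOCKS the line mutex
h.release();               // the caller must release (the Handle destructor also will);
                           // until then no other thread can delete the data behind *h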
src/vw/Core/Cache.cc (127 lines changed)
@@ -23,36 +23,114 @@
#include <vw/Core/Cache.h>
#include <vw/Core/Debugging.h>
-void vw::Cache::allocate( size_t size ) {
- while( m_size+size > m_max_size ) {
- if( ! m_last_valid ) {
+void vw::Cache::allocate( size_t size, CacheLineBase* line ) {
+ // WARNING! YOU CAN NOT HOLD THE CACHE MUTEX AND THEN CALL
+ // INVALIDATE. That's a line -> cache -> line mutex hold. A
+ // deadlock!
+
+ // This step also implies the need to call validate.
+
+ uint64 local_evictions = 0;
+ size_t local_size, local_max_size;
+ CacheLineBase* local_last_valid;
+ size_t invalidate_loop_count = 0;
+ { // Locally buffer variables that should be accessed behind mutex
+ RecursiveMutex::Lock cache_lock( m_line_mgmt_mutex );
+ validate( line ); // Call here to ensure that last_valid is not us!
+ m_size += size;
+ local_size = m_size;
+ local_max_size = m_max_size;
+ local_last_valid = m_last_valid;
+ VW_CACHE_DEBUG( VW_OUT(DebugMessage, "cache") << "Cache allocated " << size << " bytes (" << m_size << " / " << m_max_size << " used)" << "\n"; );
+ }
+ while ( local_size > local_max_size ) {
+ if ( local_last_valid == line ) {
+ // We're trying to deallocate self! Call validate again to get
+ // our line back on top since some other threads have moved us.
+ RecursiveMutex::Lock cache_lock( m_line_mgmt_mutex );
+ validate( line );
+ local_last_valid = m_last_valid;
+ }
+ if ( local_last_valid == line || !local_last_valid ) {
VW_OUT(WarningMessage, "console") << "Warning: Cached object (" << size << ") larger than requested maximum cache size (" << m_max_size << "). Current Size = " << m_size << "\n";
VW_OUT(WarningMessage, "cache") << "Warning: Cached object (" << size << ") larger than requested maximum cache size (" << m_max_size << "). Current Size = " << m_size << "\n";
break;
}
- m_last_valid->invalidate();
- m_evictions++;
+ bool invalidated = local_last_valid->try_invalidate();
+ if (invalidated) {
+ local_evictions++;
+ { // Update local buffer by grabbing cache buffer
+ RecursiveMutex::Lock cache_lock( m_line_mgmt_mutex );
+ local_size = m_size;
+ local_last_valid = m_last_valid;
+ }
+ } else {
+ RecursiveMutex::Lock cache_lock( m_line_mgmt_mutex );
+ local_last_valid = local_last_valid->m_prev;
+ local_size = m_size;
+ if (!local_last_valid) {
+ invalidate_loop_count++;
+ if ( invalidate_loop_count == 3 ) {
+ // We tried hard to deallocate enough to do our
+ // allocation. However, there is a time to give up and just
+ // move on. In practice, even with this cop-out, we stay
+ // pretty close to our allocation amount.
+ break;
+ } {
+ RecursiveMutex::Lock cache_lock( m_line_mgmt_mutex );
+ local_last_valid = m_last_valid;
+ }
+ }
+ }
+ }
+ {
+ Mutex::WriteLock cache_lock( m_stats_mutex );
+ m_evictions += local_evictions;
}
- m_size += size;
- VW_CACHE_DEBUG( VW_OUT(DebugMessage, "cache") << "Cache allocated " << size << " bytes (" << m_size << " / " << m_max_size << " used)" << "\n"; )
}
void vw::Cache::resize( size_t size ) {
- Mutex::Lock lock(m_mutex);
- m_max_size = size;
- while( m_size > m_max_size ) {
- VW_ASSERT( m_last_valid, LogicErr() << "Cache is empty but has nonzero size!" );
- m_last_valid->invalidate();
+ // WARNING! YOU CAN NOT HOLD THE CACHE MUTEX AND THEN CALL
+ // INVALIDATE. That's a line -> cache -> line mutex hold. A
+ // deadlock!
+ size_t local_size, local_max_size;
+ CacheLineBase* local_last_valid;
+ { // Locally buffer variables that require Cache Mutex
+ RecursiveMutex::Lock cache_lock(m_line_mgmt_mutex);
+ m_max_size = size;
+ local_size = m_size;
+ local_max_size = m_max_size;
+ local_last_valid = m_last_valid;
+ }
+ while ( local_size > local_max_size ) {
+ VW_ASSERT( local_last_valid, LogicErr() << "Cache is empty but has nonzero size!" );
+ m_last_valid->invalidate(); // Problem ( probably grabs a line's mutex too )
+ { // Update local buffer by grabbing cache buffer
+ RecursiveMutex::Lock cache_lock( m_line_mgmt_mutex );
+ local_size = m_size;
+ local_last_valid = m_last_valid;
+ }
}
}
-void vw::Cache::deallocate( size_t size ) {
+size_t vw::Cache::max_size() {
+ RecursiveMutex::Lock cache_lock(m_line_mgmt_mutex);
+ return m_max_size;
+}
+
+void vw::Cache::deallocate( size_t size, CacheLineBase *line ) {
+ RecursiveMutex::Lock cache_lock(m_line_mgmt_mutex);
+
+ // This call implies the need to call invalidate
+ invalidate( line );
+
m_size -= size;
VW_CACHE_DEBUG( VW_OUT(DebugMessage, "cache") << "Cache deallocated " << size << " bytes (" << m_size << " / " << m_max_size << " used)" << "\n"; )
}
// Move the cache line to the top of the valid list.
void vw::Cache::validate( CacheLineBase *line ) {
+ RecursiveMutex::Lock cache_lock(m_line_mgmt_mutex);
if( line == m_first_valid ) return;
if( line == m_last_valid ) m_last_valid = line->m_prev;
if( line == m_first_invalid ) m_first_invalid = line->m_next;
@@ -67,6 +145,7 @@ void vw::Cache::validate( CacheLineBase *line ) {
// Move the cache line to the top of the invalid list.
void vw::Cache::invalidate( CacheLineBase *line ) {
+ RecursiveMutex::Lock cache_lock(m_line_mgmt_mutex);
if( line == m_first_valid ) m_first_valid = line->m_next;
if( line == m_last_valid ) m_last_valid = line->m_prev;
if( line->m_next ) line->m_next->m_prev = line->m_prev;
@@ -79,6 +158,7 @@ void vw::Cache::invalidate( CacheLineBase *line ) {
// Remove the cache line from the cache lists.
void vw::Cache::remove( CacheLineBase *line ) {
+ RecursiveMutex::Lock cache_lock(m_line_mgmt_mutex);
if( line == m_first_valid ) m_first_valid = line->m_next;
if( line == m_last_valid ) m_last_valid = line->m_prev;
if( line == m_first_invalid ) m_first_invalid = line->m_next;
@@ -89,6 +169,7 @@ void vw::Cache::remove( CacheLineBase *line ) {
// Move the cache line to the bottom of the valid list.
void vw::Cache::deprioritize( CacheLineBase *line ) {
+ RecursiveMutex::Lock cache_lock(m_line_mgmt_mutex);
if( line == m_last_valid ) return;
if( line == m_first_valid ) m_first_valid = line->m_next;
if( line->m_next ) line->m_next->m_prev = line->m_prev;
@@ -99,3 +180,23 @@ void vw::Cache::deprioritize( CacheLineBase *line ) {
m_last_valid = line;
}
+// Statistics request methods
+vw::uint64 vw::Cache::hits() {
+ Mutex::ReadLock cache_lock( m_stats_mutex );
+ return m_hits;
+}
+
+vw::uint64 vw::Cache::misses() {
+ Mutex::ReadLock cache_lock( m_stats_mutex );
+ return m_misses;
+}
+
+vw::uint64 vw::Cache::evictions() {
+ Mutex::ReadLock cache_lock( m_stats_mutex );
+ return m_evictions;
+}
+
+void vw::Cache::clear_stats() {
+ Mutex::WriteLock cache_lock( m_stats_mutex );
+ m_hits = m_misses = m_evictions = 0;
+}
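
The locking discipline that allocate() and resize() follow above reduces to the standalone sketch below. It is a simplified analogue, not the library code: std::mutex and std::recursive_mutex stand in for vw::Mutex and vw::RecursiveMutex, the list bookkeeping is collapsed into a local byte count, and the retry logic of the real loop is omitted.

#include <cstddef>
#include <mutex>

// Simplified cache line: only what the lock-ordering argument needs.
struct LineSketch {
  std::mutex  m_mutex;      // line mutex: always acquired BEFORE the cache mutex
  std::size_t m_bytes;
  LineSketch* m_prev;
  LineSketch( std::size_t bytes ) : m_bytes(bytes), m_prev(0) {}

  // Non-blocking invalidation: back off instead of blocking when the line is busy.
  bool try_invalidate() {
    if ( !m_mutex.try_lock() ) return false;
    // (the real CacheLine resets its value and calls Cache::deallocate here)
    m_mutex.unlock();
    return true;
  }
};

struct CacheSketch {
  std::recursive_mutex m_line_mgmt_mutex;
  std::size_t m_size, m_max_size;
  LineSketch* m_last_valid;
  CacheSketch( std::size_t max ) : m_size(0), m_max_size(max), m_last_valid(0) {}

  void allocate( std::size_t size ) {
    std::size_t local_size, local_max;
    LineSketch* victim;
    { // Snapshot shared state under the cache mutex, then release it.
      std::lock_guard<std::recursive_mutex> lock( m_line_mgmt_mutex );
      m_size += size;
      local_size = m_size;
      local_max  = m_max_size;
      victim     = m_last_valid;
    }
    // Evict with the cache mutex released, so a line that re-enters the
    // cache from inside try_invalidate() cannot invert the lock order.
    while ( local_size > local_max && victim ) {
      if ( victim->try_invalidate() )
        local_size -= victim->m_bytes;   // stand-in for the deallocate() bookkeeping
      victim = victim->m_prev;           // weakly move on, busy or not
    }
  }
};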
src/vw/Core/Cache.h (132 lines changed)
@@ -112,109 +112,135 @@ namespace vw {
friend class Cache;
protected:
Cache& cache() const { return m_cache; }
- inline void allocate() { m_cache.allocate(m_size); }
- inline void deallocate() { m_cache.deallocate(m_size); }
+ inline void allocate() { m_cache.allocate(m_size, this); }
+ inline void deallocate() { m_cache.deallocate(m_size, this); }
inline void validate() { m_cache.validate(this); }
- inline void remove() { m_cache.remove( this ); }
+ inline void remove() { m_cache.remove(this); }
inline void deprioritize() { m_cache.deprioritize(this); }
public:
CacheLineBase( Cache& cache, size_t size ) : m_cache(cache), m_prev(0), m_next(0), m_size(size) {}
virtual ~CacheLineBase() {}
virtual inline void invalidate() { m_cache.invalidate(this); }
+ virtual inline bool try_invalidate() { m_cache.invalidate(this); return true; }
virtual size_t size() const { return m_size; }
};
friend class CacheLineBase;
// CacheLine<>
+ //
+ // The mutex acquisition order is always:
+ // ACQUIRE LINE FIRST
+ // ACQUIRE CACHE's LINE MGMT SECOND
template <class GeneratorT>
class CacheLine : public CacheLineBase {
GeneratorT m_generator;
typedef typename boost::shared_ptr<typename core::detail::GenValue<GeneratorT>::type> value_type;
value_type m_value;
Mutex m_mutex; // Mutex for m_value and generation of this cache line
- unsigned m_generation_count;
+ uint64 m_generation_count;
public:
CacheLine( Cache& cache, GeneratorT const& generator )
: CacheLineBase(cache,core::detail::pointerish(generator)->size()), m_generator(generator), m_generation_count(0)
{
VW_CACHE_DEBUG( VW_OUT(DebugMessage, "cache") << "Cache creating CacheLine " << info() << "\n"; )
- Mutex::Lock cache_lock(cache.m_mutex);
CacheLineBase::invalidate();
}
virtual ~CacheLine() {
- Mutex::Lock cache_lock(cache().m_mutex);
invalidate();
VW_CACHE_DEBUG( VW_OUT(DebugMessage, "cache") << "Cache destroying CacheLine " << info() << "\n"; )
remove();
}
virtual void invalidate() {
- Mutex::Lock line_lock(m_mutex);
- if( ! m_value ) return;
+ Mutex::WriteLock line_lock(m_mutex);
+ if( !m_value ) return;
+
VW_CACHE_DEBUG( VW_OUT(DebugMessage, "cache") << "Cache invalidating CacheLine " << info() << "\n"; );
- CacheLineBase::invalidate();
- CacheLineBase::deallocate();
+ CacheLineBase::deallocate(); // Calls invalidate internally
+ m_value.reset();
+ }
+
+ virtual bool try_invalidate() {
+ bool have_lock = m_mutex.try_lock();
+ if ( !have_lock ) return false;
+ if ( !m_value ) {
+ m_mutex.unlock();
+ return true;
+ }
+
+ VW_CACHE_DEBUG( VW_OUT(DebugMessage, "cache") << "Cache invalidating CacheLine " << info() << "\n"; );
+ CacheLineBase::deallocate(); // Calls invalidate internally
m_value.reset();
+
+ m_mutex.unlock();
+ return true;
}
std::string info() {
+ Mutex::WriteLock line_lock(m_mutex);
std::ostringstream oss;
oss << typeid(this).name() << " " << this
<< " (size " << (int)size() << ", gen count " << m_generation_count << ")";
return oss.str();
}
+ // This grabs a lock and never releases it! The user must unlock it
+ // themselves, because we are handing them a pointer to an object
+ // that another thread could otherwise delete.
value_type const& value() {
- bool hit = true;
- Mutex::Lock line_lock(m_mutex);
- if( !m_value ) {
- m_generation_count++;
- hit = false;
- VW_CACHE_DEBUG( VW_OUT(DebugMessage, "cache") << "Cache generating CacheLine " << info() << "\n"; )
- {
- Mutex::Lock cache_lock(cache().m_mutex);
- CacheLineBase::allocate();
+ m_mutex.lock();
+ bool hit = (bool)m_value;
+ {
+ { // This should be abstracted into a call
+ Mutex::WriteLock cache_lock( cache().m_stats_mutex );
+ if (hit)
+ cache().m_hits++;
+ else
+ cache().m_misses++;
}
+ }
+ if( !hit ) {
+ VW_CACHE_DEBUG( VW_OUT(DebugMessage, "cache") << "Cache generating CacheLine " << info() << "\n"; );
+ CacheLineBase::allocate(); // Call validate internally
+
+ // Watch is just used for debugging / info
ScopedWatch sw((std::string("Cache ")
+ (m_generation_count == 1 ? "generating " : "regenerating ")
+ typeid(this).name()).c_str());
+ m_generation_count++;
m_value = core::detail::pointerish(m_generator)->generate();
}
- {
- Mutex::Lock cache_lock(cache().m_mutex);
- CacheLineBase::validate();
- if (hit)
- cache().m_hits++;
- else
- cache().m_misses++;
- }
return m_value;
}
+ void release() {
+ m_mutex.unlock();
+ }
+
bool valid() {
- Mutex::Lock line_lock(m_mutex);
- return (bool)m_value;
+ Mutex::WriteLock line_lock(m_mutex);
+ return m_value;
}
void deprioritize() {
- Mutex::Lock line_lock(m_mutex);
- if( m_value ) {
- Mutex::Lock cache_lock(cache().m_mutex);
+ bool exists = valid();
+ if ( exists ) {
+ Mutex::WriteLock line_lock(m_mutex);
CacheLineBase::deprioritize();
}
}
};
-
CacheLineBase *m_first_valid, *m_last_valid, *m_first_invalid;
size_t m_size, m_max_size;
- Mutex m_mutex;
- vw::uint64 m_hits, m_misses, m_evictions;
+ RecursiveMutex m_line_mgmt_mutex;
+ Mutex m_stats_mutex;
+ volatile vw::uint64 m_hits, m_misses, m_evictions;
- void allocate( size_t size );
- void deallocate( size_t size );
+ void allocate( size_t size, CacheLineBase *line );
+ void deallocate( size_t size, CacheLineBase *line );
void validate( CacheLineBase *line );
void invalidate( CacheLineBase *line );
void remove( CacheLineBase *line );
@@ -226,23 +252,35 @@ namespace vw {
template <class GeneratorT>
class Handle {
boost::shared_ptr<CacheLine<GeneratorT> > m_line_ptr;
+ mutable bool m_is_locked;
public:
typedef typename core::detail::GenValue<GeneratorT>::type value_type;
- Handle() {}
- Handle( boost::shared_ptr<CacheLine<GeneratorT> > line_ptr ) : m_line_ptr(line_ptr) {}
+ Handle() : m_is_locked(false) {}
+ Handle( boost::shared_ptr<CacheLine<GeneratorT> > line_ptr ) : m_line_ptr(line_ptr), m_is_locked(false) {}
+ ~Handle() {
+ if (m_is_locked)
+ m_line_ptr->release();
+ }
boost::shared_ptr<value_type> operator->() const {
VW_ASSERT( m_line_ptr, NullPtrErr() << "Invalid cache handle!" );
+ m_is_locked = true;
return m_line_ptr->value();
}
value_type const& operator*() const {
VW_ASSERT( m_line_ptr, NullPtrErr() << "Invalid cache handle!" );
+ m_is_locked = true;
return *(m_line_ptr->value());
}
operator boost::shared_ptr<value_type>() const {
VW_ASSERT( m_line_ptr, NullPtrErr() << "Invalid cache handle!" );
+ m_is_locked = true;
return m_line_ptr->value();
}
+ void release() const {
+ m_is_locked = false;
+ m_line_ptr->release();
+ }
bool valid() const {
VW_ASSERT( m_line_ptr, NullPtrErr() << "Invalid cache handle!" );
return m_line_ptr->valid();
@@ -274,15 +312,13 @@ namespace vw {
}
void resize( size_t size );
- size_t max_size() { return m_max_size; }
-
- uint64 hits() const { return m_hits; }
- uint64 misses() const { return m_misses; }
- uint64 evictions() const {return m_evictions; }
- void clear_stats() {
- Mutex::Lock cache_lock(m_mutex);
- m_hits = m_misses = m_evictions = 0;
- }
+ size_t max_size();
+
+ // Statistics functions
+ uint64 hits();
+ uint64 misses();
+ uint64 evictions();
+ void clear_stats();
};
} // namespace vw
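
The value()/release() contract in CacheLine and Handle above boils down to the small analogue below. It is a sketch under stated assumptions, not the library's API: std::mutex replaces vw::Mutex, the type names are invented, and T is assumed default-constructible so the miss path can "generate" something.

#include <mutex>
#include <boost/shared_ptr.hpp>

// The slot hands out a reference to data it may later reset during eviction,
// so it keeps its own mutex locked from value() until release().
template <class T>
class LockedSlot {
  std::mutex m_mutex;
  boost::shared_ptr<T> m_value;
public:
  boost::shared_ptr<T> const& value() {
    m_mutex.lock();                     // deliberately NOT released on return
    if ( !m_value )
      m_value.reset( new T() );         // "generate" on a miss
    return m_value;
  }
  void release() { m_mutex.unlock(); }  // caller unlocks once done with the pointer

  // An evictor uses try_lock(), so a slot that is checked out is skipped,
  // never blocked on while other mutexes are held.
  bool try_evict() {
    if ( !m_mutex.try_lock() ) return false;
    m_value.reset();
    m_mutex.unlock();
    return true;
  }
};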
src/vw/Core/Thread.h (27 lines changed)
@@ -98,24 +98,27 @@ namespace vw {
public:
inline Mutex() {}
- void lock() { boost::shared_mutex::lock(); }
- void lock_shared() { boost::shared_mutex::lock_shared(); }
- void unlock() { boost::shared_mutex::unlock(); }
- void unlock_shared() { boost::shared_mutex::unlock_shared(); }
+ void lock() { boost::shared_mutex::lock(); }
+ void lock_shared() { boost::shared_mutex::lock_shared(); }
+ bool try_lock() { return boost::shared_mutex::try_lock(); }
+ bool try_lock_shared() { return boost::shared_mutex::try_lock_shared(); }
+ void unlock() { boost::shared_mutex::unlock(); }
+ void unlock_shared() { boost::shared_mutex::unlock_shared(); }
// A scoped lock class, used to lock and unlock a Mutex.
class WriteLock : private boost::unique_lock<Mutex>, private boost::noncopyable {
- public:
- inline WriteLock( Mutex &mutex ) : boost::unique_lock<Mutex>( mutex ) {}
- void lock() { boost::unique_lock<Mutex>::lock(); }
- void unlock() { boost::unique_lock<Mutex>::unlock(); }
+ public:
+ inline WriteLock( Mutex &mutex ) : boost::unique_lock<Mutex>( mutex ) {}
+ void lock() { boost::unique_lock<Mutex>::lock(); }
+ void unlock() { boost::unique_lock<Mutex>::unlock(); }
};
class ReadLock : private boost::shared_lock<Mutex>, private boost::noncopyable {
- public:
- inline ReadLock( Mutex &mutex ) : boost::shared_lock<Mutex>( mutex ) {}
- void lock() { boost::shared_lock<Mutex>::lock(); }
- void unlock() { boost::shared_lock<Mutex>::unlock(); }
+ public:
+ inline ReadLock( Mutex &mutex ) : boost::shared_lock<Mutex>( mutex ) {}
+ void lock() { boost::shared_lock<Mutex>::lock(); }
+ void unlock() { boost::shared_lock<Mutex>::unlock(); }
};
+
typedef class WriteLock Lock;
};
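
A minimal sketch of how the new try_lock() forwarder is meant to be used, mirroring CacheLine::try_invalidate() above; the function and the guarded work are placeholders, not part of this commit.

vw::Mutex m;

void maybe_do_work() {
  if ( !m.try_lock() )
    return;            // someone else holds the mutex: back off instead of blocking
  // ... touch state guarded by m ...
  m.unlock();
}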
src/vw/Core/tests/TestCache.cxx (14 lines changed)
@@ -82,10 +82,12 @@ TEST_F(CacheTest, CacheLineLRU) {
ASSERT_FALSE(h.valid());
EXPECT_EQ(i, *h);
+ EXPECT_NO_THROW( h.release() );
EXPECT_TRUE(h.valid());
boost::shared_ptr<BlockGenerator::value_type> ptr = h;
EXPECT_EQ(i, *ptr);
+ h.release();
// LRU cache, so the last num_cache_blocks should still be valid
for (int j = 0; i-j >= 0; ++j)
@@ -107,6 +109,7 @@ TEST_F(CacheTest, Priority) {
// prime the cache
for (int i = 0; i < num_actual_blocks; ++i) {
EXPECT_EQ(i, *cache_handles[i]);
+ EXPECT_NO_THROW( cache_handles[i].release() );
}
//make sure last element is valid
@@ -119,6 +122,7 @@ TEST_F(CacheTest, Priority) {
// bring the first element back into cache
ASSERT_FALSE(cache_handles[0].valid());
EXPECT_EQ( 0, *cache_handles[0] );
+ EXPECT_NO_THROW( cache_handles[0].release() );
EXPECT_TRUE(cache_handles[0].valid());
// make sure that the deprioritized element dropped out of cache
@@ -154,8 +158,10 @@ TEST(Cache, Types) {
// value should have copied once
EXPECT_EQ(1, *h1);
+ EXPECT_NO_THROW( h1.release() );
// shared_ptr should not have copied
EXPECT_EQ(0, *h2);
+ EXPECT_NO_THROW( h2.release() );
}
TEST(Cache, Stats) {
@@ -175,6 +181,7 @@ TEST(Cache, Stats) {
// miss
EXPECT_EQ(0, *h[0]);
+ EXPECT_NO_THROW( h[0].release() );
EXPECT_EQ(0u, cache.hits());
EXPECT_EQ(1u, cache.misses());
@@ -182,6 +189,7 @@ TEST(Cache, Stats) {
// hit
EXPECT_EQ(0, *h[0]);
+ EXPECT_NO_THROW( h[0].release() );
EXPECT_EQ(1u, cache.hits());
EXPECT_EQ(1u, cache.misses());
@@ -189,6 +197,7 @@ TEST(Cache, Stats) {
// miss
EXPECT_EQ(1, *h[1]);
+ EXPECT_NO_THROW( h[1].release() );
EXPECT_EQ(1u, cache.hits());
EXPECT_EQ(2u, cache.misses());
@@ -196,6 +205,7 @@ TEST(Cache, Stats) {
// hit
EXPECT_EQ(1, *h[1]);
+ EXPECT_NO_THROW( h[1].release() );
EXPECT_EQ(2u, cache.hits());
EXPECT_EQ(2u, cache.misses());
@@ -203,6 +213,7 @@ TEST(Cache, Stats) {
// miss, eviction
EXPECT_EQ(2, *h[2]);
+ EXPECT_NO_THROW( h[2].release() );
EXPECT_EQ(2u, cache.hits());
EXPECT_EQ(3u, cache.misses());
@@ -210,6 +221,7 @@ TEST(Cache, Stats) {
// hit
EXPECT_EQ(2, *h[2]);
+ EXPECT_NO_THROW( h[2].release() );
EXPECT_EQ(3u, cache.hits());
EXPECT_EQ(3u, cache.misses());
@@ -217,6 +229,7 @@ TEST(Cache, Stats) {
// hit
EXPECT_EQ(1, *h[1]);
+ EXPECT_NO_THROW( h[1].release() );
EXPECT_EQ(4u, cache.hits());
EXPECT_EQ(3u, cache.misses());
@@ -224,6 +237,7 @@ TEST(Cache, Stats) {
// miss, eviction
EXPECT_EQ(0, *h[0]);
+ EXPECT_NO_THROW( h[0].release() );
EXPECT_EQ(4u, cache.hits());
EXPECT_EQ(4u, cache.misses());
src/vw/Image/BlockRasterize.h (4 lines changed)
@@ -138,8 +138,8 @@ namespace vw {
return m_bbox.width() * m_bbox.height() * m_child->planes() * sizeof(pixel_type);
}
- boost::shared_ptr<ImageView<pixel_type> > generate() const {
- boost::shared_ptr<ImageView<pixel_type> > ptr( new ImageView<pixel_type>( m_bbox.width(), m_bbox.height(), m_child->planes() ) );
+ boost::shared_ptr<value_type > generate() const {
+ boost::shared_ptr<value_type > ptr( new value_type( m_bbox.width(), m_bbox.height(), m_child->planes() ) );
m_child->rasterize( *ptr, m_bbox );
return ptr;
}
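
For context, a generator handed to the cache only needs to report its size in bytes and produce its value on demand, which is what the block generator above does through value_type. A hedged, self-contained example of that contract follows; the class name and payload are illustrative, and the exact traits the cache requires are assumed to match those used by the existing generators.

#include <cstddef>
#include <vector>
#include <boost/shared_ptr.hpp>

class ConstantBlockGenerator {
  int m_fill;
  std::size_t m_count;
public:
  typedef std::vector<int> value_type;

  ConstantBlockGenerator( int fill, std::size_t count )
    : m_fill(fill), m_count(count) {}

  // Bytes charged against the cache's size budget.
  std::size_t size() const { return m_count * sizeof(int); }

  // Called lazily, on a cache miss, to (re)create the payload.
  boost::shared_ptr<value_type> generate() const {
    return boost::shared_ptr<value_type>( new value_type( m_count, m_fill ) );
  }
};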
