Skip to content

Commit

Permalink
Add a "freeze" lock mode to the SequenceLock
Browse files Browse the repository at this point in the history
And make the background and foreground flushing use it.
This new lock mode prevents write and exclusive locks, but does not invalidate read locks.
This reduces the negative impact the background flush task has on the performance of reads.
  • Loading branch information
chrisvest committed Jan 20, 2016
1 parent e2f1a84 commit 4fe9163
Show file tree
Hide file tree
Showing 4 changed files with 239 additions and 31 deletions.
Expand Up @@ -864,6 +864,7 @@ int evictPages( int pageCountToEvict, int clockArm, EvictionRunEvent evictionRun


private boolean evictPage( MuninnPage page, EvictionEvent evictionEvent ) private boolean evictPage( MuninnPage page, EvictionEvent evictionEvent )
{ {
//noinspection TryWithIdenticalCatches - this warning is a false positive; bug in Intellij inspection
try try
{ {
page.evict( evictionEvent ); page.evict( evictionEvent );
Expand Down Expand Up @@ -972,14 +973,14 @@ private long flushAtIORatio( double ratio )
// counters fast enough to reach zero. // counters fast enough to reach zero.


// Skip the page if it is already write locked, or not dirty, or too popular. // Skip the page if it is already write locked, or not dirty, or too popular.
boolean thisPageIsDirty = false; boolean thisPageIsDirty;
if ( !(thisPageIsDirty = page.isDirty()) || !page.decrementUsage() ) if ( !(thisPageIsDirty = page.isDirty()) || !page.decrementUsage() )
{ {
seenDirtyPages |= thisPageIsDirty; seenDirtyPages |= thisPageIsDirty;
continue; // Continue looping to the next page. continue; // Continue looping to the next page.
} }


if ( page.tryExclusiveLock() ) // TODO somehow avoid taking these exclusive locks, that we currently need to avoid racing with other flushes if ( page.tryFreezeLock() )
{ {
try try
{ {
Expand All @@ -1005,7 +1006,7 @@ private long flushAtIORatio( double ratio )
} }
finally finally
{ {
page.unlockExclusive(); page.unlockFreeze();
} }
} }


Expand Down
Expand Up @@ -69,7 +69,7 @@ final class MuninnPagedFile implements PagedFile
* The layout looks like this: * The layout looks like this:
* *
* ┏━ Empty file marker bit. When 1, the file is empty. * ┏━ Empty file marker bit. When 1, the file is empty.
* ┃ ┏━ Reference count, 15 bites. * ┃ ┏━ Reference count, 15 bits.
* ┃ ┃ ┏━ 48 bits for the last page id. * ┃ ┃ ┏━ 48 bits for the last page id.
* ┃┏━━━┻━━━━━━━━━━┓ ┏━━━┻━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┓ * ┃┏━━━┻━━━━━━━━━━┓ ┏━━━┻━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┓
* MRRRRRRR RRRRRRRR IIIIIIII IIIIIIII IIIIIIII IIIIIIII IIIIIIII IIIIIIII * MRRRRRRR RRRRRRRR IIIIIIII IIIIIIII IIIIIIII IIIIIIII IIIIIIII IIIIIIII
Expand Down Expand Up @@ -225,7 +225,7 @@ void flushAndForceInternal( FlushEventOpportunity flushOpportunity, boolean forC
if ( element instanceof MuninnPage ) if ( element instanceof MuninnPage )
{ {
MuninnPage page = (MuninnPage) element; MuninnPage page = (MuninnPage) element;
if ( !(forClosing? page.tryExclusiveLock() : page.tryWriteLock()) ) if ( !(forClosing? page.tryExclusiveLock() : page.tryFreezeLock()) )
{ {
continue; continue;
} }
Expand All @@ -244,7 +244,7 @@ else if ( forClosing )
} }
else else
{ {
page.unlockWrite(); page.unlockFreeze();
} }
} }
break; break;
Expand Down Expand Up @@ -322,7 +322,7 @@ private int vectoredFlush(
} }
else else
{ {
pages[j].unlockWrite(); pages[j].unlockFreeze();
} }
} }
} }
Expand Down
Expand Up @@ -42,19 +42,38 @@
*/ */
public class SequenceLock public class SequenceLock
{ {
// Bits for counting concurrent write-locks. We use 17 bits because our pages are most likely 8192 bytes, and /*
// 2^17 = 131,072, which is far more than our page size, so makes it highly unlikely that we are going to overflow * Bits for counting concurrent write-locks. We use 17 bits because our pages are most likely 8192 bytes, and
// our concurrent write lock counter. Meanwhile, it's also small enough that we have a very large (2^46) number * 2^17 = 131,072, which is far more than our page size, so makes it highly unlikely that we are going to overflow
// space for our sequence. * our concurrent write lock counter. Meanwhile, it's also small enough that we have a very large (2^45) number
* space for our sequence. This one value controls the layout of the lock bit-state. The rest of the layout is
* derived from this.
*
* With 17 writer count bits, the layout looks like this:
*
* ┏━ Freeze lock bit
* ┃┏━ Exclusive lock bit
* ┃┃ ┏━ Count of currently concurrently held write locks, 17 bits.
* ┃┃ ┃ ┏━ 45 bits for the read lock sequence, incremented on write & exclusive unlock.
* ┃┃┏━━━┻━━━━━━━━━━━━━┓┏━━━┻━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┓
* FEWWWWWW WWWWWWWW WWWSSSSS SSSSSSSS SSSSSSSS SSSSSSSS SSSSSSSS SSSSSSSS
* 1 2 3 4 5 6 7 8 byte
*/
private static final long CNT_BITS = 17; private static final long CNT_BITS = 17;


private static final long BITS_IN_LONG = 64; private static final long BITS_IN_LONG = 64;
private static final long SEQ_BITS = BITS_IN_LONG - 1 - CNT_BITS; private static final long EXL_LOCK_BITS = 1;
private static final long FRZ_LOCK_BITS = 1;
private static final long SEQ_BITS = BITS_IN_LONG - FRZ_LOCK_BITS - EXL_LOCK_BITS - CNT_BITS;
private static final long CNT_UNIT = 1L << SEQ_BITS; private static final long CNT_UNIT = 1L << SEQ_BITS;
private static final long SEQ_MASK = CNT_UNIT - 1L; private static final long SEQ_MASK = CNT_UNIT - 1L;
private static final long SEQ_IMSK = ~SEQ_MASK; private static final long SEQ_IMSK = ~SEQ_MASK;
private static final long CNT_MASK = ((1L << CNT_BITS) - 1L) << SEQ_BITS; private static final long CNT_MASK = ((1L << CNT_BITS) - 1L) << SEQ_BITS;
private static final long EXCL_MASK = (1L << CNT_BITS + SEQ_BITS); private static final long EXL_MASK = (1L << CNT_BITS + SEQ_BITS);
private static final long FRZ_MASK = (1L << CNT_BITS + SEQ_BITS + 1L);
private static final long FRZ_IMSK = ~FRZ_MASK;
private static final long FAE_MASK = FRZ_MASK + EXL_MASK; // "freeze and/or exclusive" mask
private static final long UNL_MASK = FAE_MASK + CNT_MASK; // unlocked mask


private static final long STATE = UnsafeUtil.getFieldOffset( SequenceLock.class, "state" ); private static final long STATE = UnsafeUtil.getFieldOffset( SequenceLock.class, "state" );


Expand All @@ -71,6 +90,11 @@ private boolean compareAndSetState( long expect, long update )
return UnsafeUtil.compareAndSwapLong( this, STATE, expect, update ); return UnsafeUtil.compareAndSwapLong( this, STATE, expect, update );
} }


// Plain (non-CAS) write of the lock state. Only safe when the caller already holds a lock mode
// (exclusive or freeze) that prevents all concurrent modifications of the state, so no competing
// compareAndSwap can succeed between the caller's read of the state and this write.
private void unconditionallySetState( long update )
{
state = update;
}

/** /**
* Start an optimistic critical section, and return a stamp that can be used to validate if the read-lock was * Start an optimistic critical section, and return a stamp that can be used to validate if the read-lock was
* consistent. That is, if no write or exclusive lock was overlapping with the optimistic read-lock. * consistent. That is, if no write or exclusive lock was overlapping with the optimistic read-lock.
Expand All @@ -93,7 +117,7 @@ public long tryOptimisticReadLock()
public boolean validateReadLock( long stamp ) public boolean validateReadLock( long stamp )
{ {
UnsafeUtil.loadFence(); UnsafeUtil.loadFence();
return getState() == stamp; return (getState() & FRZ_IMSK) == stamp;
} }


/** /**
Expand All @@ -111,14 +135,15 @@ public boolean tryWriteLock()
for (; ; ) for (; ; )
{ {
s = getState(); s = getState();
if ( (s & EXCL_MASK) == EXCL_MASK ) boolean unwritablyLocked = (s & FAE_MASK) != 0;
{ boolean writeCountOverflow = (s & CNT_MASK) == CNT_MASK;
return false;
} // bitwise-OR to reduce branching and allow more ILP
if ( (s & CNT_MASK) == CNT_MASK ) if ( unwritablyLocked | writeCountOverflow )
{ {
throwWriteLockOverflow( s ); return failWriteLock( s, writeCountOverflow );
} }

n = s + CNT_UNIT; n = s + CNT_UNIT;
if ( compareAndSetState( s, n ) ) if ( compareAndSetState( s, n ) )
{ {
Expand All @@ -127,6 +152,16 @@ public boolean tryWriteLock()
} }
} }


// Failure path for tryWriteLock: throws if the failure was a write-lock counter overflow
// (a programming error — more than 2^17 - 1 concurrent write locks), otherwise returns
// false to signal that the lock is unavailable.
private boolean failWriteLock( long s, boolean writeCountOverflow )
{
if ( writeCountOverflow )
{
throwWriteLockOverflow( s );
}
// Otherwise it was either exclusively or freeze locked
return false;
}

private long throwWriteLockOverflow( long s ) private long throwWriteLockOverflow( long s )
{ {
throw new IllegalMonitorStateException( "Write lock counter overflow: " + describeState( s ) ); throw new IllegalMonitorStateException( "Write lock counter overflow: " + describeState( s ) );
Expand Down Expand Up @@ -172,7 +207,7 @@ private long nextSeq( long s )
public boolean tryExclusiveLock() public boolean tryExclusiveLock()
{ {
long s = getState(); long s = getState();
return ((s & CNT_MASK) == 0) & ((s & EXCL_MASK) == 0) && compareAndSetState( s, s + EXCL_MASK ); return ((s & UNL_MASK) == 0) && compareAndSetState( s, s + EXL_MASK );
} }


/** /**
Expand All @@ -184,8 +219,9 @@ public boolean tryExclusiveLock()
public long unlockExclusive() public long unlockExclusive()
{ {
long s = initiateExclusiveLockRelease(); long s = initiateExclusiveLockRelease();
long n = nextSeq( s ) - EXCL_MASK; long n = nextSeq( s ) - EXL_MASK;
compareAndSetState( s, n ); // Exclusive locks prevent any state modifications from write locks
unconditionallySetState( n );
return n; return n;
} }


Expand All @@ -195,14 +231,14 @@ public long unlockExclusive()
public void unlockExclusiveAndTakeWriteLock() public void unlockExclusiveAndTakeWriteLock()
{ {
long s = initiateExclusiveLockRelease(); long s = initiateExclusiveLockRelease();
long n = nextSeq( s ) - EXCL_MASK + CNT_UNIT; long n = nextSeq( s ) - EXL_MASK + CNT_UNIT;
compareAndSetState( s, n ); unconditionallySetState( n );
} }


private long initiateExclusiveLockRelease() private long initiateExclusiveLockRelease()
{ {
long s = getState(); long s = getState();
if ( (s & EXCL_MASK) != EXCL_MASK ) if ( (s & EXL_MASK) != EXL_MASK )
{ {
throwUnmatchedUnlockExclusive( s ); throwUnmatchedUnlockExclusive( s );
} }
Expand All @@ -214,6 +250,30 @@ private void throwUnmatchedUnlockExclusive( long s )
throw new IllegalMonitorStateException( "Unmatched unlockExclusive: " + describeState( s ) ); throw new IllegalMonitorStateException( "Unmatched unlockExclusive: " + describeState( s ) );
} }


/**
 * Try taking the freeze lock. The freeze lock excludes write and exclusive locks, but — unlike the
 * exclusive lock — releasing it does not invalidate overlapping optimistic read locks, because
 * {@link #unlockFreeze()} does not advance the read sequence.
 *
 * @return {@code true} if the freeze lock was taken, {@code false} if it was unavailable.
 */
public boolean tryFreezeLock()
{
long s = getState();
// Lockable only when no freeze, exclusive or write locks are held (UNL_MASK covers all of them).
return ((s & UNL_MASK) == 0) && compareAndSetState( s, s + FRZ_MASK );
}

/**
 * Release the freeze lock, previously taken with {@link #tryFreezeLock()}.
 * The read sequence is deliberately left untouched, so optimistic read locks that overlapped
 * the freeze remain valid.
 *
 * @throws IllegalMonitorStateException if the freeze lock is not currently held.
 */
public void unlockFreeze()
{
long s = getState();
if ( (s & FRZ_MASK) != FRZ_MASK )
{
throwUnmatchedUnlockFreeze( s );
}
// We don't increment the sequence with nextSeq here, because freeze locks don't invalidate readers
long n = s - FRZ_MASK;
// Freeze locks prevent any state modifications from write and exclusive locks
unconditionallySetState( n );
}

// Throws on an unlockFreeze() call made without holding the freeze lock.
// NOTE(review): presumably extracted into its own method to keep the unlockFreeze hot path
// small — it matches the pattern of the other throw* helpers in this class; confirm intent.
private void throwUnmatchedUnlockFreeze( long s )
{
throw new IllegalMonitorStateException( "Unmatched unlockFreeze: " + describeState( s ) );
}

@Override @Override
public String toString() public String toString()
{ {
Expand Down

0 comments on commit 4fe9163

Please sign in to comment.