Tighten up the page cache code a little
This work has been guided by the HotSpot JIT assembly output, so some of the changes might not be super obvious.
The general idea is to produce less assembly code for the same functionality.
chrisvest committed Nov 6, 2015
1 parent e773bd1 commit d2bea58
Showing 3 changed files with 52 additions and 30 deletions.
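The theme of the commit shows up most clearly in the pin() changes below: the hot path (page already mapped) is kept small, and the rarely taken branches (performing a page fault, waiting on another thread's fault) are moved into separate private methods, which tends to reduce the amount of machine code the JIT emits for the common case. The following is a minimal, hypothetical sketch of that extract-the-cold-path pattern under made-up names; it is not the Neo4j code.

import java.io.IOException;

// Hypothetical sketch: keep the common case in the hot method and push the rare
// branch into its own method, so the JIT-compiled hot path stays compact.
abstract class ColdPathExtractionSketch
{
    private final Object[] table = new Object[1024];

    Object lookup( int slot ) throws IOException
    {
        Object item = table[slot];
        if ( item != null )
        {
            return item; // hot path: entry already present, very little code
        }
        return loadSlowPath( slot ); // cold path lives in a separate method
    }

    private Object loadSlowPath( int slot ) throws IOException
    {
        Object item = load( slot );
        table[slot] = item;
        return item;
    }

    protected abstract Object load( int slot ) throws IOException;
}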
@@ -25,13 +25,16 @@
 import org.neo4j.concurrent.BinaryLatch;
 import org.neo4j.io.pagecache.PageCursor;
 import org.neo4j.io.pagecache.PageSwapper;
+import org.neo4j.io.pagecache.tracing.PageCacheTracer;
 import org.neo4j.io.pagecache.tracing.PageFaultEvent;
 import org.neo4j.io.pagecache.tracing.PinEvent;
 import org.neo4j.unsafe.impl.internal.dragons.UnsafeUtil;
 
 abstract class MuninnPageCursor implements PageCursor
 {
     protected MuninnPagedFile pagedFile;
+    protected PageSwapper swapper;
+    protected PageCacheTracer tracer;
     protected MuninnPage page;
     protected PinEvent pinEvent;
     protected long pageId;
@@ -48,6 +51,8 @@ abstract class MuninnPageCursor implements PageCursor
     public final void initialise( MuninnPagedFile pagedFile, long pageId, int pf_flags )
     {
         this.pagedFile = pagedFile;
+        this.swapper = pagedFile.swapper;
+        this.tracer = pagedFile.tracer;
         this.pageId = pageId;
         this.pf_flags = pf_flags;
     }
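The two new assignments above cache pagedFile.swapper and pagedFile.tracer on the cursor itself, so the pin path further down can read them with a single field load instead of going through pagedFile every time. A rough, hypothetical sketch of that idea, with stand-in names rather than the Neo4j classes:

// Hypothetical sketch: copy a collaborator's hot references into the object that
// uses them, trading two extra fields for fewer dereferences on the hot path.
class CursorSketch
{
    private SourceSketch source;   // stands in for MuninnPagedFile
    private HelperSketch helper;   // stands in for PageSwapper / PageCacheTracer

    void initialise( SourceSketch source )
    {
        this.source = source;
        this.helper = source.helper; // one dereference here, not one per use
    }

    void hotPath()
    {
        helper.doWork(); // previously: source.helper.doWork()
    }

    static class SourceSketch
    {
        final HelperSketch helper = new HelperSketch();
    }

    static class HelperSketch
    {
        void doWork()
        {
        }
    }
}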
@@ -135,16 +140,15 @@ public final File getCurrentFile()
      */
     protected void pin( long filePageId, boolean exclusive ) throws IOException
     {
-        PageSwapper swapper = pagedFile.swapper;
-        pinEvent = pagedFile.tracer.beginPin( exclusive, filePageId, swapper );
-        int chunkId = pagedFile.computeChunkId( filePageId );
+        pinEvent = tracer.beginPin( exclusive, filePageId, swapper );
+        int chunkId = MuninnPagedFile.computeChunkId( filePageId );
         // The chunkOffset is the addressing offset into the chunk array object for the relevant array slot. Using
         // this, we can access the array slot with Unsafe.
-        long chunkOffset = pagedFile.computeChunkOffset( filePageId );
+        long chunkOffset = MuninnPagedFile.computeChunkOffset( filePageId );
         Object[][] tt = pagedFile.translationTable;
         if ( tt.length <= chunkId )
         {
-            tt = pagedFile.expandCapacity( chunkId );
+            tt = expandTranslationTableCapacity( chunkId );
         }
         Object[] chunk = tt[chunkId];
 
@@ -158,18 +162,7 @@ protected void pin( long filePageId, boolean exclusive ) throws IOException
         do
         {
             item = UnsafeUtil.getObjectVolatile( chunk, chunkOffset );
-            if ( item == null )
-            {
-                // Looks like there's no mapping, so we'd like to do a page fault.
-                BinaryLatch latch = new BinaryLatch();
-                if ( UnsafeUtil.compareAndSwapObject( chunk, chunkOffset, null, latch ) )
-                {
-                    // We managed to inject our latch, so we now own the right to perform the page fault. We also
-                    // have a duty to eventually release and remove the latch, no matter what happens now.
-                    item = pageFault( filePageId, swapper, chunkOffset, chunk, latch );
-                }
-            }
-            else if ( item.getClass() == MuninnPage.class )
+            if ( item != null && item.getClass() == MuninnPage.class )
             {
                 // We got *a* page, but we might be racing with eviction. To cope with that, we have to take some
                 // kind of lock on the page, and check that it is indeed bound to what we expect. If not, then it has
@@ -183,19 +176,48 @@ else if ( item.getClass() == MuninnPage.class )
                     item = null;
                 }
             }
+            else if ( item == null )
+            {
+                // Looks like there's no mapping, so we'd like to do a page fault.
+                item = initiatePageFault( filePageId, chunkOffset, chunk );
+            }
             else
             {
                 // We found a latch, so someone else is already doing a page fault for this page. So we'll just wait
                 // for them to finish, and grab the page then.
-                BinaryLatch latch = (BinaryLatch) item;
-                latch.await();
-                item = null;
+                item = awaitPageFault( item );
             }
         }
         while ( item == null );
         pinCursorToPage( (MuninnPage) item, filePageId, swapper );
     }
 
+    private Object[][] expandTranslationTableCapacity( int chunkId )
+    {
+        return pagedFile.expandCapacity( chunkId );
+    }
+
+    private Object initiatePageFault( long filePageId, long chunkOffset, Object[] chunk )
+            throws IOException
+    {
+        BinaryLatch latch = new BinaryLatch();
+        Object item = null;
+        if ( UnsafeUtil.compareAndSwapObject( chunk, chunkOffset, null, latch ) )
+        {
+            // We managed to inject our latch, so we now own the right to perform the page fault. We also
+            // have a duty to eventually release and remove the latch, no matter what happens now.
+            item = pageFault( filePageId, swapper, chunkOffset, chunk, latch );
+        }
+        return item;
+    }
+
+    private Object awaitPageFault( Object item )
+    {
+        BinaryLatch latch = (BinaryLatch) item;
+        latch.await();
+        return null;
+    }
+
     private MuninnPage pageFault(
             long filePageId, PageSwapper swapper, long chunkOffset, Object[] chunk, BinaryLatch latch )
             throws IOException
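The restructured loop still implements the same protocol as before: an empty translation-table slot is claimed by CAS-ing a latch into it, the winner performs the page fault, and any thread that instead finds a latch waits on it and then retries the lookup. A self-contained sketch of that protocol using standard java.util.concurrent types follows; the names are hypothetical and this is not the Muninn implementation.

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.atomic.AtomicReferenceArray;

// Hypothetical sketch of the pin() protocol: the first thread to CAS a latch into
// an empty slot performs the expensive load; a thread that finds a latch waits on
// it and retries; a thread that finds a real entry just returns it.
class FaultCoordinationSketch
{
    private final AtomicReferenceArray<Object> slots = new AtomicReferenceArray<>( 1024 );

    Object pin( int slot ) throws InterruptedException
    {
        for ( ;; )
        {
            Object item = slots.get( slot );
            if ( item instanceof CountDownLatch )
            {
                ((CountDownLatch) item).await(); // someone else is faulting; wait, then retry
                continue;
            }
            if ( item != null )
            {
                return item; // already loaded
            }
            CountDownLatch latch = new CountDownLatch( 1 );
            if ( slots.compareAndSet( slot, null, latch ) )
            {
                Object loaded = loadPage( slot ); // the expensive "page fault"
                slots.set( slot, loaded );        // publish the result
                latch.countDown();                // release the waiters
                return loaded;
            }
            // lost the race to install the latch; loop and re-read the slot
        }
    }

    private Object loadPage( int slot )
    {
        return new Object(); // placeholder for reading a page from disk
    }
}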
@@ -39,7 +39,7 @@ final class MuninnPagedFile implements PagedFile
     private static final int translationTableChunkSizePower = Integer.getInteger(
             "org.neo4j.io.pagecache.impl.muninn.MuninnPagedFile.translationTableChunkSizePower", 12 );
     private static final int translationTableChunkSize = 1 << translationTableChunkSizePower;
-    private static final int translationTableChunkSizeMask = translationTableChunkSize - 1;
+    private static final long translationTableChunkSizeMask = translationTableChunkSize - 1;
     private static final int translationTableChunkArrayBase = UnsafeUtil.arrayBaseOffset( MuninnPage[].class );
     private static final int translationTableChunkArrayScale = UnsafeUtil.arrayIndexScale( MuninnPage[].class );
 
@@ -450,12 +450,12 @@ private int computeNewRootTableLength( int maxChunkId )
         return 1 + (int) (maxChunkId * 1.1);
     }
 
-    int computeChunkId( long filePageId )
+    static int computeChunkId( long filePageId )
     {
         return (int) (filePageId >>> translationTableChunkSizePower);
     }
 
-    long computeChunkOffset( long filePageId )
+    static long computeChunkOffset( long filePageId )
     {
         int index = (int) (filePageId & translationTableChunkSizeMask);
         return UnsafeUtil.arrayOffset( index, translationTableChunkArrayBase, translationTableChunkArrayScale );
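computeChunkId and computeChunkOffset split a file page id into a chunk id (the high bits) and a slot index within that chunk (the low bits, via the mask), and the index is then turned into a byte offset for Unsafe array access (base + index * scale). A worked example with the default chunk size of 2^12 pages; the base and scale values below are illustrative stand-ins, the real ones come from UnsafeUtil.

// Worked example of the translation-table addressing, mirroring MuninnPagedFile.
class ChunkAddressingSketch
{
    static final int CHUNK_SIZE_POWER = 12;                       // 4096 pages per chunk
    static final long CHUNK_SIZE_MASK = (1 << CHUNK_SIZE_POWER) - 1;

    static int computeChunkId( long filePageId )
    {
        return (int) (filePageId >>> CHUNK_SIZE_POWER);           // high bits select the chunk
    }

    static long computeChunkOffset( long filePageId, int arrayBase, int arrayScale )
    {
        int index = (int) (filePageId & CHUNK_SIZE_MASK);         // low bits select the slot
        return arrayBase + (long) index * arrayScale;             // byte offset for Unsafe access
    }

    public static void main( String[] args )
    {
        long filePageId = 10_000;
        System.out.println( computeChunkId( filePageId ) );           // 10_000 >>> 12 == 2
        System.out.println( computeChunkOffset( filePageId, 16, 4 ) ); // 16 + 1808 * 4 == 7248
    }
}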
@@ -34,7 +34,7 @@ public interface PageCacheTracer extends PageCacheMonitor
     /**
      * A PageCacheTracer that does nothing other than return the NULL variants of the companion interfaces.
      */
-    public static final PageCacheTracer NULL = new PageCacheTracer()
+    PageCacheTracer NULL = new PageCacheTracer()
     {
         @Override
         public void mappedFile( File file )
@@ -140,12 +140,12 @@ public String toString()
     /**
      * The given file has been mapped, where no existing mapping for that file existed.
      */
-    public void mappedFile( File file );
+    void mappedFile( File file );
 
     /**
      * The last reference to the given file has been unmapped.
      */
-    public void unmappedFile( File file );
+    void unmappedFile( File file );
 
     /**
      * A background eviction has begun. Called from the background eviction thread.
@@ -154,20 +154,20 @@ public String toString()
      *
      * The method returns an EvictionRunEvent to represent the event of this eviction run.
      **/
-    public EvictionRunEvent beginPageEvictions( int pageCountToEvict );
+    EvictionRunEvent beginPageEvictions( int pageCountToEvict );
 
     /**
      * A page is to be pinned.
      */
-    public PinEvent beginPin( boolean exclusiveLock, long filePageId, PageSwapper swapper );
+    PinEvent beginPin( boolean exclusiveLock, long filePageId, PageSwapper swapper );
 
     /**
      * A PagedFile wants to flush all its bound pages.
      */
-    public MajorFlushEvent beginFileFlush( PageSwapper swapper );
+    MajorFlushEvent beginFileFlush( PageSwapper swapper );
 
     /**
      * The PageCache wants to flush all its bound pages.
      */
-    public MajorFlushEvent beginCacheFlush();
+    MajorFlushEvent beginCacheFlush();
 }
