Skip to content

Commit

Permalink
Speed up the initialisation of the PageList memory.
Browse files Browse the repository at this point in the history
This makes it a few times faster to allocate and construct page caches with large memory volumes.
  • Loading branch information
chrisvest committed Nov 17, 2017
1 parent 6bca375 commit d36182e
Show file tree
Hide file tree
Showing 2 changed files with 63 additions and 6 deletions.
Expand Up @@ -31,6 +31,7 @@
import org.neo4j.unsafe.impl.internal.dragons.UnsafeUtil;

import static java.lang.String.format;
import static org.neo4j.unsafe.impl.internal.dragons.FeatureToggles.flag;

/**
* The PageList maintains the off-heap meta-data for the individual memory pages.
Expand All @@ -49,6 +50,8 @@
*/
class PageList
{
private static final boolean forceSlowMemoryClear = flag( PageList.class, "forceSlowMemoryClear", false );

private static final int META_DATA_BYTES_PER_PAGE = 32;
private static final int OFFSET_LOCK_WORD = 0; // 8 bytes
private static final int OFFSET_ADDRESS = 8; // 8 bytes
Expand Down Expand Up @@ -123,6 +126,21 @@ class PageList
}

/**
 * Initialise the off-heap meta-data entries for {@code pageCount} pages, starting at
 * {@code baseAddress}, to their unbound defaults.
 * <p>
 * Chooses the memcpy-based fast path when there are enough entries to fill at least one
 * whole chunk, unless the slow path is forced via the "forceSlowMemoryClear" feature toggle.
 * Ends with a full fence so the writes are visible to all threads.
 */
private void clearMemory( long baseAddress, long pageCount )
{
long chunkSize = UnsafeUtil.pageSize();
long entriesPerChunk = chunkSize / META_DATA_BYTES_PER_PAGE;
boolean useSimplePath = forceSlowMemoryClear || pageCount < entriesPerChunk;
if ( useSimplePath )
{
clearMemorySimple( baseAddress, pageCount );
}
else
{
clearMemoryFast( baseAddress, pageCount, chunkSize, entriesPerChunk );
}
UnsafeUtil.fullFence(); // Guarantee the visibility of the cleared memory.
}

private void clearMemorySimple( long baseAddress, long pageCount )
{
long address = baseAddress - 8;
for ( long i = 0; i < pageCount; i++ )
Expand All @@ -132,7 +150,23 @@ private void clearMemory( long baseAddress, long pageCount )
UnsafeUtil.putLong( address += 8, PageCursor.UNBOUND_PAGE_ID ); // file page id
UnsafeUtil.putLong( address += 8, 0 ); // rest
}
UnsafeUtil.fullFence(); // Guarantee the visibility of the cleared memory
}

/**
 * Fast path for initialising the page meta-data: write one chunk's worth of entries
 * individually, then replicate that chunk across the rest of the memory with bulk
 * memory copies, and finish the remainder with per-entry writes.
 *
 * @param baseAddress start of the meta-data memory to initialise.
 * @param pageCount total number of meta-data entries to initialise.
 * @param memcpyChunkSize size in bytes of each bulk copy.
 * @param metaDataEntriesPerChunk number of entries covered by one chunk.
 */
private void clearMemoryFast( long baseAddress, long pageCount, long memcpyChunkSize, long metaDataEntriesPerChunk )
{
// Initialise one chunk worth of data.
clearMemorySimple( baseAddress, metaDataEntriesPerChunk );
// Since all entries contain the same data, we can now copy this chunk over and over.
long chunkCopies = pageCount / metaDataEntriesPerChunk - 1;
long address = baseAddress + memcpyChunkSize;
// Use a 'long' loop counter: chunkCopies is a long and could exceed Integer.MAX_VALUE,
// in which case an 'int' counter would overflow and loop forever.
for ( long i = 0; i < chunkCopies; i++ )
{
UnsafeUtil.copyMemory( baseAddress, address, memcpyChunkSize );
address += memcpyChunkSize;
}
// Finally fill in the tail.
long tailCount = pageCount % metaDataEntriesPerChunk;
clearMemorySimple( address, tailCount );
}

/**
Expand Down
Expand Up @@ -21,36 +21,59 @@

import org.junit.Test;

import java.util.stream.IntStream;

import org.neo4j.io.ByteUnit;
import org.neo4j.unsafe.impl.internal.dragons.MemoryManager;

import static org.hamcrest.Matchers.is;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertThat;
import static org.junit.Assert.assertTrue;

public class LargePageListTest
public class LargePageListIT
{
private static final long ALIGNMENT = 8;

@Test
public void veryLargePageListsMustBeFullyAccessible() throws Exception
{
long pageCacheSize = ByteUnit.tebiBytes( 1 );
// We need roughly 4 GiBs of memory for the meta-data here, which is why this is an IT and not a Test.
// We add one extra page worth of data to the size here, to avoid ending up on a "convenient" boundary.
int pageSize = (int) ByteUnit.kibiBytes( 8 );
long pageCacheSize = ByteUnit.tebiBytes( 1 ) + pageSize;
int pages = Math.toIntExact( pageCacheSize / pageSize );

MemoryManager mman = new MemoryManager( ByteUnit.mebiBytes( 1 ), ALIGNMENT );
MemoryManager mman = new MemoryManager( ByteUnit.gibiBytes( 4 ), ALIGNMENT );
SwapperSet swappers = new SwapperSet();
long victimPage = VictimPageReference.getVictimPage( pageSize );

PageList pageList = new PageList( pages, pageSize, mman, swappers, victimPage );

// Verify we end up with the correct number of pages.
assertThat( pageList.getPageCount(), is( pages ) );
long ref = pageList.deref( pages - 1 );

// Spot-check the accessibility in the bulk of the pages.
IntStream.range( 0, pages / 32 ).parallel().forEach( id ->
{
verifyPageMetaDataIsAccessible( pageList, id * 32 );
} );

// Thoroughly check the accessibility around the tail end of the page list.
IntStream.range( pages - 2000, pages ).parallel().forEach( id ->
{
verifyPageMetaDataIsAccessible( pageList, id );
} );
}

private void verifyPageMetaDataIsAccessible( PageList pageList, int id )
{
long ref = pageList.deref( id );
pageList.incrementUsage( ref );
pageList.incrementUsage( ref );
assertFalse( pageList.decrementUsage( ref ) );
assertTrue( pageList.decrementUsage( ref ) );
System.out.println( "mman.sumUsedMemory() = " + mman.sumUsedMemory() );
assertEquals( id, pageList.toId( ref ) );
}
}

0 comments on commit d36182e

Please sign in to comment.