Skip to content

Commit

Permalink
Adapt and rebuild page cache tests that use the recording tracers infrastructure.
Browse files Browse the repository at this point in the history

Make current tests pass with new combination of page cache and page cursor tracers.
  • Loading branch information
MishaDemianenko committed Feb 27, 2017
1 parent 79614de commit 5d2ebf4
Show file tree
Hide file tree
Showing 16 changed files with 599 additions and 274 deletions.
Expand Up @@ -21,16 +21,10 @@

import java.io.File;
import java.io.IOException;
import java.lang.invoke.MethodHandle;
import java.lang.invoke.MethodHandles;
import java.lang.invoke.MethodType;
import java.lang.invoke.SwitchPoint;
import java.util.concurrent.atomic.AtomicLong;

import org.neo4j.io.pagecache.PageSwapper;

import static org.neo4j.unsafe.impl.internal.dragons.FeatureToggles.packageFlag;

/**
* The default PageCacheTracer implementation, that just increments counters.
*/
Expand Down Expand Up @@ -73,14 +67,7 @@ public void addPagesFlushed( int pageCount )
}
};

// Single-method interface, so a lambda suffices: every flush opportunity
// hands out the shared counting flushEvent (declared above).
private final FlushEventOpportunity flushEventOpportunity = ( filePageId, cachePageId, swapper ) -> flushEvent;

private final EvictionEvent evictionEvent = new EvictionEvent()
{
Expand Down Expand Up @@ -259,4 +246,22 @@ public void bytesRead( long bytesRead )
{
this.bytesRead.getAndAdd( bytesRead );
}

@Override
public void evictions( long evictions )
{
    // Fold the reported eviction count into the global atomic counter.
    this.evictions.addAndGet( evictions );
}

@Override
public void bytesWritten( long bytesWritten )
{
    // Fold the reported written-byte count into the global atomic counter.
    this.bytesWritten.addAndGet( bytesWritten );
}

@Override
public void flushes( long flushes )
{
    // Fold the reported flush count into the global atomic counter.
    this.flushes.addAndGet( flushes );
}
}
Expand Up @@ -144,6 +144,21 @@ public void bytesRead( long bytesRead )
{
}

@Override
public void evictions( long evictions )
{
// Intentionally empty: this implementation ignores the reported eviction count.
}

@Override
public void bytesWritten( long bytesWritten )
{
// Intentionally empty: this implementation ignores the reported byte count.
}

@Override
public void flushes( long flushes )
{
// Intentionally empty: this implementation ignores the reported flush count.
}

@Override
public String toString()
{
Expand Down Expand Up @@ -187,4 +202,10 @@ public String toString()
/**
 * Report a number of observed page faults to this tracer.
 *
 * @param faults number of faults to add to the running total
 */
void faults( long faults );

/**
 * Report a number of bytes read to this tracer.
 *
 * @param bytesRead number of read bytes to add to the running total
 */
void bytesRead( long bytesRead );

/**
 * Report a number of observed page evictions to this tracer.
 *
 * @param evictions number of evictions to add to the running total
 */
void evictions( long evictions );

/**
 * Report a number of bytes written to this tracer.
 *
 * @param bytesWritten number of written bytes to add to the running total
 */
void bytesWritten( long bytesWritten );

/**
 * Report a number of observed page flushes to this tracer.
 *
 * @param flushes number of flushes to add to the running total
 */
void flushes( long flushes );
}
Expand Up @@ -19,6 +19,7 @@
*/
package org.neo4j.io.pagecache.tracing.cursor;

import java.io.IOException;
import java.lang.invoke.MethodHandle;
import java.lang.invoke.MethodHandles;
import java.lang.invoke.MethodType;
Expand All @@ -27,6 +28,8 @@

import org.neo4j.io.pagecache.PageSwapper;
import org.neo4j.io.pagecache.tracing.EvictionEvent;
import org.neo4j.io.pagecache.tracing.FlushEvent;
import org.neo4j.io.pagecache.tracing.FlushEventOpportunity;
import org.neo4j.io.pagecache.tracing.PageCacheTracer;
import org.neo4j.io.pagecache.tracing.PageFaultEvent;
import org.neo4j.io.pagecache.tracing.PinEvent;
Expand All @@ -39,11 +42,17 @@ public class DefaultPageCursorTracer implements PageCursorTracer
private long unpins = 0L;
private long faults = 0L;
private long bytesRead = 0L;
private long bytesWritten = 0L;
private long evictions = 0L;
private long flushes;

private long cyclePinsStart;
private long cycleUnpinsStart;
private long cycleFaultsStart;
private long cycleBytesReadStart;
private long cycleBytesWrittenStart;
private long cycleEvictionsStart;
private long cycleFlushesStart;

private PageCacheTracer pageCacheTracer;

Expand Down Expand Up @@ -100,6 +109,9 @@ public void init( PageCacheTracer pageCacheTracer )
this.cycleUnpinsStart = unpins;
this.cycleFaultsStart = faults;
this.cycleBytesReadStart = bytesRead;
this.cycleBytesWrittenStart = bytesWritten;
this.cycleEvictionsStart = evictions;
this.cycleFlushesStart = flushes;
}

public void reportEvents()
Expand All @@ -109,6 +121,9 @@ public void reportEvents()
pageCacheTracer.unpins( Math.abs( pins - cycleUnpinsStart ) );
pageCacheTracer.faults( Math.abs( faults - cycleFaultsStart ) );
pageCacheTracer.bytesRead( Math.abs( bytesRead - cycleBytesReadStart ) );
pageCacheTracer.evictions( Math.abs( evictions - cycleEvictionsStart ) );
pageCacheTracer.bytesWritten( Math.abs( bytesWritten - cycleBytesWrittenStart ) );
pageCacheTracer.flushes( Math.abs( flushes - cycleFlushesStart ) );
}

@Override
Expand Down Expand Up @@ -206,12 +221,47 @@ public void done()
}
};

// Shared eviction event: all setters are deliberately no-ops (this tracer only
// counts), and closing the event bumps the local eviction counter, which is
// later folded into the page cache tracer by reportEvents().
private final EvictionEvent evictionEvent = new EvictionEvent()
{
@Override
public void setFilePageId( long filePageId )
{
// no-op: page identity is not recorded, only counts
}

@Override
public void setSwapper( PageSwapper swapper )
{
// no-op
}

@Override
public FlushEventOpportunity flushEventOpportunity()
{
// Flushes performed during eviction are counted via the shared opportunity.
return flushEventOpportunity;
}

@Override
public void threwException( IOException exception )
{
// no-op: exceptions during eviction are not tracked here
}

@Override
public void setCachePageId( int cachePageId )
{
// no-op
}

@Override
public void close()
{
// One completed eviction per closed event.
evictions++;
}
};

private final PageFaultEvent pageFaultEvent = new PageFaultEvent()
{
@Override
public void addBytesRead( long bytes )
{
    // Accumulate rather than overwrite: a single duplicated assignment here
    // would double-count the bytes read for this fault.
    bytesRead += bytes;
}

@Override
Expand All @@ -229,8 +279,7 @@ public void done( Throwable throwable )
@Override
public EvictionEvent beginEviction()
{
    // Evictions triggered while servicing this fault are counted through the
    // shared counting event (previously a NULL event, which lost the counts).
    return evictionEvent;
}

@Override
Expand All @@ -239,4 +288,39 @@ public void setCachePageId( int cachePageId )
}
};

// Every flush opportunity hands out the shared counting flushEvent.
// NOTE: kept as an anonymous class on purpose — a lambda body here would be an
// illegal forward reference, because flushEvent is declared below this field.
private final FlushEventOpportunity flushEventOpportunity = new FlushEventOpportunity()
{
@Override
public FlushEvent beginFlush( long filePageId, int cachePageId, PageSwapper swapper )
{
return flushEvent;
}
};

// Shared flush event: accumulates written bytes as they are reported and
// counts one flush per completed event; per-page counts are ignored.
private final FlushEvent flushEvent = new FlushEvent()
{
@Override
public void addBytesWritten( long bytes )
{
// Accumulate into the cursor-local counter; folded into the page cache
// tracer by reportEvents().
bytesWritten += bytes;
}

@Override
public void done()
{
// One completed flush per finished event.
flushes++;
}

@Override
public void done( IOException exception )
{
// A failed flush still completed an attempt; count it the same way.
done();
}

@Override
public void addPagesFlushed( int pageCount )
{
// no-op: page-granularity flush counts are not tracked by this tracer
}
};

}
Expand Up @@ -39,8 +39,10 @@
import org.neo4j.adversaries.fs.AdversarialFileSystemAbstraction;
import org.neo4j.io.fs.FileSystemAbstraction;
import org.neo4j.io.pagecache.tracing.PageCacheTracer;
import org.neo4j.io.pagecache.tracing.cursor.PageCursorTracerSupplier;
import org.neo4j.test.LinearHistoryPageCacheTracer;
import org.neo4j.test.rule.RepeatRule;
import org.neo4j.test.LinearHistoryPageCursorTracer;

import static org.hamcrest.Matchers.instanceOf;
import static org.hamcrest.Matchers.is;
Expand Down Expand Up @@ -137,7 +139,7 @@ public void mustNotLoseUpdates() throws Exception
final int threadCount = 8;
final int pageSize = threadCount * 4;

getPageCache( fs, cachePages, pageSize, PageCacheTracer.NULL );
getPageCache( fs, cachePages, pageSize, PageCacheTracer.NULL, PageCursorTracerSupplier.NULL );
final PagedFile pagedFile = pageCache.map( file( "a" ), pageSize );

ensureAllPagesExists( filePages, pagedFile );
Expand Down Expand Up @@ -255,7 +257,7 @@ public void mustNotLoseUpdatesWhenOpeningMultiplePageCursorsPerThread() throws E
final int maxCursorsPerThread = cachePages / (1 + threadCount);
assertThat( maxCursorsPerThread * threadCount, lessThan( cachePages ) );

getPageCache( fs, cachePages, pageSize, PageCacheTracer.NULL );
getPageCache( fs, cachePages, pageSize, PageCacheTracer.NULL, PageCursorTracerSupplier.NULL );
final PagedFile pagedFile = pageCache.map( file( "a" ), pageSize );

ensureAllPagesExists( filePages, pagedFile );
Expand Down Expand Up @@ -344,7 +346,7 @@ public void writeLockingCursorMustThrowWhenLockingPageRacesWithUnmapping() throw
File file = file( "a" );
generateFileWithRecords( file, recordsPerFilePage * 2, recordSize );

getPageCache( fs, maxPages, pageCachePageSize, PageCacheTracer.NULL );
getPageCache( fs, maxPages, pageCachePageSize, PageCacheTracer.NULL, PageCursorTracerSupplier.NULL );

final PagedFile pf = pageCache.map( file, filePageSize );
final CountDownLatch hasLockLatch = new CountDownLatch( 1 );
Expand Down Expand Up @@ -453,7 +455,8 @@ public void pageCacheMustRemainInternallyConsistentWhenGettingRandomFailures() t
// Because our test failures are non-deterministic, we use this tracer to capture a full history of the
// events leading up to any given failure.
LinearHistoryPageCacheTracer tracer = new LinearHistoryPageCacheTracer();
getPageCache( fs, maxPages, pageCachePageSize, tracer );
// Pair the linear-history cache tracer with matching per-cursor tracers.
getPageCache( fs, maxPages, pageCachePageSize, tracer, LinearHistoryPageCursorTracer::new );

PagedFile pfA = pageCache.map( existingFile( "a" ), filePageSize );
PagedFile pfB = pageCache.map( existingFile( "b" ), filePageSize / 2 + 1 );
Expand Down

0 comments on commit 5d2ebf4

Please sign in to comment.