Skip to content

Commit

Permalink
Make page cache warmer file handling more sympathetic to how backup and store copy work.
Browse files Browse the repository at this point in the history

* The page cache warmer now stores its profile data in GZIP format,
which includes checksums that maintain the validity of the files.
* This way, we no longer need to do our replacing file move dance in
order to attempt to update the profiles pseudo-atomically.
* Instead, the files are overwritten in-place, and if the checksum is
wrong, we just discard the profile.
* Removing the file move dance means that backup will no longer observe
that a file it wants to copy has been removed while it wasn't looking.
* Previously, when this happened, it would cause backup or store copy to
fail, but now it never happens so we're good.
* This also solves a problem where backup could observe a file to have
contents that get truncated to zero before or while backup is
streaming those contents. This could also cause backup to fail with very
strange errors, but now the files are never truncated to zero, so this
problem is never encountered.
  • Loading branch information
chrisvest committed Mar 6, 2018
1 parent 23d9cfd commit 535038b
Show file tree
Hide file tree
Showing 12 changed files with 121 additions and 44 deletions.
Expand Up @@ -332,8 +332,7 @@ static Predicate<String> fileWatcherFileNameFilter()
fileName -> fileName.startsWith( TransactionLogFiles.DEFAULT_NAME ), fileName -> fileName.startsWith( TransactionLogFiles.DEFAULT_NAME ),
fileName -> fileName.startsWith( IndexConfigStore.INDEX_DB_FILE_NAME ), fileName -> fileName.startsWith( IndexConfigStore.INDEX_DB_FILE_NAME ),
filename -> filename.startsWith( StoreUtil.TEMP_COPY_DIRECTORY_NAME ), filename -> filename.startsWith( StoreUtil.TEMP_COPY_DIRECTORY_NAME ),
filename -> filename.endsWith( PageCacheWarmer.SUFFIX_CACHEPROF ), filename -> filename.endsWith( PageCacheWarmer.SUFFIX_CACHEPROF )
filename -> filename.endsWith( PageCacheWarmer.SUFFIX_CACHEPROF_TMP )
); );
} }


Expand Down
Expand Up @@ -306,8 +306,7 @@ static Predicate<String> fileWatcherFileNameFilter()
fileName -> fileName.startsWith( IndexConfigStore.INDEX_DB_FILE_NAME ), fileName -> fileName.startsWith( IndexConfigStore.INDEX_DB_FILE_NAME ),
filename -> filename.startsWith( StoreUtil.BRANCH_SUBDIRECTORY ), filename -> filename.startsWith( StoreUtil.BRANCH_SUBDIRECTORY ),
filename -> filename.startsWith( StoreUtil.TEMP_COPY_DIRECTORY_NAME ), filename -> filename.startsWith( StoreUtil.TEMP_COPY_DIRECTORY_NAME ),
filename -> filename.endsWith( PageCacheWarmer.SUFFIX_CACHEPROF ), filename -> filename.endsWith( PageCacheWarmer.SUFFIX_CACHEPROF ) );
filename -> filename.endsWith( PageCacheWarmer.SUFFIX_CACHEPROF_TMP ) );
} }


@Override @Override
Expand Down
Expand Up @@ -74,6 +74,5 @@ public void fileWatcherFileNameFilter()
assertTrue( filter.test( IndexConfigStore.INDEX_DB_FILE_NAME + ".any" ) ); assertTrue( filter.test( IndexConfigStore.INDEX_DB_FILE_NAME + ".any" ) );
assertTrue( filter.test( StoreUtil.TEMP_COPY_DIRECTORY_NAME ) ); assertTrue( filter.test( StoreUtil.TEMP_COPY_DIRECTORY_NAME ) );
assertTrue( filter.test( MetaDataStore.DEFAULT_NAME + PageCacheWarmer.SUFFIX_CACHEPROF ) ); assertTrue( filter.test( MetaDataStore.DEFAULT_NAME + PageCacheWarmer.SUFFIX_CACHEPROF ) );
assertTrue( filter.test( MetaDataStore.DEFAULT_NAME + PageCacheWarmer.SUFFIX_CACHEPROF_TMP ) );
} }
} }
Expand Up @@ -47,6 +47,5 @@ public void fileWatcherFileNameFilter()
assertTrue( filter.test( StoreUtil.BRANCH_SUBDIRECTORY ) ); assertTrue( filter.test( StoreUtil.BRANCH_SUBDIRECTORY ) );
assertTrue( filter.test( StoreUtil.TEMP_COPY_DIRECTORY_NAME ) ); assertTrue( filter.test( StoreUtil.TEMP_COPY_DIRECTORY_NAME ) );
assertTrue( filter.test( MetaDataStore.DEFAULT_NAME + PageCacheWarmer.SUFFIX_CACHEPROF ) ); assertTrue( filter.test( MetaDataStore.DEFAULT_NAME + PageCacheWarmer.SUFFIX_CACHEPROF ) );
assertTrue( filter.test( MetaDataStore.DEFAULT_NAME + PageCacheWarmer.SUFFIX_CACHEPROF_TMP ) );
} }
} }
Expand Up @@ -578,8 +578,7 @@ static Predicate<String> fileWatcherFileNameFilter()
fileName -> fileName.startsWith( IndexConfigStore.INDEX_DB_FILE_NAME ), fileName -> fileName.startsWith( IndexConfigStore.INDEX_DB_FILE_NAME ),
filename -> filename.startsWith( StoreUtil.BRANCH_SUBDIRECTORY ), filename -> filename.startsWith( StoreUtil.BRANCH_SUBDIRECTORY ),
filename -> filename.startsWith( StoreUtil.TEMP_COPY_DIRECTORY_NAME ), filename -> filename.startsWith( StoreUtil.TEMP_COPY_DIRECTORY_NAME ),
filename -> filename.endsWith( PageCacheWarmer.SUFFIX_CACHEPROF ), filename -> filename.endsWith( PageCacheWarmer.SUFFIX_CACHEPROF )
filename -> filename.endsWith( PageCacheWarmer.SUFFIX_CACHEPROF_TMP )
); );
} }


Expand Down
Expand Up @@ -73,6 +73,5 @@ public void fileWatcherFileNameFilter()
assertTrue( filter.test( StoreUtil.BRANCH_SUBDIRECTORY ) ); assertTrue( filter.test( StoreUtil.BRANCH_SUBDIRECTORY ) );
assertTrue( filter.test( StoreUtil.TEMP_COPY_DIRECTORY_NAME ) ); assertTrue( filter.test( StoreUtil.TEMP_COPY_DIRECTORY_NAME ) );
assertTrue( filter.test( MetaDataStore.DEFAULT_NAME + PageCacheWarmer.SUFFIX_CACHEPROF ) ); assertTrue( filter.test( MetaDataStore.DEFAULT_NAME + PageCacheWarmer.SUFFIX_CACHEPROF ) );
assertTrue( filter.test( MetaDataStore.DEFAULT_NAME + PageCacheWarmer.SUFFIX_CACHEPROF_TMP ) );
} }
} }
Expand Up @@ -78,8 +78,7 @@ static Predicate<String> enterpriseNonClusterFileWatcherFileNameFilter()
return Predicates.any( return Predicates.any(
fileName -> fileName.startsWith( TransactionLogFiles.DEFAULT_NAME ), fileName -> fileName.startsWith( TransactionLogFiles.DEFAULT_NAME ),
fileName -> fileName.startsWith( IndexConfigStore.INDEX_DB_FILE_NAME ), fileName -> fileName.startsWith( IndexConfigStore.INDEX_DB_FILE_NAME ),
filename -> filename.endsWith( PageCacheWarmer.SUFFIX_CACHEPROF ), filename -> filename.endsWith( PageCacheWarmer.SUFFIX_CACHEPROF )
filename -> filename.endsWith( PageCacheWarmer.SUFFIX_CACHEPROF_TMP )
); );
} }


Expand Down
Expand Up @@ -19,14 +19,11 @@
*/ */
package org.neo4j.kernel.impl.pagecache; package org.neo4j.kernel.impl.pagecache;


import org.apache.commons.compress.compressors.CompressorException;
import org.apache.commons.compress.compressors.CompressorStreamFactory;

import java.io.File; import java.io.File;
import java.io.IOException; import java.io.IOException;
import java.io.InputStream; import java.io.InputStream;
import java.io.OutputStream; import java.io.OutputStream;
import java.nio.file.StandardCopyOption; import java.nio.ByteBuffer;
import java.util.Collection; import java.util.Collection;
import java.util.List; import java.util.List;
import java.util.OptionalLong; import java.util.OptionalLong;
Expand All @@ -37,10 +34,15 @@
import java.util.concurrent.ThreadFactory; import java.util.concurrent.ThreadFactory;
import java.util.concurrent.ThreadPoolExecutor; import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeUnit;
import java.util.zip.GZIPInputStream;
import java.util.zip.GZIPOutputStream;
import java.util.zip.ZipException;


import org.neo4j.graphdb.Resource; import org.neo4j.graphdb.Resource;
import org.neo4j.io.IOUtils; import org.neo4j.io.IOUtils;
import org.neo4j.io.fs.FileSystemAbstraction; import org.neo4j.io.fs.FileSystemAbstraction;
import org.neo4j.io.fs.OpenMode;
import org.neo4j.io.fs.StoreChannel;
import org.neo4j.io.pagecache.PageCache; import org.neo4j.io.pagecache.PageCache;
import org.neo4j.io.pagecache.PageCursor; import org.neo4j.io.pagecache.PageCursor;
import org.neo4j.io.pagecache.PagedFile; import org.neo4j.io.pagecache.PagedFile;
Expand All @@ -65,15 +67,8 @@
public class PageCacheWarmer implements NeoStoreFileListing.StoreFileProvider public class PageCacheWarmer implements NeoStoreFileListing.StoreFileProvider
{ {
public static final String SUFFIX_CACHEPROF = ".cacheprof"; public static final String SUFFIX_CACHEPROF = ".cacheprof";
public static final String SUFFIX_CACHEPROF_TMP = ".cacheprof.tmp";


// We use the deflate algorithm since it has been experimentally shown to be both the fastest,
// and the algorithm that produces the smallest output.
// For instance, a 5.7 GiB file where 7 out of 8 pages are in memory, produces a 57 KiB profile file,
// where the uncompressed profile is 87.5 KiB. A 35% reduction.
private static final String COMPRESSION_FORMAT = CompressorStreamFactory.getDeflate();
private static final int IO_PARALLELISM = Runtime.getRuntime().availableProcessors(); private static final int IO_PARALLELISM = Runtime.getRuntime().availableProcessors();
private static final CompressorStreamFactory COMPRESSOR_FACTORY = new CompressorStreamFactory( true, 1024 );


private final FileSystemAbstraction fs; private final FileSystemAbstraction fs;
private final PageCache pageCache; private final PageCache pageCache;
Expand Down Expand Up @@ -109,7 +104,7 @@ public synchronized Resource addFilesTo( Collection<StoreFileMetadata> coll ) th
List<PagedFile> files = pageCache.listExistingMappings(); List<PagedFile> files = pageCache.listExistingMappings();
for ( PagedFile file : files ) for ( PagedFile file : files )
{ {
File profileFile = profileOutputFileFinal( file ); File profileFile = profileOutputFileName( file );
if ( fs.fileExists( profileFile ) ) if ( fs.fileExists( profileFile ) )
{ {
coll.add( new StoreFileMetadata( profileFile, 1, false ) ); coll.add( new StoreFileMetadata( profileFile, 1, false ) );
Expand Down Expand Up @@ -158,13 +153,31 @@ public synchronized OptionalLong reheat() throws IOException
private long reheat( PagedFile file ) throws IOException private long reheat( PagedFile file ) throws IOException
{ {
long pagesLoaded = 0; long pagesLoaded = 0;
File savedProfile = profileOutputFileFinal( file ); File savedProfile = profileOutputFileName( file );


if ( !fs.fileExists( savedProfile ) ) if ( !fs.fileExists( savedProfile ) )
{ {
return pagesLoaded; return pagesLoaded;
} }


// First read through the profile to verify its checksum.
try ( InputStream inputStream = compressedInputStream( savedProfile ) )
{
int b;
do
{
b = inputStream.read();
}
while ( b != -1 );
}
catch ( ZipException ignore )
{
// ZipException is used to indicate checksum failures.
// Let's ignore this file since it's corrupt.
return pagesLoaded;
}

// The file contents checks out. Let's load it in.
try ( InputStream inputStream = compressedInputStream( savedProfile ); try ( InputStream inputStream = compressedInputStream( savedProfile );
PageLoader loader = pageLoaderFactory.getLoader( file ) ) PageLoader loader = pageLoaderFactory.getLoader( file ) )
{ {
Expand Down Expand Up @@ -224,9 +237,9 @@ public synchronized OptionalLong profile() throws IOException
private long profile( PagedFile file ) throws IOException private long profile( PagedFile file ) throws IOException
{ {
long pagesInMemory = 0; long pagesInMemory = 0;
File outputNext = profileOutputFileNext( file ); File outputFile = profileOutputFileName( file );


try ( OutputStream outputStream = compressedOutputStream( outputNext ); try ( OutputStream outputStream = compressedOutputStream( outputFile );
PageCursor cursor = file.io( 0, PF_SHARED_READ_LOCK | PF_NO_FAULT ) ) PageCursor cursor = file.io( 0, PF_SHARED_READ_LOCK | PF_NO_FAULT ) )
{ {
int stepper = 0; int stepper = 0;
Expand Down Expand Up @@ -258,8 +271,6 @@ private long profile( PagedFile file ) throws IOException
outputStream.flush(); outputStream.flush();
} }


File outputFinal = profileOutputFileFinal( file );
fs.renameFile( outputNext, outputFinal, StandardCopyOption.REPLACE_EXISTING );
return pagesInMemory; return pagesInMemory;
} }


Expand All @@ -268,9 +279,9 @@ private InputStream compressedInputStream( File input ) throws IOException
InputStream source = fs.openAsInputStream( input ); InputStream source = fs.openAsInputStream( input );
try try
{ {
return COMPRESSOR_FACTORY.createCompressorInputStream( COMPRESSION_FORMAT, source ); return new GZIPInputStream( source );
} }
catch ( CompressorException e ) catch ( IOException e )
{ {
IOUtils.closeAllSilently( source ); IOUtils.closeAllSilently( source );
throw new IOException( "Exception when building decompressor.", e ); throw new IOException( "Exception when building decompressor.", e );
Expand All @@ -279,31 +290,44 @@ private InputStream compressedInputStream( File input ) throws IOException


private OutputStream compressedOutputStream( File output ) throws IOException private OutputStream compressedOutputStream( File output ) throws IOException
{ {
OutputStream sink = fs.openAsOutputStream( output, false ); StoreChannel channel = fs.open( output, OpenMode.READ_WRITE );
ByteBuffer buf = ByteBuffer.allocate( 1 );
OutputStream sink = new OutputStream()
{
@Override
public void write( int b ) throws IOException
{
buf.put( (byte) b );
buf.flip();
channel.write( buf );
buf.flip();
}

@Override
public void close() throws IOException
{
channel.truncate( channel.position() );
channel.close();
}
};
try try
{ {
return COMPRESSOR_FACTORY.createCompressorOutputStream( COMPRESSION_FORMAT, sink ); return new GZIPOutputStream( sink );
} }
catch ( CompressorException e ) catch ( IOException e )
{ {
IOUtils.closeAllSilently( sink ); // We close the channel instead of the sink here, because we don't want to truncate the file if we fail
// to open the gzip output stream.
IOUtils.closeAllSilently( channel );
throw new IOException( "Exception when building compressor.", e ); throw new IOException( "Exception when building compressor.", e );
} }
} }


private File profileOutputFileFinal( PagedFile file ) private File profileOutputFileName( PagedFile file )
{ {
File mappedFile = file.file(); File mappedFile = file.file();
String profileOutputName = "." + mappedFile.getName() + SUFFIX_CACHEPROF; String profileOutputName = "." + mappedFile.getName() + SUFFIX_CACHEPROF;
File parent = mappedFile.getParentFile(); File parent = mappedFile.getParentFile();
return new File( parent, profileOutputName ); return new File( parent, profileOutputName );
} }

private File profileOutputFileNext( PagedFile file )
{
File mappedFile = file.file();
String profileOutputName = "." + mappedFile.getName() + SUFFIX_CACHEPROF_TMP;
File parent = mappedFile.getParentFile();
return new File( parent, profileOutputName );
}
} }
Expand Up @@ -44,6 +44,5 @@ public void fileWatcherFileNameFilter()
assertTrue( filter.test( TransactionLogFiles.DEFAULT_NAME + ".1" ) ); assertTrue( filter.test( TransactionLogFiles.DEFAULT_NAME + ".1" ) );
assertTrue( filter.test( IndexConfigStore.INDEX_DB_FILE_NAME + ".any" ) ); assertTrue( filter.test( IndexConfigStore.INDEX_DB_FILE_NAME + ".any" ) );
assertTrue( filter.test( MetaDataStore.DEFAULT_NAME + PageCacheWarmer.SUFFIX_CACHEPROF ) ); assertTrue( filter.test( MetaDataStore.DEFAULT_NAME + PageCacheWarmer.SUFFIX_CACHEPROF ) );
assertTrue( filter.test( MetaDataStore.DEFAULT_NAME + PageCacheWarmer.SUFFIX_CACHEPROF_TMP ) );
} }
} }
Expand Up @@ -30,7 +30,9 @@
import org.neo4j.causalclustering.discovery.ClusterMember; import org.neo4j.causalclustering.discovery.ClusterMember;
import org.neo4j.causalclustering.discovery.CoreClusterMember; import org.neo4j.causalclustering.discovery.CoreClusterMember;
import org.neo4j.concurrent.BinaryLatch; import org.neo4j.concurrent.BinaryLatch;
import org.neo4j.ext.udc.UdcSettings;
import org.neo4j.graphdb.factory.GraphDatabaseSettings; import org.neo4j.graphdb.factory.GraphDatabaseSettings;
import org.neo4j.kernel.configuration.Settings;
import org.neo4j.kernel.impl.pagecache.PageCacheWarmerMonitor; import org.neo4j.kernel.impl.pagecache.PageCacheWarmerMonitor;
import org.neo4j.kernel.monitoring.Monitors; import org.neo4j.kernel.monitoring.Monitors;
import org.neo4j.test.causalclustering.ClusterRule; import org.neo4j.test.causalclustering.ClusterRule;
Expand All @@ -43,7 +45,9 @@ public class PageCacheWarmupCcIT extends PageCacheWarmupTestSupport
@Rule @Rule
public ClusterRule clusterRule = new ClusterRule() public ClusterRule clusterRule = new ClusterRule()
.withNumberOfReadReplicas( 0 ) .withNumberOfReadReplicas( 0 )
.withSharedCoreParam( UdcSettings.udc_enabled, Settings.FALSE )
.withSharedCoreParam( GraphDatabaseSettings.pagecache_warmup_profiling_interval, "100ms" ) .withSharedCoreParam( GraphDatabaseSettings.pagecache_warmup_profiling_interval, "100ms" )
.withSharedReadReplicaParam( UdcSettings.udc_enabled, Settings.FALSE )
.withSharedReadReplicaParam( GraphDatabaseSettings.pagecache_warmup_profiling_interval, "100ms" ); .withSharedReadReplicaParam( GraphDatabaseSettings.pagecache_warmup_profiling_interval, "100ms" );


private Cluster cluster; private Cluster cluster;
Expand Down
Expand Up @@ -30,6 +30,8 @@
import org.neo4j.commandline.admin.BlockerLocator; import org.neo4j.commandline.admin.BlockerLocator;
import org.neo4j.commandline.admin.CommandLocator; import org.neo4j.commandline.admin.CommandLocator;
import org.neo4j.commandline.admin.RealOutsideWorld; import org.neo4j.commandline.admin.RealOutsideWorld;
import org.neo4j.concurrent.BinaryLatch;
import org.neo4j.ext.udc.UdcSettings;
import org.neo4j.graphdb.factory.GraphDatabaseSettings; import org.neo4j.graphdb.factory.GraphDatabaseSettings;
import org.neo4j.io.fs.FileUtils; import org.neo4j.io.fs.FileUtils;
import org.neo4j.kernel.configuration.Settings; import org.neo4j.kernel.configuration.Settings;
Expand Down Expand Up @@ -92,6 +94,7 @@ public void cacheProfilesMustBeIncludedInOnlineBackups() throws Exception
{ {
int backupPort = PortAuthority.allocatePort(); int backupPort = PortAuthority.allocatePort();
db.setConfig( MetricsSettings.metricsEnabled, Settings.FALSE ) db.setConfig( MetricsSettings.metricsEnabled, Settings.FALSE )
.setConfig( UdcSettings.udc_enabled, Settings.FALSE )
.setConfig( OnlineBackupSettings.online_backup_enabled, Settings.TRUE ) .setConfig( OnlineBackupSettings.online_backup_enabled, Settings.TRUE )
.setConfig( OnlineBackupSettings.online_backup_server, "localhost:" + backupPort ) .setConfig( OnlineBackupSettings.online_backup_server, "localhost:" + backupPort )
.setConfig( GraphDatabaseSettings.pagecache_warmup_profiling_interval, "100ms" ); .setConfig( GraphDatabaseSettings.pagecache_warmup_profiling_interval, "100ms" );
Expand All @@ -100,9 +103,12 @@ public void cacheProfilesMustBeIncludedInOnlineBackups() throws Exception
createTestData( db ); createTestData( db );
long pagesInMemory = waitForCacheProfile( db ); long pagesInMemory = waitForCacheProfile( db );


BinaryLatch latch = pauseProfile( db ); // We don't want torn profile files in this test.

File metricsDirectory = dir.cleanDirectory( "metrics" ); File metricsDirectory = dir.cleanDirectory( "metrics" );
File backupDir = dir.cleanDirectory( "backup" ); File backupDir = dir.cleanDirectory( "backup" );
assertTrue( OnlineBackup.from( "localhost", backupPort ).backup( backupDir ).isConsistent() ); assertTrue( OnlineBackup.from( "localhost", backupPort ).backup( backupDir ).isConsistent() );
latch.release();
DatabaseRule.RestartAction useBackupDir = ( fs, storeDir ) -> DatabaseRule.RestartAction useBackupDir = ( fs, storeDir ) ->
{ {
fs.deleteRecursively( storeDir ); fs.deleteRecursively( storeDir );
Expand All @@ -118,6 +124,28 @@ public void cacheProfilesMustBeIncludedInOnlineBackups() throws Exception
verifyEventuallyWarmsUp( pagesInMemory, metricsDirectory ); verifyEventuallyWarmsUp( pagesInMemory, metricsDirectory );
} }


@Test
public void cacheProfilesMustNotInterfereWithOnlineBackups() throws Exception
{
// Here we are testing that the file modifications done by the page cache profiler,
// does not make online backup throw any exceptions.
int backupPort = PortAuthority.allocatePort();
db.setConfig( MetricsSettings.metricsEnabled, Settings.FALSE )
.setConfig( OnlineBackupSettings.online_backup_enabled, Settings.TRUE )
.setConfig( OnlineBackupSettings.online_backup_server, "localhost:" + backupPort )
.setConfig( GraphDatabaseSettings.pagecache_warmup_profiling_interval, "1ms" );
db.ensureStarted();

createTestData( db );
waitForCacheProfile( db );

for ( int i = 0; i < 20; i++ )
{
String backupDir = dir.cleanDirectory( "backup" ).getAbsolutePath();
assertTrue( OnlineBackup.from( "localhost", backupPort ).full( backupDir ).isConsistent() );
}
}

@Test @Test
public void cacheProfilesMustBeIncludedInOfflineBackups() throws Exception public void cacheProfilesMustBeIncludedInOfflineBackups() throws Exception
{ {
Expand Down
Expand Up @@ -68,6 +68,12 @@ long waitForCacheProfile( GraphDatabaseAPI db )
return pageCount.get(); return pageCount.get();
} }


BinaryLatch pauseProfile( GraphDatabaseAPI db )
{
Monitors monitors = db.getDependencyResolver().resolveDependency( Monitors.class );
return new PauseProfileMonitor( monitors );
}

private static class AwaitProfileMonitor implements PageCacheWarmerMonitor private static class AwaitProfileMonitor implements PageCacheWarmerMonitor
{ {
private final AtomicLong pageCount; private final AtomicLong pageCount;
Expand All @@ -91,4 +97,27 @@ public void profileCompleted( long elapsedMillis, long pagesInMemory )
profileLatch.release(); profileLatch.release();
} }
} }

private static class PauseProfileMonitor extends BinaryLatch implements PageCacheWarmerMonitor
{
private final Monitors monitors;

PauseProfileMonitor( Monitors monitors )
{
this.monitors = monitors;
monitors.addMonitorListener( this );
}

@Override
public void warmupCompleted( long elapsedMillis, long pagesLoaded )
{
}

@Override
public void profileCompleted( long elapsedMillis, long pagesInMemory )
{
await();
monitors.removeMonitorListener( this );
}
}
} }

0 comments on commit 535038b

Please sign in to comment.