
Commit

Cleanup and enhance tests
MishaDemianenko committed Mar 21, 2017
1 parent b2ade02 commit 83de697
Showing 5 changed files with 125 additions and 63 deletions.

This file was deleted.

@@ -23,6 +23,8 @@
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.IndexWriter;
 import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.index.KeepOnlyLastCommitDeletionPolicy;
+import org.apache.lucene.index.SnapshotDeletionPolicy;
 import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.search.similarities.Similarity;
 import org.apache.lucene.store.Directory;
@@ -89,7 +91,7 @@ private IndexWriter newIndexWriter( IndexIdentifier identifier )
         Directory indexDirectory = getIndexDirectory( identifier );
         IndexType type = getType( identifier );
         IndexWriterConfig writerConfig = new IndexWriterConfig( type.analyzer );
-        writerConfig.setIndexDeletionPolicy( new MultipleBackupDeletionPolicy() );
+        writerConfig.setIndexDeletionPolicy( new SnapshotDeletionPolicy( new KeepOnlyLastCommitDeletionPolicy() ) );
         Similarity similarity = type.getSimilarity();
         if ( similarity != null )
         {
@@ -23,10 +23,11 @@
 import org.apache.lucene.codecs.blocktreeords.BlockTreeOrdsPostingsFormat;
 import org.apache.lucene.codecs.lucene54.Lucene54Codec;
 import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.index.KeepOnlyLastCommitDeletionPolicy;
 import org.apache.lucene.index.LogByteSizeMergePolicy;
+import org.apache.lucene.index.SnapshotDeletionPolicy;
 
 import org.neo4j.index.impl.lucene.legacy.LuceneDataSource;
-import org.neo4j.index.impl.lucene.legacy.MultipleBackupDeletionPolicy;
 import org.neo4j.unsafe.impl.internal.dragons.FeatureToggles;
 
 /**
@@ -70,7 +71,7 @@ public static IndexWriterConfig standard()
 
         writerConfig.setMaxBufferedDocs( MAX_BUFFERED_DOCS );
         writerConfig.setMaxBufferedDeleteTerms( MAX_BUFFERED_DELETE_TERMS );
-        writerConfig.setIndexDeletionPolicy( new MultipleBackupDeletionPolicy() );
+        writerConfig.setIndexDeletionPolicy( new SnapshotDeletionPolicy( new KeepOnlyLastCommitDeletionPolicy() ) );
         writerConfig.setUseCompoundFile( true );
         writerConfig.setRAMBufferSizeMB( STANDARD_RAM_BUFFER_SIZE_MB );
         writerConfig.setCodec(new Lucene54Codec()
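For reference, the policy pair the commit switches to is stock Lucene: KeepOnlyLastCommitDeletionPolicy deletes superseded index commits, and the SnapshotDeletionPolicy wrapper lets a caller pin a commit so its files survive until the snapshot is released. A minimal sketch of that lifecycle, using a throwaway RAMDirectory and an empty document as placeholders (illustrative only, not code from this repository):

import java.util.Collection;

import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.index.IndexCommit;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.KeepOnlyLastCommitDeletionPolicy;
import org.apache.lucene.index.SnapshotDeletionPolicy;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.RAMDirectory;

public class SnapshotDeletionPolicyExample
{
    public static void main( String[] args ) throws Exception
    {
        // Same policy combination the commit configures on the index writers above.
        SnapshotDeletionPolicy snapshotPolicy =
                new SnapshotDeletionPolicy( new KeepOnlyLastCommitDeletionPolicy() );

        Directory directory = new RAMDirectory(); // placeholder directory for the sketch
        IndexWriterConfig config = new IndexWriterConfig( new StandardAnalyzer() );
        config.setIndexDeletionPolicy( snapshotPolicy );

        try ( IndexWriter writer = new IndexWriter( directory, config ) )
        {
            writer.addDocument( new Document() );
            writer.commit();

            // Pin the current commit: its files survive further writes and commits.
            IndexCommit snapshot = snapshotPolicy.snapshot();
            try
            {
                Collection<String> files = snapshot.getFileNames(); // safe to copy for a backup
                System.out.println( files );
            }
            finally
            {
                // Releasing the snapshot lets the writer delete superseded files again.
                snapshotPolicy.release( snapshot );
            }
        }
    }
}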
1 change: 0 additions & 1 deletion community/neo4j/pom.xml
@@ -100,7 +100,6 @@
             <artifactId>commons-io</artifactId>
             <scope>test</scope>
         </dependency>
-
         <dependency>
             <groupId>org.neo4j</groupId>
             <artifactId>neo4j-common</artifactId>
132 changes: 119 additions & 13 deletions community/neo4j/src/test/java/org/neo4j/index/backup/IndexBackupIT.java
@@ -20,64 +20,171 @@
 package org.neo4j.index.backup;
 
 import org.apache.commons.io.FilenameUtils;
+import org.junit.Before;
 import org.junit.Rule;
 import org.junit.Test;
 
 import java.io.File;
 import java.io.IOException;
+import java.util.List;
 import java.util.Set;
 import java.util.concurrent.TimeUnit;
 import java.util.stream.Collectors;
+import java.util.stream.LongStream;
 
 import org.neo4j.graphdb.DependencyResolver;
 import org.neo4j.graphdb.Label;
 import org.neo4j.graphdb.Node;
 import org.neo4j.graphdb.ResourceIterator;
 import org.neo4j.graphdb.Transaction;
+import org.neo4j.io.fs.FileSystemAbstraction;
 import org.neo4j.kernel.impl.api.index.IndexingService;
 import org.neo4j.kernel.impl.transaction.log.checkpoint.CheckPointer;
 import org.neo4j.kernel.impl.transaction.log.checkpoint.SimpleTriggerInfo;
 import org.neo4j.test.rule.EmbeddedDatabaseRule;
+import org.neo4j.test.rule.RandomRule;
 
+import static java.lang.String.format;
+import static org.hamcrest.Matchers.greaterThanOrEqualTo;
+import static org.hamcrest.Matchers.hasSize;
 import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertThat;
 import static org.junit.Assert.assertTrue;
 
 public class IndexBackupIT
 {
+    private static final String PROPERTY_PREFIX = "property";
+    private static final int NUMBER_OF_INDEXES = 10;
+
+    @Rule
+    public RandomRule randomRule = new RandomRule();
     @Rule
-    public EmbeddedDatabaseRule database = new EmbeddedDatabaseRule( getClass() ).startLazily();
+    public EmbeddedDatabaseRule database = new EmbeddedDatabaseRule( getClass() );
+    private CheckPointer checkPointer;
+    private IndexingService indexingService;
+    private FileSystemAbstraction fileSystem;
+
+    @Before
+    public void setUp()
+    {
+        checkPointer = resolveDependency( CheckPointer.class );
+        indexingService = resolveDependency( IndexingService.class );
+        fileSystem = resolveDependency( FileSystemAbstraction.class );
+    }
 
     @Test
     public void concurrentIndexSnapshotUseDifferentSnapshots() throws Exception
     {
         Label label = Label.label( "testLabel" );
         prepareDatabase( label );
 
-        CheckPointer checkPointer = resolveDependency( CheckPointer.class );
-        IndexingService indexingService = resolveDependency( IndexingService.class );
-
         forceCheckpoint( checkPointer );
         ResourceIterator<File> firstCheckpointSnapshot = indexingService.snapshotStoreFiles();
         generateData( label );
+        removeOldNodes( LongStream.range( 1, 20 ) );
+        updateOldNodes( LongStream.range( 30, 40 ) );
 
         forceCheckpoint( checkPointer );
         ResourceIterator<File> secondCheckpointSnapshot = indexingService.snapshotStoreFiles();
 
+        generateData( label );
+        removeOldNodes( LongStream.range( 50, 60 ) );
+        updateOldNodes( LongStream.range( 70, 80 ) );
+
+        forceCheckpoint( checkPointer );
+        ResourceIterator<File> thirdCheckpointSnapshot = indexingService.snapshotStoreFiles();
+
+        Set<String> firstSnapshotFileNames = getFileNames( firstCheckpointSnapshot );
+        Set<String> secondSnapshotFileNames = getFileNames( secondCheckpointSnapshot );
+        Set<String> thirdSnapshotFileNames = getFileNames( thirdCheckpointSnapshot );
+
+        compareSnapshotFiles( firstSnapshotFileNames, secondSnapshotFileNames, fileSystem );
+        compareSnapshotFiles( secondSnapshotFileNames, thirdSnapshotFileNames, fileSystem );
+        compareSnapshotFiles( thirdSnapshotFileNames, firstSnapshotFileNames, fileSystem );
+
+        firstCheckpointSnapshot.close();
+        secondCheckpointSnapshot.close();
+        thirdCheckpointSnapshot.close();
+    }
+
+    @Test
+    public void snapshotFilesDeletedWhenSnapshotReleased() throws IOException
+    {
+        Label label = Label.label( "testLabel" );
+        prepareDatabase( label );
+
+        ResourceIterator<File> firstCheckpointSnapshot = indexingService.snapshotStoreFiles();
+        generateData( label );
+        ResourceIterator<File> secondCheckpointSnapshot = indexingService.snapshotStoreFiles();
+        generateData( label );
+        ResourceIterator<File> thirdCheckpointSnapshot = indexingService.snapshotStoreFiles();
 
         Set<String> firstSnapshotFileNames = getFileNames( firstCheckpointSnapshot );
         Set<String> secondSnapshotFileNames = getFileNames( secondCheckpointSnapshot );
+        Set<String> thirdSnapshotFileNames = getFileNames( thirdCheckpointSnapshot );
+
+        generateData( label );
+        forceCheckpoint( checkPointer );
+
+        assertTrue( firstSnapshotFileNames.stream().map( File::new ).allMatch( fileSystem::fileExists ) );
+        assertTrue( secondSnapshotFileNames.stream().map( File::new ).allMatch( fileSystem::fileExists ) );
+        assertTrue( thirdSnapshotFileNames.stream().map( File::new ).allMatch( fileSystem::fileExists ) );
+
+        firstCheckpointSnapshot.close();
+        secondCheckpointSnapshot.close();
+        thirdCheckpointSnapshot.close();
 
-        for ( String nameInFirstSnapshot : firstSnapshotFileNames )
+        generateData( label );
+        forceCheckpoint( checkPointer );
+
+        assertFalse( firstSnapshotFileNames.stream().map( File::new ).anyMatch( fileSystem::fileExists ) );
+        assertFalse( secondSnapshotFileNames.stream().map( File::new ).anyMatch( fileSystem::fileExists ) );
+        assertFalse( thirdSnapshotFileNames.stream().map( File::new ).anyMatch( fileSystem::fileExists ) );
+    }
+
+    private void compareSnapshotFiles( Set<String> firstSnapshotFileNames, Set<String> secondSnapshotFileNames,
+            FileSystemAbstraction fileSystem )
+    {
+        assertThat(
+                format( "Should have at least %d modified index files. Snapshot files are: %s", NUMBER_OF_INDEXES + 1,
+                        firstSnapshotFileNames ), firstSnapshotFileNames,
+                hasSize( greaterThanOrEqualTo( NUMBER_OF_INDEXES + 1 ) ) );
+        for ( String fileName : firstSnapshotFileNames )
         {
-            assertFalse( "Second snapshot fileset should not have files from first snapshot set." +
+            assertFalse( "Snapshot fileset should not have files from another snapshot set." +
                     describeFileSets( firstSnapshotFileNames, secondSnapshotFileNames ),
-                    secondSnapshotFileNames.contains( nameInFirstSnapshot ) );
-            String path = FilenameUtils.getFullPath( nameInFirstSnapshot );
+                    secondSnapshotFileNames.contains( fileName ) );
+            String path = FilenameUtils.getFullPath( fileName );
             assertTrue( "Snapshot should contain files for index in path: " + path + "." +
                     describeFileSets( firstSnapshotFileNames, secondSnapshotFileNames ),
                     secondSnapshotFileNames.stream().anyMatch( name -> name.startsWith( path ) ) );
+            assertTrue( format( "Snapshot file '%s' should exist.", fileName ),
+                    fileSystem.fileExists( new File( fileName ) ) );
+        }
+    }
+
+    private void removeOldNodes( LongStream idRange )
+    {
+        try ( Transaction transaction = database.beginTx() )
+        {
+            idRange.mapToObj( id -> database.getNodeById( id ) ).forEach( Node::delete );
+            transaction.success();
+        }
+    }
+
+    private void updateOldNodes( LongStream idRange )
+    {
+        try ( Transaction transaction = database.beginTx() )
+        {
+            List<Node> nodes = idRange.mapToObj( id -> database.getNodeById( id ) ).collect( Collectors.toList() );
+            for ( int i = 0; i < NUMBER_OF_INDEXES; i++ )
+            {
+                String propertyName = PROPERTY_PREFIX + i;
+                nodes.forEach( node -> node.setProperty( propertyName, randomRule.nextLong() ) );
+            }
+            transaction.success();
         }
-        firstCheckpointSnapshot.close();
-        secondCheckpointSnapshot.close();
     }
 
     private String describeFileSets( Set<String> firstFileSet, Set<String> secondFileSet )
@@ -104,7 +211,7 @@ private void prepareDatabase( Label label )
        {
            for ( int i = 0; i < 10; i++ )
            {
-               database.schema().indexFor( label ).on( "property" + i ).create();
+               database.schema().indexFor( label ).on( PROPERTY_PREFIX + i ).create();
            }
            transaction.success();
        }
@@ -117,7 +224,7 @@ private void prepareDatabase( Label label )
 
     private void generateData( Label label )
     {
-        for ( int i = 0; i < 10; i++ )
+        for ( int i = 0; i < 100; i++ )
         {
             testNodeCreationTransaction( label, i );
         }
@@ -129,7 +236,6 @@ private void testNodeCreationTransaction( Label label, int i )
        {
            Node node = database.createNode( label );
            node.setProperty( "property" + i, i );
-           node.setProperty( i + "property", i );
            transaction.success();
        }
    }
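The enhanced tests exercise the consumer side of this policy change: IndexingService.snapshotStoreFiles() hands back a ResourceIterator<File> backed by such a snapshot, so the listed index files stay on disk until the iterator is closed, after which a later checkpoint lets Lucene drop superseded segments. A rough sketch of that consumer pattern, assuming a Neo4j 3.x GraphDatabaseAPI handle; copyFile is a hypothetical placeholder, not an API from this commit:

import java.io.File;

import org.neo4j.graphdb.ResourceIterator;
import org.neo4j.kernel.impl.api.index.IndexingService;
import org.neo4j.kernel.internal.GraphDatabaseAPI;

public class IndexSnapshotBackupSketch
{
    // Copies the schema index files that belong to one consistent snapshot.
    public static void backupIndexFiles( GraphDatabaseAPI db, File targetDirectory ) throws Exception
    {
        IndexingService indexingService =
                db.getDependencyResolver().resolveDependency( IndexingService.class );

        // The iterator pins a Lucene snapshot; the files it lists stay on disk until close().
        try ( ResourceIterator<File> snapshot = indexingService.snapshotStoreFiles() )
        {
            while ( snapshot.hasNext() )
            {
                copyFile( snapshot.next(), targetDirectory ); // hypothetical copy helper
            }
        }
        // After close() the snapshot is released and superseded segment files may be deleted.
    }

    private static void copyFile( File source, File targetDirectory )
    {
        // placeholder: a real implementation would stream source into targetDirectory
    }
}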
