Remove the obsolete ability of the separate Lucene index provider to archive failed indexes.
Rename SchemaIndex.<Provider>.param() -> SchemaIndex.<Provider>.providerName()
MishaDemianenko committed Mar 28, 2018
1 parent f46cbe7 commit 37ab919
Showing 29 changed files with 55 additions and 121 deletions.
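In short: callers that previously obtained an index provider name via SchemaIndex.<Provider>.param() now call providerName(), and the unused "archive a failed index into a zip before cleanup" machinery is removed from the Lucene index storage classes. A minimal before/after sketch of the rename (illustrative only; the surrounding class is not part of this commit):

import org.neo4j.graphdb.factory.GraphDatabaseSettings.SchemaIndex;

// Illustrative sketch of the rename, not code from this commit.
public class ProviderNameRenameExample
{
    public static void main( String[] args )
    {
        // Before this commit: SchemaIndex.LUCENE10.param()
        // After this commit:  SchemaIndex.LUCENE10.providerName()
        String providerName = SchemaIndex.LUCENE10.providerName();
        System.out.println( providerName ); // prints "lucene-1.0" per the enum definition below
    }
}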
@@ -252,7 +252,7 @@ public void oldLuceneSchemaIndexShouldBeConsideredConsistentWithFusionProvider()
         String propKey = "propKey";

         // Given a lucene index
-        GraphDatabaseService db = getGraphDatabaseService( storeDir, defaultSchemaProvider, LUCENE10.param() );
+        GraphDatabaseService db = getGraphDatabaseService( storeDir, defaultSchemaProvider, LUCENE10.providerName() );
         createIndex( db, label, propKey );
         try ( Transaction tx = db.beginTx() )
         {
@@ -264,7 +264,7 @@ public void oldLuceneSchemaIndexShouldBeConsideredConsistentWithFusionProvider()

         ConsistencyCheckService service = new ConsistencyCheckService();
         Config configuration =
-                Config.defaults( settings( defaultSchemaProvider, NATIVE20.param() ) );
+                Config.defaults( settings( defaultSchemaProvider, NATIVE20.providerName() ) );
         Result result = runFullConsistencyCheck( service, configuration, storeDir );
         assertTrue( result.isSuccessful() );
     }
@@ -516,23 +516,23 @@ public enum SchemaIndex
         NATIVE10( "lucene+native-1.0" ),
         LUCENE10( "lucene-1.0" );

-        private final String param;
+        private final String providerName;

-        SchemaIndex( String param )
+        SchemaIndex( String providerName )
         {
-            this.param = param;
+            this.providerName = providerName;
         }

-        public String param()
+        public String providerName()
         {
-            return param;
+            return providerName;
         }
     }

     @Description( "Index provider to use when creating new indexes." )
     public static final Setting<String> default_schema_provider =
             setting( "dbms.index.default_schema_provider",
-                    optionsIgnoreCase( SchemaIndex.NATIVE20.param(), SchemaIndex.NATIVE10.param(), SchemaIndex.LUCENE10.param() ),
+                    optionsIgnoreCase( SchemaIndex.NATIVE20.providerName(), SchemaIndex.NATIVE10.providerName(), SchemaIndex.LUCENE10.providerName() ),
                     null );

     @Description( "Location where Neo4j keeps the logical transaction logs." )
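The renamed accessor is also what backs the allowed values of dbms.index.default_schema_provider. A small illustrative sketch (not part of the commit) that prints the accepted provider names:

import org.neo4j.graphdb.factory.GraphDatabaseSettings.SchemaIndex;

// Illustrative sketch, not code from this commit: list the provider names that
// dbms.index.default_schema_provider accepts, e.g. "lucene+native-1.0" and "lucene-1.0".
public class ListSchemaIndexProviderNames
{
    public static void main( String[] args )
    {
        for ( SchemaIndex schemaIndex : SchemaIndex.values() )
        {
            System.out.println( schemaIndex.providerName() );
        }
    }
}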
@@ -126,7 +126,8 @@ public void setValueWithOldSetting( String value, Map<String,String> rawConfigur
         {
             if ( value.equals( Settings.FALSE ) )
             {
-                rawConfiguration.putIfAbsent( GraphDatabaseSettings.default_schema_provider.name(), GraphDatabaseSettings.SchemaIndex.LUCENE10.param() );
+                rawConfiguration.putIfAbsent( GraphDatabaseSettings.default_schema_provider.name(),
+                        GraphDatabaseSettings.SchemaIndex.LUCENE10.providerName() );
             }
         }
     } );
@@ -172,28 +172,28 @@ public void migrateEnableNativeSchemaIndex()
     {
         Map<String,String> migratedProperties = migrator.apply( stringMap( "unsupported.dbms.enable_native_schema_index", "false" ), getLog() );
         assertEquals( "Old property should be migrated to new",
-                migratedProperties, stringMap( "dbms.index.default_schema_provider", LUCENE10.param() ));
+                migratedProperties, stringMap( "dbms.index.default_schema_provider", LUCENE10.providerName() ));

         assertContainsWarningMessage("unsupported.dbms.enable_native_schema_index has been replaced with dbms.index.default_schema_provider.");
     }

     @Test
     public void skipMigrationOfEnableNativeSchemaIndexIfNotPresent()
     {
-        Map<String,String> migratedProperties = migrator.apply( stringMap( "dbms.index.default_schema_provider", NATIVE10.param() ), getLog() );
-        assertEquals( "Nothing to migrate", migratedProperties, stringMap( "dbms.index.default_schema_provider", NATIVE10.param() ) );
+        Map<String,String> migratedProperties = migrator.apply( stringMap( "dbms.index.default_schema_provider", NATIVE10.providerName() ), getLog() );
+        assertEquals( "Nothing to migrate", migratedProperties, stringMap( "dbms.index.default_schema_provider", NATIVE10.providerName() ) );
         logProvider.assertNoLoggingOccurred();
     }

     @Test
     public void skipMigrationOfEnableNativeSchemaIndexIfDefaultSchemaIndexConfigured()
     {
         Map<String,String> migratedProperties = migrator.apply( stringMap(
-                "dbms.index.default_schema_provider", NATIVE10.param(),
+                "dbms.index.default_schema_provider", NATIVE10.providerName(),
                 "unsupported.dbms.enable_native_schema_index", "false"
                 ), getLog() );
         assertEquals( "Should keep pre configured default schema index.",
-                migratedProperties, stringMap( "dbms.index.default_schema_provider", NATIVE10.param() ) );
+                migratedProperties, stringMap( "dbms.index.default_schema_provider", NATIVE10.providerName() ) );
         assertContainsWarningMessage();
     }
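The rule these tests pin down: an explicit unsupported.dbms.enable_native_schema_index=false is migrated to dbms.index.default_schema_provider=lucene-1.0 (LUCENE10.providerName()), unless a default provider is already configured. A condensed sketch of that rule over a plain settings map (hypothetical restatement, not the migrator's actual implementation):

import java.util.Map;

// Condensed, hypothetical restatement of the migration rule exercised above.
class EnableNativeSchemaIndexMigrationSketch
{
    static void migrate( Map<String,String> config )
    {
        String oldValue = config.remove( "unsupported.dbms.enable_native_schema_index" );
        if ( "false".equals( oldValue ) )
        {
            // Keep a pre-configured default schema provider if one is already present.
            config.putIfAbsent( "dbms.index.default_schema_provider", "lucene-1.0" );
        }
    }
}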
@@ -37,7 +37,6 @@ public class LuceneIndexStorageBuilder
     private FileSystemAbstraction fileSystem;
     private File indexRootFolder;
     private PartitionedIndexStorage indexStorage;
-    private boolean archiveFailed;

     private LuceneIndexStorageBuilder()
     {
@@ -65,7 +64,7 @@ public PartitionedIndexStorage build()
             Objects.requireNonNull( directoryFactory );
             Objects.requireNonNull( fileSystem );
             Objects.requireNonNull( indexRootFolder );
-            indexStorage = new PartitionedIndexStorage( directoryFactory, fileSystem, indexRootFolder, archiveFailed );
+            indexStorage = new PartitionedIndexStorage( directoryFactory, fileSystem, indexRootFolder );
         }
         return indexStorage;
     }
@@ -117,10 +116,4 @@ public LuceneIndexStorageBuilder withIndexStorage( PartitionedIndexStorage index
         this.indexStorage = indexStorage;
         return this;
     }
-
-    public LuceneIndexStorageBuilder archivingFailed( boolean archiveFailed )
-    {
-        this.archiveFailed = archiveFailed;
-        return this;
-    }
 }
@@ -35,8 +35,8 @@ public IndexStorageFactory( DirectoryFactory dirFactory, FileSystemAbstraction f
         this.structure = structure;
     }

-    public PartitionedIndexStorage indexStorageOf( long indexId, boolean archiveFailed )
+    public PartitionedIndexStorage indexStorageOf( long indexId )
     {
-        return new PartitionedIndexStorage( dirFactory, fileSystem, structure.directoryForIndex( indexId ), archiveFailed );
+        return new PartitionedIndexStorage( dirFactory, fileSystem, structure.directoryForIndex( indexId ) );
     }
 }
@@ -21,8 +21,6 @@

 import org.apache.commons.lang3.StringUtils;
 import org.apache.lucene.store.Directory;
-import org.apache.lucene.store.IOContext;
-import org.apache.lucene.store.IndexInput;

 import java.io.File;
 import java.io.IOException;
@@ -32,8 +30,6 @@
 import java.util.List;
 import java.util.Map;
 import java.util.stream.Stream;
-import java.util.zip.ZipEntry;
-import java.util.zip.ZipOutputStream;

 import org.neo4j.io.IOUtils;
 import org.neo4j.io.fs.FileSystemAbstraction;
@@ -56,15 +52,12 @@ public class PartitionedIndexStorage

     private final DirectoryFactory directoryFactory;
     private final FileSystemAbstraction fileSystem;
-    private final boolean archiveFailed;
     private final FolderLayout folderLayout;
     private final FailureStorage failureStorage;

-    public PartitionedIndexStorage( DirectoryFactory directoryFactory, FileSystemAbstraction fileSystem,
-            File rootFolder, boolean archiveFailed )
+    public PartitionedIndexStorage( DirectoryFactory directoryFactory, FileSystemAbstraction fileSystem, File rootFolder )
     {
         this.fileSystem = fileSystem;
-        this.archiveFailed = archiveFailed;
         this.folderLayout = new IndexFolderLayout( rootFolder );
         this.directoryFactory = directoryFactory;
         this.failureStorage = new FailureStorage( fileSystem, folderLayout );
@@ -146,7 +139,7 @@ public String getStoredIndexFailure()
      */
     public void prepareFolder( File folder ) throws IOException
     {
-        cleanupFolder( folder, archiveFailed );
+        cleanupFolder( folder );
         fileSystem.mkdirs( folder );
     }

@@ -158,43 +151,18 @@ public void prepareFolder( File folder ) throws IOException
      * @throws IOException if some removal operation fails.
      */
     public void cleanupFolder( File folder ) throws IOException
-    {
-        cleanupFolder( folder, false );
-    }
-
-    private void cleanupFolder( File folder, boolean archiveFailed ) throws IOException
     {
         List<File> partitionFolders = listFolders( folder );
         if ( !partitionFolders.isEmpty() )
         {
-            try ( ZipOutputStream zip = archiveFile( folder, archiveFailed ) )
+            for ( File partitionFolder : partitionFolders )
             {
-                byte[] buffer = null;
-                if ( zip != null )
-                {
-                    buffer = new byte[4 * 1024];
-                }
-                for ( File partitionFolder : partitionFolders )
-                {
-                    cleanupLuceneDirectory( partitionFolder, zip, buffer );
-                }
+                cleanupLuceneDirectory( partitionFolder );
             }
         }
         fileSystem.deleteRecursively( folder );
     }

-    private ZipOutputStream archiveFile( File folder, boolean archiveFailed ) throws IOException
-    {
-        ZipOutputStream zip = null;
-        if ( archiveFailed )
-        {
-            File archiveFile = new File( folder.getParent(),
-                    "archive-" + folder.getName() + "-" + System.currentTimeMillis() + ".zip" );
-            zip = new ZipOutputStream( fileSystem.openAsOutputStream( archiveFile, false ) );
-        }
-        return zip;
-    }
-
     /**
      * Opens all {@link Directory lucene directories} contained in the {@link #getIndexFolder() index folder}.
      *
@@ -256,38 +224,15 @@ private List<File> listFolders( File rootFolder )
      * Uses {@link FileUtils#windowsSafeIOOperation(FileUtils.FileOperation)} underneath.
      *
      * @param folder the path to the directory to cleanup.
-     * @param zip an optional zip output stream to archive files into.
-     * @param buffer a byte buffer to use for copying bytes from the files into the archive.
      * @throws IOException if removal operation fails.
      */
-    private void cleanupLuceneDirectory( File folder, ZipOutputStream zip, byte[] buffer ) throws IOException
+    private void cleanupLuceneDirectory( File folder ) throws IOException
     {
         try ( Directory dir = directoryFactory.open( folder ) )
         {
-            String folderName = folder.getName() + "/";
-            if ( zip != null )
-            {
-                zip.putNextEntry( new ZipEntry( folderName ) );
-                zip.closeEntry();
-            }
             String[] indexFiles = dir.listAll();
             for ( String indexFile : indexFiles )
             {
-                if ( zip != null )
-                {
-                    zip.putNextEntry( new ZipEntry( folderName + indexFile ) );
-                    try ( IndexInput input = dir.openInput( indexFile, IOContext.READ ) )
-                    {
-                        for ( long pos = 0, size = input.length(); pos < size; )
-                        {
-                            int read = Math.min( buffer.length, (int) (size - pos) );
-                            input.readBytes( buffer, 0, read );
-                            pos += read;
-                            zip.write( buffer, 0, read );
-                        }
-                    }
-                    zip.closeEntry();
-                }
                 FileUtils.windowsSafeIOOperation( () -> dir.deleteFile( indexFile ) );
             }
         }
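With the archiving path gone, PartitionedIndexStorage is constructed without the boolean flag, and prepareFolder()/cleanupFolder() simply delete and recreate the partition directories. A minimal usage sketch of the post-change call shape (the import package names and the way the dependencies are obtained are assumptions for illustration, not taken from this diff):

import java.io.File;
import java.io.IOException;

import org.neo4j.io.fs.FileSystemAbstraction;
import org.neo4j.kernel.api.impl.index.storage.DirectoryFactory;
import org.neo4j.kernel.api.impl.index.storage.PartitionedIndexStorage;

// Illustrative sketch, not code from this commit.
class IndexFolderCleanupSketch
{
    static void recreateIndexFolder( DirectoryFactory directoryFactory, FileSystemAbstraction fs,
            File indexRootFolder ) throws IOException
    {
        // The fourth "archiveFailed" constructor argument no longer exists.
        PartitionedIndexStorage storage = new PartitionedIndexStorage( directoryFactory, fs, indexRootFolder );
        // prepareFolder() now always does a plain cleanup followed by mkdirs;
        // the old "zip up a failed index before deleting it" branch is gone.
        storage.prepareFolder( storage.getIndexFolder() );
    }
}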
@@ -22,7 +22,6 @@
 import java.io.File;
 import java.io.IOException;

-import org.neo4j.graphdb.factory.GraphDatabaseSettings;
 import org.neo4j.internal.kernel.api.IndexCapability;
 import org.neo4j.internal.kernel.api.InternalIndexState;
 import org.neo4j.io.fs.FileSystemAbstraction;
@@ -168,7 +167,7 @@ public String getPopulationFailure( long indexId, SchemaIndexDescriptor descript

     private PartitionedIndexStorage getIndexStorage( long indexId )
     {
-        return indexStorageFactory.indexStorageOf( indexId, config.get( GraphDatabaseSettings.archive_failed_index ) );
+        return indexStorageFactory.indexStorageOf( indexId );
     }

     private boolean indexIsOnline( PartitionedIndexStorage indexStorage, SchemaIndexDescriptor descriptor ) throws IOException
@@ -108,7 +108,7 @@ public static FusionIndexProvider newInstance( PageCache pageCache, File storeDi

         String defaultSchemaProvider = config.get( GraphDatabaseSettings.default_schema_provider );
         int priority = LuceneIndexProvider.PRIORITY;
-        if ( GraphDatabaseSettings.SchemaIndex.LUCENE10.param().equals( defaultSchemaProvider ) )
+        if ( GraphDatabaseSettings.SchemaIndex.LUCENE10.providerName().equals( defaultSchemaProvider ) )
         {
             priority = 100;
         }
@@ -23,7 +23,6 @@
 import org.apache.lucene.index.IndexWriterConfig;

 import org.neo4j.function.Factory;
-import org.neo4j.graphdb.factory.GraphDatabaseSettings;
 import org.neo4j.kernel.api.impl.index.IndexWriterConfigs;
 import org.neo4j.kernel.api.impl.index.builder.AbstractLuceneIndexBuilder;
 import org.neo4j.kernel.api.impl.index.partition.ReadOnlyIndexPartitionFactory;
@@ -103,8 +102,7 @@ public SchemaIndex build()
         }
         else
         {
-            Boolean archiveFailed = getConfig( GraphDatabaseSettings.archive_failed_index );
-            PartitionedIndexStorage storage = storageBuilder.archivingFailed( archiveFailed ).build();
+            PartitionedIndexStorage storage = storageBuilder.build();
             return new WritableDatabaseSchemaIndex( storage, descriptor, samplingConfig,
                     new WritableIndexPartitionFactory( writerConfigFactory ) );
         }
@@ -89,7 +89,7 @@ public static FusionIndexProvider create( PageCache pageCache, File storeDir, Fi

         String defaultSchemaProvider = config.get( GraphDatabaseSettings.default_schema_provider );
         int priority = PRIORITY;
-        if ( GraphDatabaseSettings.SchemaIndex.NATIVE10.param().equals( defaultSchemaProvider ) )
+        if ( GraphDatabaseSettings.SchemaIndex.NATIVE10.providerName().equals( defaultSchemaProvider ) )
         {
             priority = 100;
         }
@@ -91,7 +91,7 @@ public static FusionIndexProvider create( PageCache pageCache, File storeDir, Fi

         String defaultSchemaProvider = config.get( GraphDatabaseSettings.default_schema_provider );
         int priority = PRIORITY;
-        if ( GraphDatabaseSettings.SchemaIndex.NATIVE20.param().equals( defaultSchemaProvider ) )
+        if ( GraphDatabaseSettings.SchemaIndex.NATIVE20.providerName().equals( defaultSchemaProvider ) )
         {
             priority = 100;
         }
@@ -178,12 +178,12 @@ private IndexProvider selectIndexProvider( PageCache pageCache, File storeDir, F
     {
         String defaultSchemaProvider = config.get( GraphDatabaseSettings.default_schema_provider );
         RecoveryCleanupWorkCollector recoveryCleanupWorkCollector = RecoveryCleanupWorkCollector.IMMEDIATE;
-        if ( LUCENE10.param().equals( defaultSchemaProvider ) )
+        if ( LUCENE10.providerName().equals( defaultSchemaProvider ) )
         {
             return LuceneIndexProviderFactory
                     .newInstance( pageCache, storeDir, fs, monitor, config, operationalMode, recoveryCleanupWorkCollector );
         }
-        else if ( NATIVE10.param().equals( defaultSchemaProvider ) )
+        else if ( NATIVE10.providerName().equals( defaultSchemaProvider ) )
         {
             return NativeLuceneFusionIndexProviderFactory10
                     .create( pageCache, storeDir, fs, monitor, config, operationalMode, recoveryCleanupWorkCollector );
@@ -147,7 +147,7 @@ public void saveCallCloseAndDropFromMultipleThreads() throws Exception
     private WritableTestDatabaseIndex createTestLuceneIndex( DirectoryFactory dirFactory, File folder ) throws IOException
     {
         PartitionedIndexStorage indexStorage = new PartitionedIndexStorage(
-                dirFactory, fileSystemRule.get(), folder, false );
+                dirFactory, fileSystemRule.get(), folder );
         WritableTestDatabaseIndex index = new WritableTestDatabaseIndex( indexStorage );
         index.create();
         index.open();
@@ -68,7 +68,7 @@ public class PartitionedIndexStorageTest
     public void createIndexStorage()
     {
         fs = fsRule.get();
-        storage = new PartitionedIndexStorage( getOrCreateDirFactory( fs ), fs, testDir.graphDbDir(), false );
+        storage = new PartitionedIndexStorage( getOrCreateDirFactory( fs ), fs, testDir.graphDbDir() );
     }

     @Test
@@ -182,7 +182,7 @@ public File[] listFiles( File directory )
         } )
         {
             PartitionedIndexStorage myStorage = new PartitionedIndexStorage( getOrCreateDirFactory( scramblingFs ),
-                    scramblingFs, testDir.graphDbDir(), false );
+                    scramblingFs, testDir.graphDbDir() );
             File parent = myStorage.getIndexFolder();
             int directoryCount = 10;
             for ( int i = 0; i < directoryCount; i++ )
@@ -43,12 +43,10 @@

 import static java.util.Arrays.asList;
 import static java.util.Collections.emptyList;
-
+import static org.junit.Assert.assertEquals;
 import static org.neo4j.kernel.api.impl.schema.LuceneIndexProviderFactory.PROVIDER_DESCRIPTOR;
 import static org.neo4j.kernel.api.index.IndexDirectoryStructure.directoriesByProviderKey;

-import static org.junit.Assert.assertEquals;
-
 public class AccessUniqueDatabaseIndexTest
 {
     @Rule
@@ -151,7 +149,7 @@ private PartitionedIndexStorage getIndexStorage()
     {
         IndexStorageFactory storageFactory = new IndexStorageFactory( directoryFactory, fileSystemRule.get(),
                 directoriesByProviderKey( storeDirectory ).forProvider( PROVIDER_DESCRIPTOR ) );
-        return storageFactory.indexStorageOf( 1, false );
+        return storageFactory.indexStorageOf( 1 );
     }

     private IndexEntryUpdate<?> add( long nodeId, Object propertyValue )
