Renamed InputEntityCacher and InputEntityReader
to InputEntityCacheWriter and InputEntityCacheReader respectively, and
renamed their subclasses accordingly.
tinwelint committed Jan 10, 2018
1 parent f733b30 commit ecbf1e5
Showing 8 changed files with 29 additions and 29 deletions.
@@ -145,13 +145,13 @@ public InputCache( FileSystemAbstraction fs, File cacheDirectory, RecordFormats

public InputCacher cacheNodes( String subType ) throws IOException
{
-return new InputNodeCacher( channel( NODES, subType, READ_WRITE ), channel( NODES_HEADER, subType, READ_WRITE ),
+return new InputNodeCacheWriter( channel( NODES, subType, READ_WRITE ), channel( NODES_HEADER, subType, READ_WRITE ),
recordFormats, chunkSize );
}

public InputCacher cacheRelationships( String subType ) throws IOException
{
-return new InputRelationshipCacher( channel( RELATIONSHIPS, subType, READ_WRITE ),
+return new InputRelationshipCacheWriter( channel( RELATIONSHIPS, subType, READ_WRITE ),
channel( RELATIONSHIPS_HEADER, subType, READ_WRITE ), recordFormats, chunkSize );
}

@@ -168,14 +168,14 @@ private File file( String type, String subType )

public InputIterable nodes( String subType, boolean deleteAfterUse )
{
-return entities( () -> new InputNodeReader( channel( NODES, subType, READ ),
+return entities( () -> new InputNodeCacheReader( channel( NODES, subType, READ ),
channel( NODES_HEADER, subType, READ ),
deleteAction( deleteAfterUse, NODES, NODES_HEADER, subType ) ) );
}

public InputIterable relationships( String subType, boolean deleteAfterUse )
{
-return entities( () -> new InputRelationshipReader( channel( RELATIONSHIPS, subType, READ ),
+return entities( () -> new InputRelationshipCacheReader( channel( RELATIONSHIPS, subType, READ ),
channel( RELATIONSHIPS_HEADER, subType, READ ),
deleteAction( deleteAfterUse, RELATIONSHIPS, RELATIONSHIPS_HEADER, subType ) ) );
}
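
For orientation, a minimal usage sketch (not part of this commit) of how the renamed classes are reached through the InputCache factory methods above. The surrounding method and the "example" subType are hypothetical; cacheNodes(..), nodes(..), wrap(..) and InputEntity all appear elsewhere in this commit.

static InputIterable writeAndReadNodes( InputCache inputCache ) throws IOException
{
    // Write side: cacheNodes( .. ) now returns an InputNodeCacheWriter
    // (formerly InputNodeCacher) behind the InputCacher interface
    try ( InputCacher writer = inputCache.cacheNodes( "example" );
          InputEntityVisitor visitor = writer.wrap( new InputEntity() ) )
    {
        // serialize input nodes by driving 'visitor' here,
        // as the test at the bottom of this commit does
    }
    // Read side: iteration is served by an InputNodeCacheReader
    // (formerly InputNodeReader); 'true' deletes the cache files after use
    return inputCache.nodes( "example", true );
}
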
@@ -43,12 +43,12 @@
import static org.neo4j.unsafe.impl.batchimport.input.InputCache.newChunkHeaderBuffer;

/**
-* Abstract class for reading cached entities previously stored using {@link InputEntityCacher} or derivative.
+* Abstract class for reading cached entities previously stored using {@link InputEntityCacheWriter} or derivative.
* Entity data is read in batches, each handed off to one or more processors which interpret the bytes
* into input data. From the outside this is simply an {@link InputIterator},
* the parallelization happens inside.
*/
-abstract class InputEntityReader implements InputIterator
+abstract class InputEntityCacheReader implements InputIterator
{
// Used by workers, immutable
private final PrimitiveIntObjectMap<String>[] tokens;
@@ -60,7 +60,7 @@ abstract class InputEntityReader implements InputIterator
private boolean end;

@SuppressWarnings( "unchecked" )
-InputEntityReader( StoreChannel channel, StoreChannel header, Runnable closeAction )
+InputEntityCacheReader( StoreChannel channel, StoreChannel header, Runnable closeAction )
throws IOException
{
tokens = new PrimitiveIntObjectMap[HIGH_TOKEN_TYPE];
@@ -153,7 +153,7 @@ void initialize( long startPosition, int chunkLength ) throws IOException
}
buffer.clear();
buffer.limit( chunkLength );
-InputEntityReader.this.channel.read( buffer, startPosition );
+InputEntityCacheReader.this.channel.read( buffer, startPosition );
buffer.flip();
clearState();
}
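
The Javadoc above presents InputEntityCacheReader as a plain InputIterator whose batched, parallel reading is internal. A hedged consumer-side sketch: newChunk() and InputChunk.next( visitor ) both appear in this commit, but the draining loop itself is an assumption.

static void drain( InputIterator reader, InputEntityVisitor visitor ) throws IOException
{
    InputChunk chunk = reader.newChunk();
    while ( reader.next( chunk ) ) // assumption: fills the chunk with the next cached batch
    {
        while ( chunk.next( visitor ) ) // deserializes one entity at a time into the visitor
        {
        }
    }
}
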
@@ -54,7 +54,7 @@
* its thread-local buffer. When full, it {@link #writeChunk(ByteBuffer) writes} that chunk of data to the {@link #channel},
* clearing its buffer so that it is ready to serialize more data into it.
*/
-abstract class InputEntityCacher implements InputCacher
+abstract class InputEntityCacheWriter implements InputCacher
{
static final String[] EMPTY_STRING_ARRAY = new String[0];

@@ -70,7 +70,7 @@ abstract class InputEntityCacher implements InputCacher
@SuppressWarnings( "unchecked" )
private final Map<String,Integer>[] tokens = new Map[HIGH_TOKEN_TYPE];

-protected InputEntityCacher( StoreChannel channel, StoreChannel header, RecordFormats recordFormats, int chunkSize )
+protected InputEntityCacheWriter( StoreChannel channel, StoreChannel header, RecordFormats recordFormats, int chunkSize )
{
this.chunkSize = chunkSize;
initMaxTokenKeyIds( recordFormats );
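
The chunking scheme the Javadoc above describes, where each worker serializes into a thread-local buffer and flushes full chunks to the shared channel, looks roughly like this generic sketch (plain java.nio, not the actual Neo4j class):

import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.WritableByteChannel;

class ChunkedWriter
{
    private final WritableByteChannel channel;
    private final ThreadLocal<ByteBuffer> buffers;

    ChunkedWriter( WritableByteChannel channel, int chunkSize )
    {
        this.channel = channel;
        this.buffers = ThreadLocal.withInitial( () -> ByteBuffer.allocate( chunkSize ) );
    }

    void write( byte[] record ) throws IOException // assumes a record fits in one chunk
    {
        ByteBuffer buffer = buffers.get();
        if ( buffer.remaining() < record.length )
        {
            writeChunk( buffer ); // buffer full: flush it before serializing more
        }
        buffer.put( record );
    }

    private synchronized void writeChunk( ByteBuffer buffer ) throws IOException
    {
        buffer.flip(); // write the whole chunk to the shared channel, then start over
        while ( buffer.hasRemaining() )
        {
            channel.write( buffer );
        }
        buffer.clear();
    }
}
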
@@ -31,11 +31,11 @@
import static org.neo4j.unsafe.impl.batchimport.input.InputCache.LABEL_TOKEN;

/**
-* Reads cached input nodes previously stored using {@link InputNodeCacher}.
+* Reads cached input nodes previously stored using {@link InputNodeCacheWriter}.
*/
-public class InputNodeReader extends InputEntityReader
+public class InputNodeCacheReader extends InputEntityCacheReader
{
-public InputNodeReader( StoreChannel channel, StoreChannel header, Runnable closeAction ) throws IOException
+public InputNodeCacheReader( StoreChannel channel, StoreChannel header, Runnable closeAction ) throws IOException
{
super( channel, header, closeAction );
}
@@ -48,7 +48,7 @@ public InputChunk newChunk()

class InputNodeDeserializer extends InputEntityDeserializer
{
-protected String[] previousLabels = InputEntityCacher.EMPTY_STRING_ARRAY;
+protected String[] previousLabels = InputEntityCacheWriter.EMPTY_STRING_ARRAY;

@Override
public boolean next( InputEntityVisitor visitor ) throws IOException
@@ -34,9 +34,9 @@
/**
* Caches input nodes to disk using a binary format.
*/
-public class InputNodeCacher extends InputEntityCacher
+public class InputNodeCacheWriter extends InputEntityCacheWriter
{
-public InputNodeCacher( StoreChannel channel, StoreChannel header, RecordFormats recordFormats, int chunkSize )
+public InputNodeCacheWriter( StoreChannel channel, StoreChannel header, RecordFormats recordFormats, int chunkSize )
throws IOException
{
super( channel, header, recordFormats, chunkSize );
@@ -29,11 +29,11 @@
import static org.neo4j.unsafe.impl.batchimport.input.InputCache.SAME_TYPE;

/**
-* Reads cached input relationships previously stored using {@link InputRelationshipCacher}.
+* Reads cached input relationships previously stored using {@link InputRelationshipCacheWriter}.
*/
-public class InputRelationshipReader extends InputEntityReader
+public class InputRelationshipCacheReader extends InputEntityCacheReader
{
-public InputRelationshipReader( StoreChannel channel, StoreChannel header, Runnable closeAction ) throws IOException
+public InputRelationshipCacheReader( StoreChannel channel, StoreChannel header, Runnable closeAction ) throws IOException
{
super( channel, header, closeAction );
}
@@ -32,9 +32,9 @@
/**
* Caches input relationships to disk using a binary format.
*/
-public class InputRelationshipCacher extends InputEntityCacher
+public class InputRelationshipCacheWriter extends InputEntityCacheWriter
{
-public InputRelationshipCacher( StoreChannel channel, StoreChannel header, RecordFormats recordFormats, int chunkSize )
+public InputRelationshipCacheWriter( StoreChannel channel, StoreChannel header, RecordFormats recordFormats, int chunkSize )
throws IOException
{
super( channel, header, recordFormats, chunkSize );
@@ -105,7 +105,7 @@ public void allowCreationOfSupportedNumberOfRelationshipTypes() throws IOException
private void cacheRelationship( int iterations, int maxNumberOfRelationshipTypes ) throws IOException
{
RecordFormats recordFormats = mockRecordFormats( 1000, 1000, maxNumberOfRelationshipTypes, 1000 );
-try ( InputRelationshipCacher cacher = getRelationshipCacher( recordFormats );
+try ( InputRelationshipCacheWriter cacher = getRelationshipCacher( recordFormats );
InputEntityVisitor visitor = cacher.wrap( new InputEntity() ) )
{
for ( int i = 0; i < iterations; i++ )
@@ -119,7 +119,7 @@ private void cacheLabels( int iterations, int maxNumberOfLabels ) throws IOException
{
RecordFormats recordFormats = mockRecordFormats( 1000, maxNumberOfLabels, 1000, 1000 );

-try ( InputNodeCacher cacher = getNodeCacher( recordFormats );
+try ( InputNodeCacheWriter cacher = getNodeCacher( recordFormats );
InputEntityVisitor visitor = cacher.wrap( new InputEntity() ) )
{
for ( int i = 0; i < iterations; i++ )
@@ -133,7 +133,7 @@ private void cacheGroups( int iterations, int maxNumberOfGroups ) throws IOException
{
RecordFormats recordFormats = mockRecordFormats( 1000, 1000, 1000, maxNumberOfGroups );

-try ( InputNodeCacher cacher = getNodeCacher( recordFormats );
+try ( InputNodeCacheWriter cacher = getNodeCacher( recordFormats );
InputEntityVisitor visitor = cacher.wrap( new InputEntity() ) )
{
Randoms randoms = getRandoms();
@@ -148,7 +148,7 @@ private void cacheNodeWithProperties( int iterations, int maxNumberOfProperties ) throws IOException
{
RecordFormats recordFormats = mockRecordFormats( maxNumberOfProperties, 1000, 1000, 1000 );

-try ( InputNodeCacher cacher = getNodeCacher( recordFormats );
+try ( InputNodeCacheWriter cacher = getNodeCacher( recordFormats );
InputEntityVisitor visitor = cacher.wrap( new InputEntity() ) )
{
Randoms randoms = getRandoms();
@@ -254,15 +254,15 @@ private Randoms getRandoms()
return new Randoms( randomRule.random(), Randoms.DEFAULT );
}

-private InputNodeCacher getNodeCacher( RecordFormats recordFormats ) throws IOException
+private InputNodeCacheWriter getNodeCacher( RecordFormats recordFormats ) throws IOException
{
-return new InputNodeCacher( mock( StoreChannel.class ),
+return new InputNodeCacheWriter( mock( StoreChannel.class ),
mock( StoreChannel.class ), recordFormats, 100 );
}

-private InputRelationshipCacher getRelationshipCacher( RecordFormats recordFormats ) throws IOException
+private InputRelationshipCacheWriter getRelationshipCacher( RecordFormats recordFormats ) throws IOException
{
-return new InputRelationshipCacher( mock( StoreChannel.class ),
+return new InputRelationshipCacheWriter( mock( StoreChannel.class ),
mock( StoreChannel.class ), recordFormats, 100 );
}
}
