/**
* Copyright (c) 2002-2015 "Neo Technology,"
* Network Engine for Objects in Lund AB [http://neotechnology.com]
*
* This file is part of Neo4j.
*
* Neo4j is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package org.neo4j.unsafe.impl.batchimport;
import java.util.Iterator;
import org.neo4j.kernel.impl.store.AbstractDynamicStore;
import org.neo4j.kernel.impl.store.AbstractRecordStore;
import org.neo4j.kernel.impl.store.PropertyStore;
import org.neo4j.kernel.impl.store.PropertyType;
import org.neo4j.kernel.impl.store.record.DynamicRecord;
import org.neo4j.kernel.impl.store.record.PrimitiveRecord;
import org.neo4j.kernel.impl.store.record.PropertyBlock;
import org.neo4j.kernel.impl.store.record.PropertyRecord;
import org.neo4j.kernel.impl.transaction.state.PropertyCreator;
import org.neo4j.kernel.impl.util.ReusableIteratorCostume;
import org.neo4j.unsafe.impl.batchimport.input.InputEntity;
import org.neo4j.unsafe.impl.batchimport.staging.ProcessorStep;
import org.neo4j.unsafe.impl.batchimport.staging.StageControl;
import org.neo4j.unsafe.impl.batchimport.store.BatchingPageCache.WriterFactory;
import org.neo4j.unsafe.impl.batchimport.store.BatchingPropertyRecordAccess;
import org.neo4j.unsafe.impl.batchimport.store.io.IoMonitor;
import static java.lang.Math.max;
/**
 * Writes {@link Batch entity batches} to the underlying stores. Also makes final composition of the
 * entities before writing, such as clumping up {@link PropertyBlock properties} into
 * {@link PropertyRecord property records}.
 *
 * @param <RECORD> type of entities.
 * @param <INPUT> type of input.
 */
public class EntityStoreUpdaterStep<RECORD extends PrimitiveRecord,INPUT extends InputEntity>
        extends ProcessorStep<Batch<INPUT,RECORD>>
{
    private final AbstractRecordStore<RECORD> entityStore;
    private final PropertyStore propertyStore;
    private final IoMonitor monitor;
    private final WriterFactory writerFactory;
    private final PropertyCreator propertyCreator;

    // Reusable instances for less GC
    private final BatchingPropertyRecordAccess propertyRecords = new BatchingPropertyRecordAccess();
    private final ReusableIteratorCostume<PropertyBlock> blockIterator = new ReusableIteratorCostume<>();

    /**
     * @param control stage control for panic/completion coordination.
     * @param config supplies the moving-average window size for stats.
     * @param entityStore store receiving the entity (node/relationship) records.
     * @param propertyStore store receiving the composed property records.
     * @param monitor I/O monitor, reset here so its stats cover exactly this stage's lifetime.
     * @param writerFactory the I/O writer this step delegates its processor count to.
     */
    EntityStoreUpdaterStep( StageControl control, Configuration config,
            AbstractRecordStore<RECORD> entityStore,
            PropertyStore propertyStore, IoMonitor monitor, WriterFactory writerFactory )
    {
        super( control, "v", 1, config.movingAverageSize(), 1, monitor ); // work-ahead doesn't matter, we're the last one
        this.entityStore = entityStore;
        this.propertyStore = propertyStore;
        this.writerFactory = writerFactory;
        // NOTE(review): second PropertyCreator argument deliberately null — dynamic record ids are
        // reassigned manually in reassignDynamicRecordIds() below rather than by the creator;
        // confirm against the PropertyCreator constructor contract.
        this.propertyCreator = new PropertyCreator( propertyStore, null );
        this.monitor = monitor;
        // Restart the I/O stats so throughput is measured from the start of this stage.
        this.monitor.reset();
    }

    /**
     * Writes the entity records of {@code batch} to {@link #entityStore}, first composing each
     * entity's property blocks into a property-record chain, then flushes all created property
     * records to {@link #propertyStore}.
     *
     * @param ticket ordering ticket from the stage framework (unused here).
     * @param batch records, their input entities and a flat array of pre-encoded property blocks.
     * @return {@code null} — this is the last step in the stage, nothing is passed downstream.
     */
    @Override
    protected Object process( long ticket, Batch<INPUT,RECORD> batch )
    {
        // Clear reused data structures
        propertyRecords.close();

        // Write the entity records, and at the same time allocate property records for its property blocks.
        long highestId = 0;
        RECORD[] records = batch.records;
        // All entities' property blocks live in one flat array; this cursor marks where the current
        // entity's run of propertyBlocksLengths[i] blocks starts.
        int propertyBlockCursor = 0;
        for ( int i = 0; i < records.length; i++ )
        {
            RECORD record = records[i];
            int propertyBlockCount = batch.propertyBlocksLengths[i];
            if ( record != null )
            {
                INPUT input = batch.input[i];
                if ( input.hasFirstPropertyId() )
                {   // Property chain was already created upstream; just link the record to it.
                    record.setNextProp( input.firstPropertyId() );
                }
                else
                {
                    if ( propertyBlockCount > 0 )
                    {
                        // Dynamic record ids must be made real BEFORE building the chain, since the
                        // chain embeds those ids inside the property blocks it writes.
                        reassignDynamicRecordIds( batch.propertyBlocks, propertyBlockCursor, propertyBlockCount );
                        long firstProp = propertyCreator.createPropertyChain( record,
                                blockIterator.dressArray( batch.propertyBlocks, propertyBlockCursor, propertyBlockCount ),
                                propertyRecords );
                        record.setNextProp( firstProp );
                    }
                }
                highestId = max( highestId, record.getId() );
                entityStore.updateRecord( record );
            }
            else
            {   // Here we have a relationship that refers to missing nodes. It's within the tolerance levels
                // of number of bad relationships. Just don't import this relationship.
            }
            // Advance past this entity's blocks even when the record was skipped.
            propertyBlockCursor += propertyBlockCount;
        }
        // NOTE(review): a batch whose records are all null passes highestId == 0 here — presumably
        // setHighestPossibleIdInUse only ever raises the high id; confirm in the store implementation.
        entityStore.setHighestPossibleIdInUse( highestId );

        // Write all the created property records.
        for ( PropertyRecord propertyRecord : propertyRecords.records() )
        {
            propertyStore.updateRecord( propertyRecord );
        }
        return null; // end of the line
    }

    /**
     * Replaces the bogus {@link DynamicRecord} ids inside {@code length} property blocks starting
     * at {@code offset} with real ids from the string/array stores.
     */
    private void reassignDynamicRecordIds( PropertyBlock[] blocks, int offset, int length )
    {
        // OK, so here we have property blocks, potentially referring to DynamicRecords. The DynamicRecords
        // have ids that we need to re-assign in here, because the ids are generated by multiple property encoders,
        // and so we let each one of the encoders generate their own bogus ids and we re-assign those ids here,
        // where we know we have a single thread doing this.
        for ( int i = 0; i < length; i++ )
        {
            PropertyBlock block = blocks[offset+i];
            PropertyType type = block.getType();
            switch ( type )
            {
            case STRING:
                reassignDynamicRecordIds( block, type, propertyStore.getStringStore() );
                break;
            case ARRAY:
                reassignDynamicRecordIds( block, type, propertyStore.getArrayStore() );
                break;
            default: // No need to do anything by default, we only need to relink for dynamic records
            }
        }
    }

    /**
     * Gives the value records of a single {@code block} real ids allocated from {@code store}:
     * rewrites the block's header word to point at the first new id, then chains each dynamic
     * record to the next by assigning the id allocated for the record that follows it. The last
     * record's next-block pointer is left untouched (presumably already an end-of-chain marker
     * set by the encoder — confirm).
     */
    private void reassignDynamicRecordIds( PropertyBlock block, PropertyType type, AbstractDynamicStore store )
    {
        Iterator<DynamicRecord> dynamicRecords = block.getValueRecords().iterator();
        long newId = store.nextId();
        block.getValueBlocks()[0] = PropertyStore.singleBlockLongValue( block.getKeyIndexId(), type, newId );
        while ( dynamicRecords.hasNext() )
        {
            DynamicRecord dynamicRecord = dynamicRecords.next();
            dynamicRecord.setId( newId );
            if ( dynamicRecords.hasNext() )
            {   // Look-ahead: allocate the next record's id now so this record can point at it.
                dynamicRecord.setNextBlock( newId = store.nextId() );
            }
        }
    }

    @Override
    protected void done()
    {
        super.done();
        // Stop the I/O monitor, since the stats in there is based on time passed since the start
        // and bytes written. NodeStage and CalculateDenseNodesStage can be run in parallel so if
        // NodeStage completes before CalculateDenseNodesStage then we want to stop the time in the I/O monitor.
        monitor.stop();
    }

    // Below we override the "parallelizable" methods to go directly towards the I/O writer, since
    // this step is very cheap and not parallelizable, except for the I/O part which is all handled by the writer.
    @Override
    public int numberOfProcessors()
    {
        return writerFactory.numberOfProcessors();
    }

    @Override
    public boolean incrementNumberOfProcessors()
    {
        return writerFactory.incrementNumberOfProcessors();
    }

    @Override
    public boolean decrementNumberOfProcessors()
    {
        return writerFactory.decrementNumberOfProcessors();
    }
}