Update stage-compiler version, removing limitation that stage names shouldn't be prefixes of each other
leventov committed Sep 12, 2015
1 parent 23d9425 commit 4606b64
Showing 11 changed files with 30 additions and 33 deletions.
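
This commit is enabled by stage-compiler 1.2, which lifts the 1.1 restriction that no stage name may be a prefix of another stage's name. That is why the workaround names below can be dropped: "SegHeader" becomes "SegmentHeader" even though a "Segment" stage exists, and "ValSize" becomes "ValueSize" alongside the "Value" stage. A minimal sketch of the now-permitted naming, with hypothetical class and field names (only the @Stage annotation and the init<Name>() convention come from this codebase):

    // Illustrative only: under stage-compiler 1.2, stages "Segment" and
    // "SegmentHeader" may coexist although one name is a prefix of the other.
    import net.openhft.sg.Stage; // annotation package assumed

    public abstract class PrefixedStageNamesSketch {

        @Stage("Segment") long segmentAddress = -1;

        void initSegment(long segmentAddress) {
            this.segmentAddress = segmentAddress;
        }

        @Stage("SegmentHeader") long segmentHeaderAddress = -1;

        void initSegmentHeader(long segmentHeaderAddress) {
            this.segmentHeaderAddress = segmentHeaderAddress;
        }
    }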
pom.xml: 2 changes (1 addition, 1 deletion)
@@ -148,7 +148,7 @@
<dependency>
<groupId>net.openhft</groupId>
<artifactId>stage-compiler</artifactId>
- <version>1.1</version>
+ <version>1.2</version>
<scope>provided</scope>
</dependency>

@@ -114,13 +114,13 @@ public Data<K> key() {
return entryKey;
}

@Stage("TheEntrySizeInChunks") public int entrySizeInChunks = 0;
@Stage("EntrySizeInChunks") public int entrySizeInChunks = 0;

- void initTheEntrySizeInChunks() {
+ void initEntrySizeInChunks() {
entrySizeInChunks = hh.h().inChunks(entrySize());
}

- public void initTheEntrySizeInChunks(int actuallyUsedChunks) {
+ public void initEntrySizeInChunks(int actuallyUsedChunks) {
entrySizeInChunks = actuallyUsedChunks;
}

@@ -42,20 +42,18 @@ public abstract class SegmentStages implements SegmentLock {
@StageRef public VanillaChronicleHashHolder<?, ?, ?> hh;
@StageRef public CheckOnEachPublicOperation checkOnEachPublicOperation;

@Stage("TheSegmentIndex") public int segmentIndex = -1;
public int segmentIndex = -1;

- public void initTheSegmentIndex(int segmentIndex) {
+ public void initSegmentIndex(int segmentIndex) {
this.segmentIndex = segmentIndex;
}

// "SegHeader" because stage-generator doesn't support stage names - one is a prefix of another
// there is a stage named "Segment"
@Stage("SegHeader") long segmentHeaderAddress;
@Stage("SegHeader") SegmentHeader segmentHeader = null;
@Stage("SegmentHeader") long segmentHeaderAddress;
@Stage("SegmentHeader") SegmentHeader segmentHeader = null;

- abstract boolean segHeaderInit();
+ abstract boolean segmentHeaderInit();

- private void initSegHeader() {
+ private void initSegmentHeader() {
segmentHeaderAddress = hh.h().ms.address() + hh.h().segmentHeaderOffset(segmentIndex);
segmentHeader = BigSegmentHeader.INSTANCE;
}
@@ -188,7 +186,7 @@ void initLocks() {
@Stage("Locks")
boolean tryFindInitLocksOfThisSegment(Object thisContext, int index) {
SegmentStages c = chaining.contextAtIndexInChain(index);
- if (c.segHeaderInit() &&
+ if (c.segmentHeaderInit() &&
c.segmentHeaderAddress == segmentHeaderAddress &&
c.locksInit()) {
SegmentStages root = c.rootContextOnThisSegment;
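In the stage-compiler convention seen above, the stage name drives the generated lifecycle methods: handwritten init<Name>(...) methods initialize a stage, and the abstract boolean <name>Init() query is implemented by the generated context class; renaming the stage from "SegHeader" to "SegmentHeader" therefore renames segHeaderInit() to segmentHeaderInit() as well. A rough, hypothetical sketch of how a generated implementation might satisfy that contract (this is not the actual stage-compiler output):

    // Hypothetical illustration: the generated context treats the stage as
    // initialized once its sentinel field is set, and initializes it lazily.
    class GeneratedSegmentContextSketch {
        long segmentHeaderAddress;
        SegmentHeader segmentHeader = null; // SegmentHeader is a real Chronicle type

        boolean segmentHeaderInit() {
            return segmentHeader != null; // null sentinel marks "not yet initialized"
        }

        void checkSegmentHeaderInit() {
            if (!segmentHeaderInit())
                initSegmentHeader(); // lazy init on first dependent access
        }

        void initSegmentHeader() {
            // ...compute segmentHeaderAddress and assign segmentHeader...
        }
    }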
@@ -25,7 +25,7 @@ public abstract class QuerySegmentStages extends SegmentStages {

@StageRef HashQuery q;

- void initTheSegmentIndex() {
+ void initSegmentIndex() {
segmentIndex = hh.h().hashSplitting.segmentIndex(q.hashOfKey);
}
}
@@ -635,7 +635,7 @@ public boolean nextEntry(@NotNull EntryCallback entryCallback, int chronicleId)
int segmentIndex = (int) (position >>> segmentIndexShift);
try (CompiledReplicatedMapIterationContext<K, KI, MKI, V, VI, MVI, R, ?> context =
iterationContext()) {
- context.initTheSegmentIndex(segmentIndex);
+ context.initSegmentIndex(segmentIndex);
context.updateLock().lock();
if (changesForUpdates.get(position)) {

@@ -691,7 +691,7 @@ public void dirtyEntries(long fromTimeStamp) {
boolean debugEnabled = LOG.isDebugEnabled();
for (int i = 0; i < actualSegments; i++) {
final int segmentIndex = i;
- c.initTheSegmentIndex(segmentIndex);
+ c.initSegmentIndex(segmentIndex);
c.forEachSegmentReplicableEntry(e -> {
if (debugEnabled) {
LOG.debug("Bootstrap entry: id {}, key {}, value {}", localIdentifier,
@@ -356,7 +356,7 @@ public QueryContextInterface<K, V, R> queryContext(Data<K> key) {
@Override
public MapSegmentContext<K, V, ?> segmentContext(int segmentIndex) {
IterationContextInterface<K, V, ?> c = iterationContext();
- c.initTheSegmentIndex(segmentIndex);
+ c.initSegmentIndex(segmentIndex);
return c;
}

@@ -23,5 +23,5 @@ public interface IterationContextInterface<K, V, R> extends MapEntry<K, V>,
MapSegmentContext<K, V, R> {
long pos();

- void initTheSegmentIndex(int segmentIndex);
+ void initSegmentIndex(int segmentIndex);
}
@@ -38,9 +38,9 @@ public interface QueryContextInterface<K, V, R> extends ExternalMapQueryContext<

Closeable acquireHandle();

- void initTheSegmentIndex(int segmentIndex);
+ void initSegmentIndex(int segmentIndex);

- boolean theSegmentIndexInit();
+ boolean segmentIndexInit();

void clear();
}
@@ -19,8 +19,6 @@
import net.openhft.chronicle.hash.Data;
import net.openhft.chronicle.hash.impl.stage.entry.AllocatedChunks;
import net.openhft.chronicle.hash.impl.stage.entry.HashEntryStages;
- import net.openhft.chronicle.hash.impl.stage.query.HashLookupSearch;
- import net.openhft.chronicle.hash.impl.stage.query.KeySearch;
import net.openhft.chronicle.map.MapEntry;
import net.openhft.chronicle.map.VanillaChronicleMap;
import net.openhft.chronicle.map.impl.VanillaChronicleMapHolder;
@@ -47,47 +45,47 @@ void initValueSizeOffset() {
valueSizeOffset = countValueSizeOffset();
}

@Stage("ValSize") public long valueSize = -1;
@Stage("ValSize") public long valueOffset;
@Stage("ValueSize") public long valueSize = -1;
@Stage("ValueSize") public long valueOffset;

@Stage("ValSize")
@Stage("ValueSize")
private void countValueOffset() {
mh.m().alignment.alignPositionAddr(entryBytes);
valueOffset = entryBytes.position();
}

- void initValSize(long valueSize) {
+ void initValueSize(long valueSize) {
this.valueSize = valueSize;
entryBytes.position(valueSizeOffset);
mh.m().valueSizeMarshaller.writeSize(entryBytes, valueSize);
countValueOffset();
}

- void initValSize() {
+ void initValueSize() {
entryBytes.position(valueSizeOffset);
valueSize = mh.m().readValueSize(entryBytes);
countValueOffset();
}

- void initValSizeEqualToOld(long oldValueSizeOffset, long oldValueSize, long oldValueOffset) {
+ void initValueSize_EqualToOld(long oldValueSizeOffset, long oldValueSize, long oldValueOffset) {
valueSize = oldValueSize;
valueOffset = valueSizeOffset + (oldValueOffset - oldValueSizeOffset);
}

public void initValue(Data<?> value) {
entryBytes.position(valueSizeOffset);
- initValSize(value.size());
+ initValueSize(value.size());
writeValue(value);
}

public void writeValue(Data<?> value) {
value.writeTo(entryBS, valueOffset);
}

- public void initValueWithoutSize(
+ public void initValue_WithoutSize(
Data<?> value, long oldValueSizeOffset, long oldValueSize, long oldValueOffset) {
assert oldValueSize == value.size();
- initValSizeEqualToOld(oldValueSizeOffset, oldValueSize, oldValueOffset);
+ initValueSize_EqualToOld(oldValueSizeOffset, oldValueSize, oldValueOffset);
writeValue(value);
}
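
Note the underscore in the new names: in initValueSize_EqualToOld and initValue_WithoutSize, the part before the underscore names the stage being initialized ("ValueSize", "Value"), while the suffix merely distinguishes the variant. Presumably this keeps initializer-to-stage matching unambiguous now that prefix-related stage names such as "Value" and "ValueSize" coexist. A hypothetical illustration (not code from this commit):

    // Hypothetical: both initializers belong to the "Value" stage; the text
    // after '_' is a descriptive suffix, assumed ignored for stage matching.
    public void initValue(Data<?> value) { /* writes size, then value */ }
    public void initValue_WithoutSize(Data<?> value,
                                      long oldValueSizeOffset, long oldValueSize,
                                      long oldValueOffset) { /* reuses old size */ }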

@@ -221,7 +219,7 @@ public final void freeExtraAllocatedChunks() {
entrySizeInChunks < allocatedChunks.allocatedChunks) {
s.free(pos + entrySizeInChunks, allocatedChunks.allocatedChunks - entrySizeInChunks);
} else {
- initTheEntrySizeInChunks(allocatedChunks.allocatedChunks);
+ initEntrySizeInChunks(allocatedChunks.allocatedChunks);
}
}
}
@@ -65,6 +65,7 @@ public void initReplicatedInputBytes(Bytes replicatedInputBytes) {
replicatedInputStore.setBytes(replicatedInputBytes);
}

+ // ri for "replication input"
@Stage("ReplicationInput") public long bootstrapTimestamp;
@Stage("ReplicationInput") public long riKeySize = -1;
@Stage("ReplicationInput") public long riValueSize;
@@ -70,7 +70,7 @@ public void putValueDeletedEntry(Data<V> newValue) {
long oldValueSize = valueSize;
long oldValueOffset = valueOffset;
allocatedChunks.initEntryAndKeyCopying(entrySize, valueOffset - entryStartOffset);
- initValueWithoutSize(newValue, oldValueSizeOffset, oldValueSize, oldValueOffset);
+ initValue_WithoutSize(newValue, oldValueSizeOffset, oldValueSize, oldValueOffset);
}
freeExtraAllocatedChunks();
}