Skip to content

Commit

Permalink
Limit HashLookup slot size to 4 or 8 bytes, back
Browse files Browse the repository at this point in the history
  • Loading branch information
leventov committed Sep 22, 2015
1 parent d9c771b commit b113118
Show file tree
Hide file tree
Showing 5 changed files with 158 additions and 46 deletions.
Expand Up @@ -16,13 +16,13 @@

package net.openhft.chronicle.hash.impl;

import net.openhft.lang.Maths;

import net.openhft.chronicle.core.Maths;

import static net.openhft.lang.MemoryUnit.BITS;
import static net.openhft.lang.MemoryUnit.BYTES;
import static net.openhft.lang.io.NativeBytes.UNSAFE;

public class CompactOffHeapLinearHashTable {
public abstract class CompactOffHeapLinearHashTable {
// to fit 64 bits per slot.
public static final int MAX_SEGMENT_CHUNKS = 1 << 30;
public static final int MAX_SEGMENT_ENTRIES = 1 << 29;
Expand All @@ -45,7 +45,12 @@ public static int keyBits(long entriesPerSegment, int valueBits) {
}

/**
 * Computes the hash lookup slot size in bytes for the given key and value bit widths.
 *
 * <p>The raw size (key bits + value bits, rounded up to whole bytes) is promoted to
 * 4 or 8 bytes so that each slot can be read and written with a single primitive
 * {@code int} or {@code long} memory access (see {@code IntCompactOffHeapLinearHashTable}
 * and {@code LongCompactOffHeapLinearHashTable}).
 *
 * @param keyBits   number of bits the stored key occupies in a slot
 * @param valueBits number of bits the stored value occupies in a slot
 * @return the slot size in bytes: 4, 8, or the raw byte size if it exceeds 8
 */
public static int entrySize(int keyBits, int valueBits) {
    int entrySize = (int) BYTES.alignAndConvert((long) (keyBits + valueBits), BITS);
    if (entrySize <= 4)
        return 4;
    if (entrySize <= 8)
        return 8;
    // NOTE(review): callers appear to assume slots never exceed 8 bytes
    // (VanillaChronicleHash only dispatches on 4 and 8) — confirm this branch
    // is unreachable or handled upstream.
    return entrySize;
}

public static long capacityFor(long entriesPerSegment) {
Expand All @@ -68,33 +73,27 @@ public static long mask(int bits) {


private final long capacityMask;
private final int hashLookupEntrySize;
private final long capacityMask2;
final long capacityMask2;
private final int keyBits;
private final long keyMask;
private final long valueMask;
private final long entryMask;

CompactOffHeapLinearHashTable(long capacity, int entrySize, int keyBits, int valueBits) {
this.capacityMask = capacity - 1L;

this.hashLookupEntrySize = entrySize;
this.capacityMask2 = capacityMask * entrySize;

this.keyBits = keyBits;
this.keyMask = mask(keyBits);
this.valueMask = mask(valueBits);
this.entryMask = mask(keyBits + valueBits);
}

CompactOffHeapLinearHashTable(VanillaChronicleHash h) {
this(h.segmentHashLookupCapacity, h.segmentHashLookupEntrySize, h.segmentHashLookupKeyBits,
h.segmentHashLookupValueBits);
}

long indexToPos(long index) {
return index * hashLookupEntrySize;
}
abstract long indexToPos(long index);

public long maskUnsetKey(long key) {
return (key &= keyMask) != UNSET_KEY ? key : keyMask;
Expand All @@ -105,7 +104,7 @@ public void checkValueForPut(long value) {
}

public boolean empty(long entry) {
return (entry & entryMask) == UNSET_ENTRY;
return entry == UNSET_ENTRY;
}

public long key(long entry) {
Expand All @@ -124,42 +123,24 @@ public long hlPos(long key) {
return indexToPos(key & capacityMask);
}

public long step(long pos) {
return (pos += hashLookupEntrySize) <= capacityMask2 ? pos : 0L;
}
public abstract long step(long pos);

public long stepBack(long pos) {
return (pos -= hashLookupEntrySize) >= 0 ? pos : capacityMask2;
}
public abstract long stepBack(long pos);

public long readEntry(long addr, long pos) {
return UNSAFE.getLong(addr + pos);
}
public abstract long readEntry(long addr, long pos);

public void writeEntryVolatile(long addr, long pos, long prevEntry, long key, long value) {
long entry = (prevEntry & ~entryMask) | entry(key, value);
UNSAFE.putLongVolatile(null, addr + pos, entry);
}
public abstract void writeEntryVolatile(
long addr, long pos, long prevEntry, long key, long value);

public void putValueVolatile(long addr, long pos, long value) {
checkValueForPut(value);
long currentEntry = readEntry(addr, pos);
writeEntryVolatile(addr, pos, currentEntry, key(currentEntry), value);
}

void writeEntry(long addr, long pos, long prevEntry, long anotherEntry) {
long entry = (prevEntry & ~entryMask) | (anotherEntry & entryMask);
UNSAFE.putLong(addr + pos, entry);
}
abstract void writeEntry(long addr, long pos, long prevEntry, long anotherEntry);

void clearEntry(long addr, long pos, long prevEntry) {
long entry = (prevEntry & ~entryMask);
UNSAFE.putLong(addr + pos, entry);
}

public void clearHashLookup(long addr) {
UNSAFE.setMemory(addr, capacityMask2 + hashLookupEntrySize, (byte) 0);
}
abstract void clearEntry(long addr, long pos, long prevEntry);

/**
* Returns "insert" position in terms of consequent putValue()
Expand Down
@@ -0,0 +1,63 @@
/*
* Copyright (C) 2015 higherfrequencytrading.com
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/

package net.openhft.chronicle.hash.impl;

import static net.openhft.lang.io.NativeBytes.UNSAFE;

/**
 * Hash lookup table whose slots are single 4-byte ints, allowing every slot
 * access to be one primitive {@code int} read or write.
 */
public final class IntCompactOffHeapLinearHashTable extends CompactOffHeapLinearHashTable {

    /** Byte size of one slot; each slot is accessed as a single int. */
    private static final long SLOT_SIZE = 4L;

    IntCompactOffHeapLinearHashTable(VanillaChronicleHash h) {
        super(h);
    }

    @Override
    long indexToPos(long index) {
        // Slot index -> byte offset within the table.
        return index * SLOT_SIZE;
    }

    @Override
    public long step(long pos) {
        // Advance one slot; the mask wraps the past-the-end offset back to 0.
        // (Relies on capacityMask2 == (capacity - 1) * SLOT_SIZE with capacity
        // a power of two — assumed from capacityFor(); TODO confirm.)
        return (pos + SLOT_SIZE) & capacityMask2;
    }

    @Override
    public long stepBack(long pos) {
        // Retreat one slot; the mask wraps offset 0 around to the last slot.
        return (pos - SLOT_SIZE) & capacityMask2;
    }

    @Override
    public long readEntry(long addr, long pos) {
        // Note: getInt sign-extends to long, matching how entries were written.
        return UNSAFE.getInt(addr + pos);
    }

    @Override
    public void writeEntryVolatile(long addr, long pos, long prevEntry, long key, long value) {
        // The whole slot is replaced, so prevEntry is not needed here.
        int slot = (int) entry(key, value);
        UNSAFE.putIntVolatile(null, addr + pos, slot);
    }

    @Override
    void writeEntry(long addr, long pos, long prevEntry, long anotherEntry) {
        UNSAFE.putInt(addr + pos, (int) anotherEntry);
    }

    @Override
    void clearEntry(long addr, long pos, long prevEntry) {
        // 0 == UNSET_ENTRY for a full-slot entry.
        UNSAFE.putInt(addr + pos, 0);
    }
}
@@ -0,0 +1,63 @@
/*
* Copyright (C) 2015 higherfrequencytrading.com
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/

package net.openhft.chronicle.hash.impl;

import static net.openhft.lang.io.NativeBytes.UNSAFE;

/**
 * Hash lookup table whose slots are single 8-byte longs, allowing every slot
 * access to be one primitive {@code long} read or write.
 */
public final class LongCompactOffHeapLinearHashTable extends CompactOffHeapLinearHashTable {

    /** Byte size of one slot; each slot is accessed as a single long. */
    private static final long SLOT_SIZE = 8L;

    LongCompactOffHeapLinearHashTable(VanillaChronicleHash h) {
        super(h);
    }

    @Override
    long indexToPos(long index) {
        // Slot index -> byte offset within the table.
        return index * SLOT_SIZE;
    }

    @Override
    public long step(long pos) {
        // Advance one slot; the mask wraps the past-the-end offset back to 0.
        // (Relies on capacityMask2 == (capacity - 1) * SLOT_SIZE with capacity
        // a power of two — assumed from capacityFor(); TODO confirm.)
        return (pos + SLOT_SIZE) & capacityMask2;
    }

    @Override
    public long stepBack(long pos) {
        // Retreat one slot; the mask wraps offset 0 around to the last slot.
        return (pos - SLOT_SIZE) & capacityMask2;
    }

    @Override
    public long readEntry(long addr, long pos) {
        return UNSAFE.getLong(addr + pos);
    }

    @Override
    public void writeEntryVolatile(long addr, long pos, long prevEntry, long key, long value) {
        // The whole slot is replaced, so prevEntry is not needed here.
        long slot = entry(key, value);
        UNSAFE.putLongVolatile(null, addr + pos, slot);
    }

    @Override
    void writeEntry(long addr, long pos, long prevEntry, long anotherEntry) {
        UNSAFE.putLong(addr + pos, anotherEntry);
    }

    @Override
    void clearEntry(long addr, long pos, long prevEntry) {
        // 0L == UNSET_ENTRY for a full-slot entry.
        UNSAFE.putLong(addr + pos, 0L);
    }
}
Expand Up @@ -255,7 +255,14 @@ public void initTransients() {
/**
 * (Re)creates the transient, non-serialized helpers: the key reader/interop
 * providers and the off-heap hash lookup table.
 *
 * <p>The hash lookup implementation is chosen by the slot size fixed at build
 * time: 4-byte slots use int-based unsafe access, 8-byte slots use long-based
 * access. Any other size violates the invariant established by
 * {@code CompactOffHeapLinearHashTable.entrySize()}.
 */
private void ownInitTransients() {
    keyReaderProvider = Provider.of((Class) originalKeyReader.getClass());
    keyInteropProvider = Provider.of((Class) originalKeyInterop.getClass());
    if (segmentHashLookupEntrySize == 4) {
        hashLookup = new IntCompactOffHeapLinearHashTable(this);
    } else if (segmentHashLookupEntrySize == 8) {
        hashLookup = new LongCompactOffHeapLinearHashTable(this);
    } else {
        throw new AssertionError("hash lookup slot size must be 4 or 8, " +
                segmentHashLookupEntrySize + " observed");
    }
}

public final void createMappedStoreAndSegments(BytesStore bytesStore) throws IOException {
Expand Down
12 changes: 5 additions & 7 deletions src/main/java/net/openhft/chronicle/map/ChronicleMapBuilder.java
Expand Up @@ -877,8 +877,8 @@ int actualSegments(boolean replicated) {
if (entriesPerSegment > 0) {
return (int) segmentsGivenEntriesPerSegmentFixed(entriesPerSegment, replicated);
}
// Try to fit 4 bytes per hash lookup slot, then 8. Trying to apply small slot
// size (=> segment size, because slot size depends on segment size) not only because
// they take less memory per entry (if entries are of KBs or MBs, it doesn't matter), but
// also because if segment size is small, slot and free list are likely to lie on a single
// memory page, reducing number of memory pages to update, if Chronicle Map is persisted.
Expand All @@ -898,11 +898,9 @@ int actualSegments(boolean replicated) {
// each segment. To compensate this at least on linux, don't accept segment sizes that with
// the given entry sizes, lead to too small total segment sizes in native memory pages,
// see comment in tryHashLookupSlotSize()
for (int hashLookupSlotSize = 3; hashLookupSlotSize <= 7; hashLookupSlotSize++) {
long segments = tryHashLookupSlotSize(hashLookupSlotSize, replicated);
if (segments > 0)
return (int) segments;
}
long segments = tryHashLookupSlotSize(4, replicated);
if (segments > 0)
return (int) segments;
long maxEntriesPerSegment = findMaxEntriesPerSegmentToFitHashLookupSlotSize(8, replicated);
long maxSegments = trySegments(maxEntriesPerSegment, MAX_SEGMENTS, replicated);
if (maxSegments > 0L)
Expand Down

0 comments on commit b113118

Please sign in to comment.